Backed out 25 changesets (bug 1858958, bug 1749044, bug 1859536) for causing bustages on AppleATDecoder.cpp and AudioData related failures CLOSED TREE

Backed out changeset 21402b00cc81 (bug 1749044)
Backed out changeset 20d58a991964 (bug 1749044)
Backed out changeset 6e7ee116c90e (bug 1749044)
Backed out changeset 7a3cd601c7a1 (bug 1749044)
Backed out changeset 2ec9140f8724 (bug 1749044)
Backed out changeset a33714bc62c4 (bug 1749044)
Backed out changeset 583617a9e949 (bug 1749044)
Backed out changeset 145a8190f6fc (bug 1749044)
Backed out changeset e67de77033d2 (bug 1858958)
Backed out changeset e8a916a76c18 (bug 1749044)
Backed out changeset dec9ce7a61a0 (bug 1749044)
Backed out changeset 961feba09c71 (bug 1749044)
Backed out changeset 7badb720d24b (bug 1749044)
Backed out changeset fd71b745d729 (bug 1749044)
Backed out changeset 874f792bb36f (bug 1858958)
Backed out changeset 22ff6bc3cb95 (bug 1859536)
Backed out changeset a04a238d1688 (bug 1859536)
Backed out changeset ac3bb1773f97 (bug 1858958)
Backed out changeset 886b419887c8 (bug 1858958)
Backed out changeset ea637545c497 (bug 1858958)
Backed out changeset fdc20c3b80d6 (bug 1858958)
Backed out changeset 5fec5cbf2af7 (bug 1858958)
Backed out changeset 73fffa240ac4 (bug 1858958)
Backed out changeset 25477e671dc0 (bug 1858958)
Backed out changeset 745df1a31541 (bug 1858958)
Norisz Fay 2024-03-05 19:06:31 +02:00
parent 077b930ac0
commit f21db3bc57
54 changed files with 1380 additions and 3818 deletions

View File

@ -34,8 +34,6 @@
#include "mozilla/dom/DOMTypes.h"
#include "mozilla/dom/Directory.h"
#include "mozilla/dom/DocGroup.h"
#include "mozilla/dom/EncodedAudioChunk.h"
#include "mozilla/dom/EncodedAudioChunkBinding.h"
#include "mozilla/dom/EncodedVideoChunk.h"
#include "mozilla/dom/EncodedVideoChunkBinding.h"
#include "mozilla/dom/File.h"
@ -60,7 +58,6 @@
#include "mozilla/dom/TransformStream.h"
#include "mozilla/dom/TransformStreamBinding.h"
#include "mozilla/dom/VideoFrame.h"
#include "mozilla/dom/AudioData.h"
#include "mozilla/dom/VideoFrameBinding.h"
#include "mozilla/dom/WebIDLSerializable.h"
#include "mozilla/dom/WritableStream.h"
@ -402,7 +399,6 @@ void StructuredCloneHolder::Read(nsIGlobalObject* aGlobal, JSContext* aCx,
mClonedSurfaces.Clear();
mInputStreamArray.Clear();
mVideoFrames.Clear();
mEncodedAudioChunks.Clear();
mEncodedVideoChunks.Clear();
Clear();
}
@ -1131,28 +1127,6 @@ JSObject* StructuredCloneHolder::CustomReadHandler(
}
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_AUDIODATA &&
CloneScope() == StructuredCloneScope::SameProcess &&
aCloneDataPolicy.areIntraClusterClonableSharedObjectsAllowed()) {
JS::Rooted<JSObject*> global(aCx, mGlobal->GetGlobalJSObject());
if (AudioData_Binding::ConstructorEnabled(aCx, global)) {
return AudioData::ReadStructuredClone(aCx, mGlobal, aReader,
AudioData()[aIndex]);
}
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_ENCODEDAUDIOCHUNK &&
CloneScope() == StructuredCloneScope::SameProcess &&
aCloneDataPolicy.areIntraClusterClonableSharedObjectsAllowed()) {
JS::Rooted<JSObject*> global(aCx, mGlobal->GetGlobalJSObject());
if (EncodedAudioChunk_Binding::ConstructorEnabled(aCx, global)) {
return EncodedAudioChunk::ReadStructuredClone(
aCx, mGlobal, aReader, EncodedAudioChunks()[aIndex]);
}
}
return ReadFullySerializableObjects(aCx, aReader, aTag, false);
}
@ -1272,29 +1246,6 @@ bool StructuredCloneHolder::CustomWriteHandler(
}
}
// See if this is an AudioData object.
if (StaticPrefs::dom_media_webcodecs_enabled()) {
mozilla::dom::AudioData* audioData = nullptr;
if (NS_SUCCEEDED(UNWRAP_OBJECT(AudioData, &obj, audioData))) {
SameProcessScopeRequired(aSameProcessScopeRequired);
return CloneScope() == StructuredCloneScope::SameProcess
? audioData->WriteStructuredClone(aWriter, this)
: false;
}
}
// See if this is a EncodedAudioChunk object.
if (StaticPrefs::dom_media_webcodecs_enabled()) {
EncodedAudioChunk* encodedAudioChunk = nullptr;
if (NS_SUCCEEDED(
UNWRAP_OBJECT(EncodedAudioChunk, &obj, encodedAudioChunk))) {
SameProcessScopeRequired(aSameProcessScopeRequired);
return CloneScope() == StructuredCloneScope::SameProcess
? encodedAudioChunk->WriteStructuredClone(aWriter, this)
: false;
}
}
{
// We only care about streams, so ReflectorToISupportsStatic is fine.
nsCOMPtr<nsISupports> base = xpc::ReflectorToISupportsStatic(aObj);
@ -1478,39 +1429,6 @@ StructuredCloneHolder::CustomReadTransferHandler(
return true;
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_AUDIODATA &&
CloneScope() == StructuredCloneScope::SameProcess &&
aCloneDataPolicy.areIntraClusterClonableSharedObjectsAllowed()) {
MOZ_ASSERT(aContent);
JS::Rooted<JSObject*> globalObj(aCx, mGlobal->GetGlobalJSObject());
// aContent will be released in CustomFreeTransferHandler.
if (!AudioData_Binding::ConstructorEnabled(aCx, globalObj)) {
return false;
}
AudioData::TransferredData* data =
static_cast<AudioData::TransferredData*>(aContent);
nsCOMPtr<nsIGlobalObject> global = mGlobal;
RefPtr<mozilla::dom::AudioData> audioData =
AudioData::FromTransferred(global.get(), data);
// aContent will be released in CustomFreeTransferHandler if audioData is null.
if (!audioData) {
return false;
}
delete data;
aContent = nullptr;
JS::Rooted<JS::Value> value(aCx);
if (!GetOrCreateDOMReflector(aCx, audioData, &value)) {
JS_ClearPendingException(aCx);
return false;
}
aReturnObject.set(&value.toObject());
return true;
}
return false;
}
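The write, read and free transfer handlers around this hunk form one lifecycle; a sketch of who owns AudioData::TransferredData at each stage, as inferred from the code shown here (not a quote from the tree's documentation):
// CustomWriteTransferHandler: audioData->Transfer() heap-allocates a
// TransferredData and hands ownership to the clone buffer via *aContent.
// CustomReadTransferHandler: AudioData::FromTransferred() consumes it;
// on success the handler deletes it and nulls aContent.
// CustomFreeTransferHandler: deletes aContent only if the read handler
// never ran (e.g. the message was dropped before deserialization).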
@ -1612,26 +1530,6 @@ StructuredCloneHolder::CustomWriteTransferHandler(
return true;
}
}
if (StaticPrefs::dom_media_webcodecs_enabled()) {
mozilla::dom::AudioData* audioData = nullptr;
rv = UNWRAP_OBJECT(AudioData, &obj, audioData);
if (NS_SUCCEEDED(rv)) {
MOZ_ASSERT(audioData);
*aExtraData = 0;
*aTag = SCTAG_DOM_AUDIODATA;
*aContent = nullptr;
UniquePtr<AudioData::TransferredData> data = audioData->Transfer();
if (!data) {
return false;
}
*aContent = data.release();
MOZ_ASSERT(*aContent);
*aOwnership = JS::SCTAG_TMO_CUSTOM;
return true;
}
}
}
{
@ -1769,16 +1667,6 @@ void StructuredCloneHolder::CustomFreeTransferHandler(
}
return;
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_AUDIODATA &&
CloneScope() == StructuredCloneScope::SameProcess) {
if (aContent) {
AudioData::TransferredData* data =
static_cast<AudioData::TransferredData*>(aContent);
delete data;
}
return;
}
}
bool StructuredCloneHolder::CustomCanTransferHandler(
@ -1862,15 +1750,6 @@ bool StructuredCloneHolder::CustomCanTransferHandler(
}
}
if (StaticPrefs::dom_media_webcodecs_enabled()) {
mozilla::dom::AudioData* audioData = nullptr;
nsresult rv = UNWRAP_OBJECT(AudioData, &obj, audioData);
if (NS_SUCCEEDED(rv)) {
SameProcessScopeRequired(aSameProcessScopeRequired);
return CloneScope() == StructuredCloneScope::SameProcess;
}
}
return false;
}

View File

@ -165,12 +165,10 @@ class StructuredCloneHolderBase {
};
class BlobImpl;
class EncodedAudioChunkData;
class EncodedVideoChunkData;
class MessagePort;
class MessagePortIdentifier;
struct VideoFrameSerializedData;
struct AudioDataSerializedData;
class StructuredCloneHolder : public StructuredCloneHolderBase {
public:
@ -272,16 +270,10 @@ class StructuredCloneHolder : public StructuredCloneHolderBase {
nsTArray<VideoFrameSerializedData>& VideoFrames() { return mVideoFrames; }
nsTArray<AudioDataSerializedData>& AudioData() { return mAudioData; }
nsTArray<EncodedVideoChunkData>& EncodedVideoChunks() {
return mEncodedVideoChunks;
}
nsTArray<EncodedAudioChunkData>& EncodedAudioChunks() {
return mEncodedAudioChunks;
}
// Implementations of the virtual methods to allow cloning of objects which
// JS engine itself doesn't clone.
@ -387,15 +379,9 @@ class StructuredCloneHolder : public StructuredCloneHolderBase {
// Used for cloning VideoFrame in the structured cloning algorithm.
nsTArray<VideoFrameSerializedData> mVideoFrames;
// Used for cloning AudioData in the structured cloning algorithm.
nsTArray<AudioDataSerializedData> mAudioData;
// Used for cloning EncodedVideoChunk in the structured cloning algorithm.
nsTArray<EncodedVideoChunkData> mEncodedVideoChunks;
// Used for cloning EncodedAudioChunk in the structured cloning algorithm.
nsTArray<EncodedAudioChunkData> mEncodedAudioChunks;
// This raw pointer is only set within ::Read() and is unset by the end.
nsIGlobalObject* MOZ_NON_OWNING_REF mGlobal;

View File

@ -157,10 +157,6 @@ enum StructuredCloneTags : uint32_t {
SCTAG_DOM_ENCODEDVIDEOCHUNK,
SCTAG_DOM_AUDIODATA,
SCTAG_DOM_ENCODEDAUDIOCHUNK,
// IMPORTANT: If you plan to add a new IDB tag, it _must_ be added before
// the "less stable" tags!
};

View File

@ -10,7 +10,6 @@
#include "VideoUtils.h"
#include "mozilla/Logging.h"
#include "mozilla/UniquePtr.h"
#include "Adts.h"
#include <inttypes.h>
extern mozilla::LazyLogModule gMediaDemuxerLog;
@ -22,6 +21,227 @@ extern mozilla::LazyLogModule gMediaDemuxerLog;
DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Verbose, msg, ##__VA_ARGS__)
namespace mozilla {
namespace adts {
// adts::FrameHeader - Holds the ADTS frame header and its parsing
// state.
//
// ADTS Frame Structure
//
// 11111111 1111BCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP(QQQQQQQQ
// QQQQQQQQ)
//
// Header consists of 7 or 9 bytes (without or with CRC).
// Letter Length(bits) Description
// { sync } 12 syncword 0xFFF, all bits must be 1
// B 1 MPEG Version: 0 for MPEG-4, 1 for MPEG-2
// C 2 Layer: always 0
// D 1 protection absent: set to 1 if there is no CRC,
// 0 if there is a CRC
// E 2 profile, the MPEG-4 Audio Object Type minus 1
// F 4 MPEG-4 Sampling Frequency Index (15 is forbidden)
// H 3 MPEG-4 Channel Configuration (in the case of 0, the
// channel configuration is sent via an in-band PCE)
// M 13 frame length, this value must include 7 or 9 bytes of
// header length: FrameLength =
// (ProtectionAbsent == 1 ? 7 : 9) + size(AACFrame)
// O 11 Buffer fullness
// P 2 Number of AAC frames (RDBs) in ADTS frame minus 1, for
// maximum compatibility always use 1 AAC frame per ADTS
// frame
// Q 16 CRC if protection absent is 0
class FrameHeader {
public:
uint32_t mFrameLength{};
uint32_t mSampleRate{};
uint32_t mSamples{};
uint32_t mChannels{};
uint8_t mObjectType{};
uint8_t mSamplingIndex{};
uint8_t mChannelConfig{};
uint8_t mNumAACFrames{};
bool mHaveCrc{};
// Returns whether aPtr matches a valid ADTS header sync marker
static bool MatchesSync(const uint8_t* aPtr) {
return aPtr[0] == 0xFF && (aPtr[1] & 0xF6) == 0xF0;
}
FrameHeader() { Reset(); }
// Header size
uint64_t HeaderSize() const { return (mHaveCrc) ? 9 : 7; }
bool IsValid() const { return mFrameLength > 0; }
// Resets the state to allow for a new parsing session.
void Reset() { PodZero(this); }
// Returns whether the bytes at aPtr parse as a valid ADTS frame header.
bool Parse(const uint8_t* aPtr) {
const uint8_t* p = aPtr;
if (!MatchesSync(p)) {
return false;
}
// AAC has 1024 samples per frame per channel.
mSamples = 1024;
mHaveCrc = !(p[1] & 0x01);
mObjectType = ((p[2] & 0xC0) >> 6) + 1;
mSamplingIndex = (p[2] & 0x3C) >> 2;
mChannelConfig = (p[2] & 0x01) << 2 | (p[3] & 0xC0) >> 6;
mFrameLength = static_cast<uint32_t>(
(p[3] & 0x03) << 11 | (p[4] & 0xFF) << 3 | (p[5] & 0xE0) >> 5);
mNumAACFrames = (p[6] & 0x03) + 1;
static const uint32_t SAMPLE_RATES[] = {96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350};
if (mSamplingIndex >= ArrayLength(SAMPLE_RATES)) {
LOG(("ADTS: Init() failure: invalid sample-rate index value: %" PRIu32
".",
mSamplingIndex));
return false;
}
mSampleRate = SAMPLE_RATES[mSamplingIndex];
MOZ_ASSERT(mChannelConfig < 8);
mChannels = (mChannelConfig == 7) ? 8 : mChannelConfig;
return true;
}
};
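To make the bit layout above concrete, here is a hand-built header and the fields Parse() extracts from it; the byte values are illustrative, not taken from the tree:
// Illustrative only: 7-byte ADTS header, no CRC, AAC-LC, 44.1 kHz, stereo.
static const uint8_t kExampleAdtsHeader[7] = {0xFF, 0xF1, 0x50, 0x80,
                                              0x2E, 0x1F, 0xFC};
FrameHeader exampleHeader;
if (exampleHeader.Parse(kExampleAdtsHeader)) {
  // mObjectType == 2 (AAC-LC), mSamplingIndex == 4 -> mSampleRate == 44100,
  // mChannelConfig == 2 -> mChannels == 2, mFrameLength == 368,
  // mNumAACFrames == 1, HeaderSize() == 7 (9 if a CRC were present).
}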
// adts::Frame - Frame meta container used to parse and hold a frame
// header and side info.
class Frame {
public:
Frame() : mOffset(0) {}
uint64_t Offset() const { return mOffset; }
size_t Length() const {
// TODO: If fields are zero'd when invalid, this check wouldn't be
// necessary.
if (!mHeader.IsValid()) {
return 0;
}
return mHeader.mFrameLength;
}
// Returns the offset to the start of frame's raw data.
uint64_t PayloadOffset() const { return mOffset + mHeader.HeaderSize(); }
// Returns the length of the frame's raw data (excluding the header) in bytes.
size_t PayloadLength() const {
// TODO: If fields are zero'd when invalid, this check wouldn't be
// necessary.
if (!mHeader.IsValid()) {
return 0;
}
return mHeader.mFrameLength - mHeader.HeaderSize();
}
// Returns the parsed frame header.
const FrameHeader& Header() const { return mHeader; }
bool IsValid() const { return mHeader.IsValid(); }
// Resets the frame header and data.
void Reset() {
mHeader.Reset();
mOffset = 0;
}
// Returns whether a valid frame header was found in [aStart, aEnd).
bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) {
MOZ_ASSERT(aStart && aEnd);
bool found = false;
const uint8_t* ptr = aStart;
// Require at least 7 bytes of data at the end of the buffer for the minimum
// ADTS frame header.
while (ptr < aEnd - 7 && !found) {
found = mHeader.Parse(ptr);
ptr++;
}
mOffset = aOffset + (static_cast<size_t>(ptr - aStart)) - 1u;
return found;
}
private:
// The offset to the start of the header.
uint64_t mOffset;
// The currently parsed frame header.
FrameHeader mHeader;
};
class FrameParser {
public:
// Returns the currently parsed frame. Reset via Reset or EndFrameSession.
const Frame& CurrentFrame() const { return mFrame; }
// Returns the first parsed frame. Reset via Reset.
const Frame& FirstFrame() const { return mFirstFrame; }
// Resets the parser. Don't use between frames as first frame data is reset.
void Reset() {
EndFrameSession();
mFirstFrame.Reset();
}
// Clears the currently parsed frame to allow parsing the next one.
void EndFrameSession() { mFrame.Reset(); }
// Parses the given buffer for a valid frame header and returns true if
// one was found.
bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) {
const bool found = mFrame.Parse(aOffset, aStart, aEnd);
if (mFrame.Length() && !mFirstFrame.Length()) {
mFirstFrame = mFrame;
}
return found;
}
private:
// We keep the first parsed frame around for static info access, the
// previously parsed frame for debugging and the currently parsed frame.
Frame mFirstFrame;
Frame mFrame;
};
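ADTSSniffer further down in this file shows the intended calling pattern; a condensed sketch, with aData/aLength standing in for a caller's buffer:
FrameParser parser;
if (parser.Parse(0, aData, aData + aLength)) {
  // A header was found: CurrentFrame().Offset() is its position and
  // CurrentFrame().Length() the full frame size; the sniffer then checks
  // for a second sync marker right after the frame to rule out false
  // positives inside AAC payload data.
}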
// Initialize the AAC AudioSpecificConfig.
// Only handles two-byte version for AAC-LC.
static void InitAudioSpecificConfig(const Frame& frame,
MediaByteBuffer* aBuffer) {
const FrameHeader& header = frame.Header();
MOZ_ASSERT(header.IsValid());
int audioObjectType = header.mObjectType;
int samplingFrequencyIndex = header.mSamplingIndex;
int channelConfig = header.mChannelConfig;
uint8_t asc[2];
asc[0] = (audioObjectType & 0x1F) << 3 | (samplingFrequencyIndex & 0x0E) >> 1;
asc[1] = (samplingFrequencyIndex & 0x01) << 7 | (channelConfig & 0x0F) << 3;
aBuffer->AppendElements(asc, 2);
}
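A worked example of the packing, with values chosen for illustration: for AAC-LC (object type 2), 44.1 kHz (frequency index 4) and stereo (channel configuration 2):
// asc[0] = (2 & 0x1F) << 3 | (4 & 0x0E) >> 1 = 0x10 | 0x02 = 0x12
// asc[1] = (4 & 0x01) << 7 | (2 & 0x0F) << 3 = 0x00 | 0x10 = 0x10
// i.e. the canonical 0x12 0x10 AudioSpecificConfig for such a stream.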
} // namespace adts
using media::TimeUnit;
@ -72,7 +292,7 @@ bool ADTSDemuxer::IsSeekable() const {
// ADTSTrackDemuxer
ADTSTrackDemuxer::ADTSTrackDemuxer(MediaResource* aSource)
: mSource(aSource),
mParser(new ADTS::FrameParser()),
mParser(new adts::FrameParser()),
mOffset(0),
mNumParsedFrames(0),
mFrameIndex(0),
@ -315,7 +535,7 @@ TimeUnit ADTSTrackDemuxer::Duration(int64_t aNumFrames) const {
return TimeUnit(aNumFrames * mSamplesPerFrame, mSamplesPerSecond);
}
const ADTS::Frame& ADTSTrackDemuxer::FindNextFrame(
const adts::Frame& ADTSTrackDemuxer::FindNextFrame(
bool findFirstFrame /*= false*/) {
static const int BUFFER_SIZE = 4096;
static const int MAX_SKIPPED_BYTES = 10 * BUFFER_SIZE;
@ -348,7 +568,7 @@ const ADTS::Frame& ADTSTrackDemuxer::FindNextFrame(
break;
}
const ADTS::Frame& currentFrame = mParser->CurrentFrame();
const adts::Frame& currentFrame = mParser->CurrentFrame();
foundFrame = mParser->Parse(frameHeaderOffset, buffer, buffer + read);
if (findFirstFrame && foundFrame) {
// Check for sync marker after the found frame, since it's
@ -359,7 +579,7 @@ const ADTS::Frame& ADTSTrackDemuxer::FindNextFrame(
currentFrame.Offset() + currentFrame.Length();
uint32_t read =
Read(buffer, AssertedCast<int64_t>(nextFrameHeaderOffset), 2);
if (read != 2 || !ADTS::FrameHeader::MatchesSync(buffer)) {
if (read != 2 || !adts::FrameHeader::MatchesSync(buffer)) {
frameHeaderOffset = currentFrame.Offset() + 1;
mParser->Reset();
foundFrame = false;
@ -401,7 +621,7 @@ const ADTS::Frame& ADTSTrackDemuxer::FindNextFrame(
return mParser->CurrentFrame();
}
bool ADTSTrackDemuxer::SkipNextFrame(const ADTS::Frame& aFrame) {
bool ADTSTrackDemuxer::SkipNextFrame(const adts::Frame& aFrame) {
if (!mNumParsedFrames || !aFrame.Length()) {
RefPtr<MediaRawData> frame(GetNextFrame(aFrame));
return frame;
@ -419,7 +639,7 @@ bool ADTSTrackDemuxer::SkipNextFrame(const ADTS::Frame& aFrame) {
}
already_AddRefed<MediaRawData> ADTSTrackDemuxer::GetNextFrame(
const ADTS::Frame& aFrame) {
const adts::Frame& aFrame) {
ADTSLOG("GetNext() Begin({mOffset=%" PRIu64 " HeaderSize()=%" PRIu64
" Length()=%zu})",
aFrame.Offset(), aFrame.Header().HeaderSize(),
@ -515,7 +735,7 @@ int64_t ADTSTrackDemuxer::FrameIndexFromTime(const TimeUnit& aTime) const {
return std::max<int64_t>(0, frameIndex);
}
void ADTSTrackDemuxer::UpdateState(const ADTS::Frame& aFrame) {
void ADTSTrackDemuxer::UpdateState(const adts::Frame& aFrame) {
uint32_t frameLength = aFrame.Length();
// Prevent overflow.
if (mTotalFrameLen + frameLength < mTotalFrameLen) {
@ -530,7 +750,7 @@ void ADTSTrackDemuxer::UpdateState(const ADTS::Frame& aFrame) {
mTotalFrameLen += frameLength;
if (!mSamplesPerFrame) {
const ADTS::FrameHeader& header = aFrame.Header();
const adts::FrameHeader& header = aFrame.Header();
mSamplesPerFrame = header.mSamples;
mSamplesPerSecond = header.mSampleRate;
mChannels = header.mChannels;
@ -575,15 +795,15 @@ bool ADTSDemuxer::ADTSSniffer(const uint8_t* aData, const uint32_t aLength) {
if (aLength < 7) {
return false;
}
if (!ADTS::FrameHeader::MatchesSync(Span(aData, aLength))) {
if (!adts::FrameHeader::MatchesSync(aData)) {
return false;
}
auto parser = MakeUnique<ADTS::FrameParser>();
auto parser = MakeUnique<adts::FrameParser>();
if (!parser->Parse(0, aData, aData + aLength)) {
return false;
}
const ADTS::Frame& currentFrame = parser->CurrentFrame();
const adts::Frame& currentFrame = parser->CurrentFrame();
// Check for sync marker after the found frame, since it's
// possible to find sync marker in AAC data. If sync marker
// exists after the current frame then we've found a frame
@ -592,8 +812,7 @@ bool ADTSDemuxer::ADTSSniffer(const uint8_t* aData, const uint32_t aLength) {
currentFrame.Offset() + currentFrame.Length();
return aLength > nextFrameHeaderOffset &&
aLength - nextFrameHeaderOffset >= 2 &&
ADTS::FrameHeader::MatchesSync(Span(aData + nextFrameHeaderOffset,
aLength - nextFrameHeaderOffset));
adts::FrameHeader::MatchesSync(aData + nextFrameHeaderOffset);
}
} // namespace mozilla

View File

@ -11,10 +11,14 @@
#include "mozilla/Maybe.h"
#include "MediaDataDemuxer.h"
#include "MediaResource.h"
#include "Adts.h"
namespace mozilla {
namespace adts {
class Frame;
class FrameParser;
} // namespace adts
class ADTSTrackDemuxer;
DDLoggedTypeDeclNameAndBase(ADTSDemuxer, MediaDataDemuxer);
@ -83,16 +87,16 @@ class ADTSTrackDemuxer : public MediaTrackDemuxer,
media::TimeUnit ScanUntil(const media::TimeUnit& aTime);
// Finds the next valid frame and returns its byte range.
const ADTS::Frame& FindNextFrame(bool findFirstFrame = false);
const adts::Frame& FindNextFrame(bool findFirstFrame = false);
// Skips the next frame given the provided byte range.
bool SkipNextFrame(const ADTS::Frame& aFrame);
bool SkipNextFrame(const adts::Frame& aFrame);
// Returns the next ADTS frame, if available.
already_AddRefed<MediaRawData> GetNextFrame(const ADTS::Frame& aFrame);
already_AddRefed<MediaRawData> GetNextFrame(const adts::Frame& aFrame);
// Updates post-read meta data.
void UpdateState(const ADTS::Frame& aFrame);
void UpdateState(const adts::Frame& aFrame);
// Returns the frame index for the given offset.
int64_t FrameIndexFromOffset(uint64_t aOffset) const;
@ -111,7 +115,7 @@ class ADTSTrackDemuxer : public MediaTrackDemuxer,
MediaResourceIndex mSource;
// ADTS frame parser used to detect frames and extract side info.
ADTS::FrameParser* mParser;
adts::FrameParser* mParser;
// Current byte offset in the source stream.
uint64_t mOffset;

View File

@ -67,7 +67,7 @@ class AudibilityMonitor {
for (uint32_t i = 0; i < frameCount; i++) {
bool atLeastOneAudible = false;
for (uint32_t j = 0; j < aChannels; j++) {
if (std::fabs(ConvertAudioSample<float>(samples[readIndex++])) >
if (std::fabs(AudioSampleToFloat(samples[readIndex++])) >
AUDIBILITY_THRESHOLD) {
atLeastOneAudible = true;
}

View File

@ -1,15 +1,13 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=2 et sw=2 tw=80: */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIOSAMPLEFORMAT_H_
#define MOZILLA_AUDIOSAMPLEFORMAT_H_
#include "mozilla/Assertions.h"
#include "mozilla/PodOperations.h"
#include <algorithm>
#include <type_traits>
namespace mozilla {
@ -64,191 +62,113 @@ class AudioSampleTypeToFormat<short> {
static const AudioSampleFormat Format = AUDIO_FORMAT_S16;
};
template <typename T>
constexpr float MaxAsFloat() {
return static_cast<float>(std::numeric_limits<T>::max());
// Single-sample conversion
/*
* Use "2^N" conversion since it's simple, fast, "bit transparent", used by
* many other libraries and apparently behaves reasonably.
* http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
* http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
*/
inline float AudioSampleToFloat(float aValue) { return aValue; }
inline float AudioSampleToFloat(int16_t aValue) {
return static_cast<float>(aValue) / 32768.0f;
}
inline float AudioSampleToFloat(int32_t aValue) {
return static_cast<float>(aValue) / (float)(1U << 31);
}
template <typename T>
constexpr float LowestAsFloat() {
return static_cast<float>(std::numeric_limits<T>::lowest());
T FloatToAudioSample(float aValue);
template <>
inline float FloatToAudioSample<float>(float aValue) {
return aValue;
}
template <>
inline int16_t FloatToAudioSample<int16_t>(float aValue) {
float v = aValue * 32768.0f;
float clamped = std::max(-32768.0f, std::min(32767.0f, v));
return int16_t(clamped);
}
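A quick sketch of the 2^N convention at the boundaries; clamping only bites at positive full scale:
// FloatToAudioSample<int16_t>(1.0f)   -> 32767 (32768 clamped)
// FloatToAudioSample<int16_t>(-1.0f)  -> -32768
// AudioSampleToFloat(int16_t(-32768)) -> exactly -1.0f
// AudioSampleToFloat(int16_t(32767))  -> ~0.99997f, not quite 1.0f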
// The maximum value for an audio sample. If T is signed, the absolute value of
// this number is smaller (by exactly 1) than ::Min().
template <typename T>
constexpr T Max() {
return std::numeric_limits<T>::max();
T UInt8bitToAudioSample(uint8_t aValue);
template <>
inline float UInt8bitToAudioSample<float>(uint8_t aValue) {
return static_cast<float>(aValue) * (static_cast<float>(2) / UINT8_MAX) -
static_cast<float>(1);
}
template <>
inline int16_t UInt8bitToAudioSample<int16_t>(uint8_t aValue) {
return static_cast<int16_t>((aValue << 8) + aValue + INT16_MIN);
}
// The minimum value for an audio sample. If T is signed, the absolute value of
// this number is greater (by exactly 1) than ::Max()
template <typename T>
constexpr T Min() {
return std::numeric_limits<T>::lowest();
T IntegerToAudioSample(int16_t aValue);
template <>
inline float IntegerToAudioSample<float>(int16_t aValue) {
return static_cast<float>(aValue) / 32768.0f;
}
template <>
inline int16_t IntegerToAudioSample<int16_t>(int16_t aValue) {
return aValue;
}
template <typename T>
T Int24bitToAudioSample(int32_t aValue);
template <>
inline float Int24bitToAudioSample<float>(int32_t aValue) {
return static_cast<float>(aValue) / static_cast<float>(1 << 23);
}
template <>
inline int16_t Int24bitToAudioSample<int16_t>(int32_t aValue) {
return static_cast<int16_t>(aValue / 256);
}
template <typename SrcT, typename DstT>
inline void ConvertAudioSample(SrcT aIn, DstT& aOut);
template <>
inline void ConvertAudioSample(int16_t aIn, int16_t& aOut) {
aOut = aIn;
}
template <>
constexpr float Max<float>() {
return 1.0f;
inline void ConvertAudioSample(int16_t aIn, float& aOut) {
aOut = AudioSampleToFloat(aIn);
}
template <>
constexpr float Min<float>() {
return -1.0f;
}
// The bias value is the middle of the range. In linear PCM audio, if the
// values are all equal to the bias value, the audio is silent.
template <typename T>
constexpr T Bias() {
return 0;
inline void ConvertAudioSample(float aIn, float& aOut) {
aOut = aIn;
}
template <>
constexpr uint8_t Bias<uint8_t>() {
return 128;
}
// Clip a floating point audio sample to its nominal range. This is
// destructive, and is only used here for avoiding overflow in some edge cases,
// so it's not going to be generally audible.
inline float Clip(float aValue) { return std::clamp(aValue, -1.0f, 1.0f); }
template <typename T>
T FloatToAudioSample(float aValue) {
if constexpr (std::is_same_v<float, T>) {
return aValue;
}
if constexpr (std::is_same_v<uint8_t, T>) {
return static_cast<T>(std::clamp((aValue + 1.0f) * 128.f,
LowestAsFloat<T>(), MaxAsFloat<T>()));
} else if constexpr (std::is_same_v<int16_t, T>) {
// This produces correct results across the range.
return static_cast<T>(std::clamp(aValue * -LowestAsFloat<T>(),
LowestAsFloat<T>(), MaxAsFloat<T>()));
} else if constexpr (std::is_same_v<int32_t, T>) {
// Handled differently because float cannot represent INT32_MAX exactly;
// the positive and negative halves are scaled separately to maximise
// precision.
if (aValue >= 0.) {
// if the input sample is greater OR EQUAL to 1.0, then clip and return
// the max value.
if (aValue >= 1.0) {
return std::numeric_limits<T>::max();
}
// otherwise cast to a double and map to the positive range.
// float 32-bits cannot represent int32_max (but can represent int32_min)
constexpr double magnitudePos = std::numeric_limits<int32_t>::max();
return static_cast<int32_t>(aValue * magnitudePos);
}
// Similarly for the negative range.
if (aValue <= -1.0) {
return std::numeric_limits<T>::lowest();
}
constexpr double magnitudeNegative =
-1.0 * std::numeric_limits<int32_t>::lowest();
return static_cast<int32_t>(aValue * magnitudeNegative);
}
}
template <typename T>
T UInt8bitToAudioSample(uint8_t aValue) {
if constexpr (std::is_same_v<uint8_t, T>) {
return aValue;
} else if constexpr (std::is_same_v<int16_t, T>) {
return (static_cast<int16_t>(aValue) << 8) - (1 << 15);
} else if constexpr (std::is_same_v<int32_t, T>) {
return (static_cast<int32_t>(aValue) << 24) - (1 << 31);
} else if constexpr (std::is_same_v<float, T>) {
float biased = static_cast<float>(aValue) - Bias<uint8_t>();
if (aValue >= Bias<uint8_t>()) {
return Clip(biased / MaxAsFloat<int8_t>());
}
return Clip(biased / -LowestAsFloat<int8_t>());
}
}
template <typename T>
T Int16ToAudioSample(int16_t aValue) {
if constexpr (std::is_same_v<uint8_t, T>) {
return static_cast<uint8_t>(aValue >> 8) + 128;
} else if constexpr (std::is_same_v<int16_t, T>) {
return aValue;
} else if constexpr (std::is_same_v<int32_t, T>) {
return aValue << 16;
} else if constexpr (std::is_same_v<float, T>) {
if (aValue >= 0) {
return Clip(static_cast<float>(aValue) / MaxAsFloat<int16_t>());
}
return Clip(static_cast<float>(aValue) / -LowestAsFloat<int16_t>());
}
}
// 24-bits audio samples are stored in 32-bits variables.
template <typename T>
T Int24ToAudioSample(int32_t aValue) {
if constexpr (std::is_same_v<uint8_t, T>) {
return static_cast<uint8_t>(aValue >> 16) + 128;
} else if constexpr (std::is_same_v<int16_t, T>) {
return static_cast<int16_t>(aValue >> 8);
} else if constexpr (std::is_same_v<int32_t, T>) {
return aValue << 8;
} else if constexpr (std::is_same_v<float, T>) {
const int32_t min = -(2 << 22);
const int32_t max = (2 << 22) - 1;
if (aValue >= 0) {
return Clip(static_cast<float>(aValue) / static_cast<float>(max));
}
return Clip(static_cast<float>(aValue) / -static_cast<float>(min));
}
}
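A few illustrative values for the 24-bit path (24-bit samples live in the low 24 bits of an int32_t):
// Int24ToAudioSample<int16_t>(0x123456)    -> 0x1234 (top 16 bits)
// Int24ToAudioSample<float>((2 << 22) - 1) -> 1.0f (positive full scale)
// Int24ToAudioSample<float>(-(2 << 22))    -> -1.0f (negative full scale)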
template <typename T>
T Int32ToAudioSample(int32_t aValue) {
if constexpr (std::is_same_v<uint8_t, T>) {
return static_cast<uint8_t>(aValue >> 24) + 128;
} else if constexpr (std::is_same_v<int16_t, T>) {
return aValue >> 16;
} else if constexpr (std::is_same_v<int32_t, T>) {
return aValue;
} else if constexpr (std::is_same_v<float, T>) {
if (aValue >= 0) {
return Clip(static_cast<float>(aValue) / MaxAsFloat<int32_t>());
}
return Clip(static_cast<float>(aValue) / -LowestAsFloat<int32_t>());
}
}
// This does not handle 24-bit audio; call Int24ToAudioSample explicitly
// when needed.
template <typename D, typename S>
inline D ConvertAudioSample(const S& aSource) {
if constexpr (std::is_same_v<S, D>) {
return aSource;
} else if constexpr (std::is_same_v<S, uint8_t>) {
return UInt8bitToAudioSample<D>(aSource);
} else if constexpr (std::is_same_v<S, int16_t>) {
return Int16ToAudioSample<D>(aSource);
} else if constexpr (std::is_same_v<S, int32_t>) {
return Int32ToAudioSample<D>(aSource);
} else if constexpr (std::is_same_v<S, float>) {
return FloatToAudioSample<D>(aSource);
}
inline void ConvertAudioSample(float aIn, int16_t& aOut) {
aOut = FloatToAudioSample<int16_t>(aIn);
}
// Sample buffer conversion
template <typename From, typename To>
inline void ConvertAudioSamples(const From* aFrom, To* aTo, int aCount) {
if constexpr (std::is_same_v<From, To>) {
PodCopy(aTo, aFrom, aCount);
return;
}
for (int i = 0; i < aCount; ++i) {
aTo[i] = ConvertAudioSample<To>(aFrom[i]);
aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i]));
}
}
inline void ConvertAudioSamples(const int16_t* aFrom, int16_t* aTo,
int aCount) {
memcpy(aTo, aFrom, sizeof(*aTo) * aCount);
}
inline void ConvertAudioSamples(const float* aFrom, float* aTo, int aCount) {
memcpy(aTo, aFrom, sizeof(*aTo) * aCount);
}
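Typical use is widening an interleaved int16 buffer to float; a minimal sketch with illustrative contents:
int16_t src[4] = {0, 16384, -16384, 32767};
float dst[4];
ConvertAudioSamples(src, dst, 4);  // dst ~= {0.0f, 0.5f, -0.5f, 0.99997f}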
// Sample buffer conversion with scale
template <typename From, typename To>
inline void ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount,
float aScale) {
@ -257,8 +177,7 @@ inline void ConvertAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount,
return;
}
for (int i = 0; i < aCount; ++i) {
aTo[i] =
ConvertAudioSample<To>(ConvertAudioSample<float>(aFrom[i]) * aScale);
aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aFrom[i]) * aScale);
}
}
inline void ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo,
@ -275,8 +194,7 @@ inline void ConvertAudioSamplesWithScale(const int16_t* aFrom, int16_t* aTo,
return;
}
for (int i = 0; i < aCount; ++i) {
aTo[i] = FloatToAudioSample<int16_t>(ConvertAudioSample<float>(aFrom[i]) *
aScale);
aTo[i] = FloatToAudioSample<int16_t>(AudioSampleToFloat(aFrom[i]) * aScale);
}
}
@ -284,9 +202,8 @@ template <typename From, typename To>
inline void AddAudioSamplesWithScale(const From* aFrom, To* aTo, int aCount,
float aScale) {
for (int i = 0; i < aCount; ++i) {
aTo[i] =
ConvertAudioSample<To>(ConvertAudioSample<float>(aTo[i]) +
ConvertAudioSample<float>(aFrom[i]) * aScale);
aTo[i] = FloatToAudioSample<To>(AudioSampleToFloat(aTo[i]) +
AudioSampleToFloat(aFrom[i]) * aScale);
}
}

View File

@ -79,8 +79,7 @@ static void InterleaveAndConvertBuffer(const SrcT* const* aSourceChannels,
DestT* output = aOutput;
for (size_t i = 0; i < aLength; ++i) {
for (size_t channel = 0; channel < aChannels; ++channel) {
float v =
ConvertAudioSample<float>(aSourceChannels[channel][i]) * aVolume;
float v = AudioSampleToFloat(aSourceChannels[channel][i]) * aVolume;
*output = FloatToAudioSample<DestT>(v);
++output;
}
@ -94,8 +93,7 @@ static void DeinterleaveAndConvertBuffer(const SrcT* aSourceBuffer,
for (size_t i = 0; i < aChannels; i++) {
size_t interleavedIndex = i;
for (size_t j = 0; j < aFrames; j++) {
aOutput[i][j] =
ConvertAudioSample<DestT>(aSourceBuffer[interleavedIndex]);
ConvertAudioSample(aSourceBuffer[interleavedIndex], aOutput[i][j]);
interleavedIndex += aChannels;
}
}
@ -150,7 +148,7 @@ void DownmixAndInterleave(Span<const SrcT* const> aChannelData,
* separate pointers to each channel's buffer.
*/
struct AudioChunk {
using SampleFormat = mozilla::AudioSampleFormat;
typedef mozilla::AudioSampleFormat SampleFormat;
AudioChunk() = default;

View File

@ -268,7 +268,7 @@ class InflatableShortBuffer {
// capacity, and the loop goes backward.
float* output = reinterpret_cast<float*>(mBuffer.mData);
for (size_t i = Length(); i--;) {
output[i] = ConvertAudioSample<float>(mBuffer.mData[i]);
output[i] = AudioSampleToFloat(mBuffer.mData[i]);
}
AlignedFloatBuffer rv;
rv.mBuffer = std::move(mBuffer.mBuffer);

View File

@ -219,6 +219,14 @@ inline already_AddRefed<MediaByteBuffer> ForceGetAudioCodecSpecificBlob(
// information as a blob or where a blob is ambiguous.
inline already_AddRefed<MediaByteBuffer> GetAudioCodecSpecificBlob(
const AudioCodecSpecificVariant& v) {
MOZ_ASSERT(!v.is<NoCodecSpecificData>(),
"NoCodecSpecificData shouldn't be used as a blob");
MOZ_ASSERT(!v.is<AacCodecSpecificData>(),
"AacCodecSpecificData has 2 blobs internally, one should "
"explicitly be selected");
MOZ_ASSERT(!v.is<Mp3CodecSpecificData>(),
"Mp3CodecSpecificData shouldn't be used as a blob");
return ForceGetAudioCodecSpecificBlob(v);
}
@ -462,8 +470,7 @@ class VideoInfo : public TrackInfo {
rv.AppendPrintf("extra data: %zu bytes", mExtraData->Length());
}
rv.AppendPrintf("rotation: %d", static_cast<int>(mRotation));
rv.AppendPrintf("colors: %s",
ColorDepthStrings[static_cast<int>(mColorDepth)]);
rv.AppendPrintf("colors: %s", ColorDepthStrings[static_cast<int>(mColorDepth)]);
if (mColorSpace) {
rv.AppendPrintf(
"YUV colorspace: %s ",
@ -479,8 +486,7 @@ class VideoInfo : public TrackInfo {
"transfer function %s ",
TransferFunctionStrings[static_cast<int>(mTransferFunction.value())]);
}
rv.AppendPrintf("color range: %s",
ColorRangeStrings[static_cast<int>(mColorRange)]);
rv.AppendPrintf("color range: %s", ColorRangeStrings[static_cast<int>(mColorRange)]);
if (mImageRect) {
rv.AppendPrintf("image rect: %dx%d", mImageRect->Width(),
mImageRect->Height());

View File

@ -4,6 +4,7 @@
#include "VideoUtils.h"
#include <functional>
#include <stdint.h>
#include "CubebUtils.h"
@ -18,6 +19,7 @@
#include "mozilla/StaticPrefs_accessibility.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"
#include "nsCharSeparatedTokenizer.h"
#include "nsContentTypeParser.h"
#include "nsIConsoleService.h"
@ -27,6 +29,7 @@
#include "nsNetCID.h"
#include "nsServiceManagerUtils.h"
#include "nsThreadUtils.h"
#include "AudioStream.h"
namespace mozilla {

View File

@ -1,116 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioSampleFormat.h"
#include "gtest/gtest.h"
#include <type_traits>
using namespace mozilla;
template <typename T>
constexpr T LowestSample() {
if constexpr (std::is_integral_v<T>) {
return std::numeric_limits<T>::lowest();
} else {
return -1.0f;
}
}
// When converting a sample-type to another sample-type, this returns the
// maximum value possible in the destination format
template <typename Dest>
constexpr Dest HighestSample() {
if constexpr (std::is_integral_v<Dest>) {
return std::numeric_limits<Dest>::max();
} else {
return +1.0f;
}
}
// When converting a sample-type to another sample-type, this returns the
// maximum value expected in the destination format
template <typename Dest, typename Source>
constexpr Dest HighestSampleExpected() {
// When converting small integer samples to large integer samples, the
// higher bound isn't reached because of positive / negative integer
// asymmetry.
if constexpr (std::is_same_v<Source, uint8_t> &&
std::is_same_v<Dest, int16_t>) {
return 32512;  // INT16_MAX - (2 << 7) + 1
} else if constexpr (std::is_same_v<Source, uint8_t> &&
std::is_same_v<Dest, int32_t>) {
return 2130706432; // INT32_MAX - (2 << 23) + 1
} else if constexpr (std::is_same_v<Source, int16_t> &&
std::is_same_v<Dest, int32_t>) {
return 2147418112; // INT32_MAX - UINT16_MAX
}
if constexpr (std::is_integral_v<Dest>) {
return std::numeric_limits<Dest>::max();
} else {
return +1.0f;
}
}
template <typename Source, typename Dest>
void TestSampleTypePair() {
std::cout << __PRETTY_FUNCTION__ << std::endl;
ASSERT_EQ(LowestSample<Dest>(),
ConvertAudioSample<Dest>(LowestSample<Source>()));
Dest expected = HighestSampleExpected<Dest, Source>();
ASSERT_EQ(expected, ConvertAudioSample<Dest>(HighestSample<Source>()));
ASSERT_EQ(Bias<Dest>(), ConvertAudioSample<Dest>(Bias<Source>()));
}
template <typename T>
void TestSampleType24bits() {
std::cout << __PRETTY_FUNCTION__ << std::endl;
int32_t max_sample_24bits = (2 << 22) - 1;
int32_t min_sample_24bits = -(2 << 22);
int32_t silence_24bits = 0;
ASSERT_EQ(LowestSample<T>(), Int24ToAudioSample<T>(min_sample_24bits));
ASSERT_EQ(Int24ToAudioSample<T>(min_sample_24bits), LowestSample<T>());
if constexpr (std::is_same_v<T, int32_t>) {
// Quantization issue: 2147483392 + (2 << 7) - 1 == INT32_MAX
// See comment on HighestSampleExpected above
const int32_t HIGHEST_FROM_24BITS = 2147483392;
ASSERT_EQ(HIGHEST_FROM_24BITS, Int24ToAudioSample<T>(max_sample_24bits));
ASSERT_EQ(Int24ToAudioSample<T>(max_sample_24bits), HIGHEST_FROM_24BITS);
} else {
ASSERT_EQ(HighestSample<T>(), Int24ToAudioSample<T>(max_sample_24bits));
ASSERT_EQ(Int24ToAudioSample<T>(max_sample_24bits), HighestSample<T>());
}
ASSERT_EQ(Bias<T>(), Int24ToAudioSample<T>(silence_24bits));
ASSERT_EQ(Int24ToAudioSample<T>(silence_24bits), Bias<T>());
}
TEST(AudioSampleFormat, Boundaries)
{
TestSampleTypePair<uint8_t, uint8_t>();
TestSampleTypePair<uint8_t, int16_t>();
TestSampleTypePair<uint8_t, int32_t>();
TestSampleTypePair<uint8_t, float>();
TestSampleTypePair<int16_t, uint8_t>();
TestSampleTypePair<int16_t, int16_t>();
TestSampleTypePair<int16_t, int32_t>();
TestSampleTypePair<int16_t, float>();
TestSampleTypePair<int32_t, uint8_t>();
TestSampleTypePair<int32_t, int16_t>();
TestSampleTypePair<int32_t, int32_t>();
TestSampleTypePair<int32_t, float>();
TestSampleTypePair<float, uint8_t>();
TestSampleTypePair<float, int16_t>();
TestSampleTypePair<float, int32_t>();
TestSampleTypePair<float, float>();
// Separately test 24-bit audio stored in 32-bits integers.
TestSampleType24bits<uint8_t>();
TestSampleType24bits<int16_t>();
TestSampleType24bits<int32_t>();
TestSampleType24bits<float>();
}

View File

@ -31,7 +31,7 @@ float GetLowValue<float>() {
template <>
int16_t GetLowValue<short>() {
return INT16_MIN;
return -INT16_MAX;
}
template <>
@ -62,7 +62,7 @@ const T* const* GetPlanarChannelArray(size_t aChannels, size_t aSize) {
for (size_t c = 0; c < aChannels; c++) {
channels[c] = new T[aSize];
for (size_t i = 0; i < aSize; i++) {
channels[c][i] = ConvertAudioSample<T>(1.f / static_cast<float>(c + 1));
channels[c][i] = FloatToAudioSample<T>(1. / (c + 1));
}
}
return channels;
@ -104,7 +104,7 @@ const T* GetInterleavedChannelArray(size_t aChannels, size_t aSize) {
T* samples = new T[sampleCount];
for (size_t i = 0; i < sampleCount; i++) {
uint32_t channel = (i % aChannels) + 1;
samples[i] = ConvertAudioSample<T>(1.f / static_cast<float>(channel));
samples[i] = FloatToAudioSample<T>(1. / channel);
}
return samples;
}
@ -128,9 +128,8 @@ void TestInterleaveAndConvert() {
uint32_t channelIndex = 0;
for (size_t i = 0; i < arraySize * channels; i++) {
ASSERT_TRUE(
FuzzyEqual(dst[i], ConvertAudioSample<DstT>(
1.f / static_cast<float>(channelIndex + 1))));
ASSERT_TRUE(FuzzyEqual(
dst[i], FloatToAudioSample<DstT>(1. / (channelIndex + 1))));
channelIndex++;
channelIndex %= channels;
}
@ -152,9 +151,8 @@ void TestDeinterleaveAndConvert() {
for (size_t channel = 0; channel < channels; channel++) {
for (size_t i = 0; i < arraySize; i++) {
ASSERT_TRUE(FuzzyEqual(
dst[channel][i],
ConvertAudioSample<DstT>(1.f / static_cast<float>(channel + 1))));
ASSERT_TRUE(FuzzyEqual(dst[channel][i],
FloatToAudioSample<DstT>(1. / (channel + 1))));
}
}

View File

@ -1462,7 +1462,7 @@ float rmsf32(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
for (uint32_t i = 0; i < aFrames; i++) {
downmixed = 0.;
for (uint32_t j = 0; j < aChannels; j++) {
downmixed += ConvertAudioSample<float>(aSamples[readIdx++]);
downmixed += AudioSampleToFloat(aSamples[readIdx++]);
}
rms += downmixed * downmixed;
}

View File

@ -31,7 +31,6 @@ UNIFIED_SOURCES += [
"TestAudioMixer.cpp",
"TestAudioPacketizer.cpp",
"TestAudioRingBuffer.cpp",
"TestAudioSampleFormat.cpp",
"TestAudioSegment.cpp",
"TestAudioSinkWrapper.cpp",
"TestAudioTrackEncoder.cpp",

View File

@ -4,56 +4,37 @@
#include "Adts.h"
#include "MediaData.h"
#include "PlatformDecoderModule.h"
#include "mozilla/Array.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/Logging.h"
#include "ADTSDemuxer.h"
extern mozilla::LazyLogModule gMediaDemuxerLog;
#define LOG(msg, ...) \
MOZ_LOG(gMediaDemuxerLog, LogLevel::Debug, msg, ##__VA_ARGS__)
#define ADTSLOG(msg, ...) \
DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Debug, msg, ##__VA_ARGS__)
#define ADTSLOGV(msg, ...) \
DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Verbose, msg, ##__VA_ARGS__)
namespace mozilla {
namespace ADTS {
static const int kADTSHeaderSize = 7;
constexpr std::array FREQ_LOOKUP{96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350, 0};
int8_t Adts::GetFrequencyIndex(uint32_t aSamplesPerSecond) {
static const uint32_t freq_lookup[] = {96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350, 0};
Result<uint8_t, bool> GetFrequencyIndex(uint32_t aSamplesPerSecond) {
auto found =
std::find(FREQ_LOOKUP.begin(), FREQ_LOOKUP.end(), aSamplesPerSecond);
if (found == FREQ_LOOKUP.end()) {
return Err(false);
int8_t i = 0;
while (freq_lookup[i] && aSamplesPerSecond < freq_lookup[i]) {
i++;
}
return std::distance(FREQ_LOOKUP.begin(), found);
if (!freq_lookup[i]) {
return -1;
}
return i;
}
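The two lookups above differ subtly for rates that are not in the table; illustrative values:
// GetFrequencyIndex(44100) -> Ok(4) (backed-out variant: exact table match)
// Adts::GetFrequencyIndex(44100) -> 4 (restored: first rate <= input)
// GetFrequencyIndex(44000) -> Err(false), while the restored loop returns
// 5 (32000 Hz), silently picking the next lower rate.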
bool ConvertSample(uint16_t aChannelCount, uint8_t aFrequencyIndex,
uint8_t aProfile, MediaRawData* aSample) {
bool Adts::ConvertSample(uint16_t aChannelCount, int8_t aFrequencyIndex,
int8_t aProfile, MediaRawData* aSample) {
size_t newSize = aSample->Size() + kADTSHeaderSize;
MOZ_LOG(sPDMLog, LogLevel::Debug,
("Converting sample to ADTS format: newSize: %zu, ch: %u, "
"profile: %u, freq index: %d",
newSize, aChannelCount, aProfile, aFrequencyIndex));
// ADTS header uses 13 bits for packet size.
if (newSize >= (1 << 13) || aChannelCount > 15 || aProfile < 1 ||
aProfile > 4 || aFrequencyIndex >= FREQ_LOOKUP.size()) {
MOZ_LOG(sPDMLog, LogLevel::Debug,
("Couldn't convert sample to ADTS format: newSize: %zu, ch: %u, "
"profile: %u, freq index: %d",
newSize, aChannelCount, aProfile, aFrequencyIndex));
if (newSize >= (1 << 13) || aChannelCount > 15 || aFrequencyIndex < 0 ||
aProfile < 1 || aProfile > 4) {
return false;
}
@ -85,36 +66,7 @@ bool ConvertSample(uint16_t aChannelCount, uint8_t aFrequencyIndex,
return true;
}
bool StripHeader(MediaRawData* aSample) {
if (aSample->Size() < kADTSHeaderSize) {
return false;
}
FrameHeader header;
auto data = Span{aSample->Data(), aSample->Size()};
MOZ_ASSERT(FrameHeader::MatchesSync(data),
"Don't attempt to strip the ADTS header of a raw AAC packet.");
header.Parse(data);
bool crcPresent = header.mHaveCrc;
LOG(("Stripping ADTS, crc %spresent", crcPresent ? "" : "not "));
size_t toStrip = crcPresent ? kADTSHeaderSize + 2 : kADTSHeaderSize;
UniquePtr<MediaRawDataWriter> writer(aSample->CreateWriter());
writer->PopFront(toStrip);
if (aSample->mCrypto.IsEncrypted()) {
if (aSample->mCrypto.mPlainSizes.Length() > 0 &&
writer->mCrypto.mPlainSizes[0] >= kADTSHeaderSize) {
writer->mCrypto.mPlainSizes[0] -= kADTSHeaderSize;
}
}
return true;
}
bool RevertSample(MediaRawData* aSample) {
bool Adts::RevertSample(MediaRawData* aSample) {
if (aSample->Size() < kADTSHeaderSize) {
return false;
}
@ -139,156 +91,4 @@ bool RevertSample(MediaRawData* aSample) {
return true;
}
bool FrameHeader::MatchesSync(const Span<const uint8_t>& aData) {
return aData.Length() >= 2 && aData[0] == 0xFF && (aData[1] & 0xF6) == 0xF0;
}
FrameHeader::FrameHeader() { Reset(); }
// Header size
uint64_t FrameHeader::HeaderSize() const { return (mHaveCrc) ? 9 : 7; }
bool FrameHeader::IsValid() const { return mFrameLength > 0; }
// Resets the state to allow for a new parsing session.
void FrameHeader::Reset() { PodZero(this); }
// Returns whether the bytes in aData parse as a valid ADTS frame header.
bool FrameHeader::Parse(const Span<const uint8_t>& aData) {
if (!MatchesSync(aData)) {
return false;
}
// AAC has 1024 samples per frame per channel.
mSamples = 1024;
mHaveCrc = !(aData[1] & 0x01);
mObjectType = ((aData[2] & 0xC0) >> 6) + 1;
mSamplingIndex = (aData[2] & 0x3C) >> 2;
mChannelConfig = (aData[2] & 0x01) << 2 | (aData[3] & 0xC0) >> 6;
mFrameLength =
static_cast<uint32_t>((aData[3] & 0x03) << 11 | (aData[4] & 0xFF) << 3 |
(aData[5] & 0xE0) >> 5);
mNumAACFrames = (aData[6] & 0x03) + 1;
static const uint32_t SAMPLE_RATES[] = {96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350};
if (mSamplingIndex >= ArrayLength(SAMPLE_RATES)) {
LOG(("ADTS: Init() failure: invalid sample-rate index value: %" PRIu32 ".",
mSamplingIndex));
// This marks the header as invalid.
mFrameLength = 0;
return false;
}
mSampleRate = SAMPLE_RATES[mSamplingIndex];
MOZ_ASSERT(mChannelConfig < 8);
mChannels = (mChannelConfig == 7) ? 8 : mChannelConfig;
return true;
}
Frame::Frame() : mOffset(0), mHeader() {}
uint64_t Frame::Offset() const { return mOffset; }
size_t Frame::Length() const {
// TODO: If fields are zero'd when invalid, this check wouldn't be
// necessary.
if (!mHeader.IsValid()) {
return 0;
}
return mHeader.mFrameLength;
}
// Returns the offset to the start of frame's raw data.
uint64_t Frame::PayloadOffset() const { return mOffset + mHeader.HeaderSize(); }
// Returns the length of the frame's raw data (excluding the header) in bytes.
size_t Frame::PayloadLength() const {
// TODO: If fields are zero'd when invalid, this check wouldn't be
// necessary.
if (!mHeader.IsValid()) {
return 0;
}
return mHeader.mFrameLength - mHeader.HeaderSize();
}
// Returns the parsed frame header.
const FrameHeader& Frame::Header() const { return mHeader; }
bool Frame::IsValid() const { return mHeader.IsValid(); }
// Resets the frame header and data.
void Frame::Reset() {
mHeader.Reset();
mOffset = 0;
}
// Returns whether a valid frame header was found in [aStart, aEnd).
bool Frame::Parse(uint64_t aOffset, const uint8_t* aStart,
const uint8_t* aEnd) {
MOZ_ASSERT(aStart && aEnd && aStart <= aEnd);
bool found = false;
const uint8_t* ptr = aStart;
// Require at least 7 bytes of data at the end of the buffer for the minimum
// ADTS frame header.
while (ptr < aEnd - 7 && !found) {
found = mHeader.Parse(Span(ptr, aEnd));
ptr++;
}
mOffset = aOffset + (static_cast<size_t>(ptr - aStart)) - 1u;
return found;
}
const Frame& FrameParser::CurrentFrame() { return mFrame; }
const Frame& FrameParser::FirstFrame() const { return mFirstFrame; }
void FrameParser::Reset() {
EndFrameSession();
mFirstFrame.Reset();
}
void FrameParser::EndFrameSession() { mFrame.Reset(); }
bool FrameParser::Parse(uint64_t aOffset, const uint8_t* aStart,
const uint8_t* aEnd) {
const bool found = mFrame.Parse(aOffset, aStart, aEnd);
if (mFrame.Length() && !mFirstFrame.Length()) {
mFirstFrame = mFrame;
}
return found;
}
// Initialize the AAC AudioSpecificConfig.
// Only handles two-byte version for AAC-LC.
void InitAudioSpecificConfig(const ADTS::Frame& frame,
MediaByteBuffer* aBuffer) {
const ADTS::FrameHeader& header = frame.Header();
MOZ_ASSERT(header.IsValid());
int audioObjectType = header.mObjectType;
int samplingFrequencyIndex = header.mSamplingIndex;
int channelConfig = header.mChannelConfig;
uint8_t asc[2];
asc[0] = (audioObjectType & 0x1F) << 3 | (samplingFrequencyIndex & 0x0E) >> 1;
asc[1] = (samplingFrequencyIndex & 0x01) << 7 | (channelConfig & 0x0F) << 3;
aBuffer->AppendElements(asc, 2);
}
}; // namespace ADTS
}; // namespace mozilla
#undef LOG
#undef ADTSLOG
#undef ADTSLOGV
} // namespace mozilla

View File

@ -6,124 +6,17 @@
#define ADTS_H_
#include <stdint.h>
#include "MediaData.h"
#include "mozilla/Result.h"
namespace mozilla {
class MediaRawData;
namespace ADTS {
// adts::FrameHeader - Holds the ADTS frame header and its parsing
// state.
//
// ADTS Frame Structure
//
// 11111111 1111BCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP(QQQQQQQQ
// QQQQQQQQ)
//
// Header consists of 7 or 9 bytes (without or with CRC).
// Letter Length(bits) Description
// { sync } 12 syncword 0xFFF, all bits must be 1
// B 1 MPEG Version: 0 for MPEG-4, 1 for MPEG-2
// C 2 Layer: always 0
// D 1 protection absent: set to 1 if there is no CRC,
// 0 if there is a CRC
// E 2 profile, the MPEG-4 Audio Object Type minus 1
// F 4 MPEG-4 Sampling Frequency Index (15 is forbidden)
// H 3 MPEG-4 Channel Configuration (in the case of 0, the
// channel configuration is sent via an in-band PCE)
// M 13 frame length, this value must include 7 or 9 bytes of
// header length: FrameLength =
// (ProtectionAbsent == 1 ? 7 : 9) + size(AACFrame)
// O 11 Buffer fullness
// P 2 Number of AAC frames (RDBs) in ADTS frame minus 1, for
// maximum compatibility always use 1 AAC frame per ADTS
// frame
// Q 16 CRC if protection absent is 0
class FrameHeader {
class Adts {
public:
uint32_t mFrameLength{};
uint32_t mSampleRate{};
uint32_t mSamples{};
uint32_t mChannels{};
uint8_t mObjectType{};
uint8_t mSamplingIndex{};
uint8_t mChannelConfig{};
uint8_t mNumAACFrames{};
bool mHaveCrc{};
// Returns whether aData matches a valid ADTS header sync marker
static bool MatchesSync(const Span<const uint8_t>& aData);
FrameHeader();
// Header size
uint64_t HeaderSize() const;
bool IsValid() const;
// Resets the state to allow for a new parsing session.
void Reset();
// Returns whether the bytes in aData parse as a valid ADTS frame header.
bool Parse(const Span<const uint8_t>& aData);
static int8_t GetFrequencyIndex(uint32_t aSamplesPerSecond);
static bool ConvertSample(uint16_t aChannelCount, int8_t aFrequencyIndex,
int8_t aProfile, mozilla::MediaRawData* aSample);
static bool RevertSample(MediaRawData* aSample);
};
class Frame {
public:
Frame();
uint64_t Offset() const;
size_t Length() const;
// Returns the offset to the start of frame's raw data.
uint64_t PayloadOffset() const;
size_t PayloadLength() const;
// Returns the parsed frame header.
const FrameHeader& Header() const;
bool IsValid() const;
// Resets the frame header and data.
void Reset();
// Returns whether a valid frame header was found in [aStart, aEnd).
bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd);
private:
// The offset to the start of the header.
uint64_t mOffset;
// The currently parsed frame header.
FrameHeader mHeader;
};
class FrameParser {
public:
// Returns the currently parsed frame. Reset via Reset or EndFrameSession.
const Frame& CurrentFrame();
// Returns the first parsed frame. Reset via Reset.
const Frame& FirstFrame() const;
// Resets the parser. Don't use between frames as first frame data is reset.
void Reset();
// Clears the currently parsed frame to allow parsing the next one.
void EndFrameSession();
// Parses the given buffer for a valid frame header and returns true if
// one was found.
bool Parse(uint64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd);
private:
// We keep the first parsed frame around for static info access, the
// previously parsed frame for debugging and the currently parsed frame.
Frame mFirstFrame;
Frame mFrame;
};
// Extract the audiospecificconfig from an ADTS header
void InitAudioSpecificConfig(const Frame& aFrame, MediaByteBuffer* aBuffer);
bool StripHeader(MediaRawData* aSample);
Result<uint8_t, bool> GetFrequencyIndex(uint32_t aSamplesPerSecond);
bool ConvertSample(uint16_t aChannelCount, uint8_t aFrequencyIndex,
uint8_t aProfile, mozilla::MediaRawData* aSample);
bool RevertSample(MediaRawData* aSample);
} // namespace ADTS
} // namespace mozilla
#endif
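For orientation, a sketch of how the restored static API is consumed, mirroring ADTSSampleConverter in the EME code below; rate, channels, profile and sample are hypothetical caller state:
int8_t idx = Adts::GetFrequencyIndex(rate);  // -1 if unsupported
if (idx >= 0 && Adts::ConvertSample(channels, idx, profile, sample)) {
  // sample now carries a 7-byte ADTS header for the CDM...
  Adts::RevertSample(sample);  // ...which is stripped again afterwards
}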

View File

@ -28,7 +28,7 @@
namespace mozilla {
using DecryptPromiseRequestHolder = MozPromiseRequestHolder<DecryptPromise>;
typedef MozPromiseRequestHolder<DecryptPromise> DecryptPromiseRequestHolder;
DDLoggedTypeDeclNameAndBase(EMEDecryptor, MediaDataDecoder);
@ -45,7 +45,7 @@ class ADTSSampleConverter {
// doesn't care what is set.
,
mProfile(aInfo.mProfile < 1 || aInfo.mProfile > 4 ? 2 : aInfo.mProfile),
mFrequencyIndex(ADTS::GetFrequencyIndex(aInfo.mRate).unwrapOr(255)) {
mFrequencyIndex(Adts::GetFrequencyIndex(aInfo.mRate)) {
EME_LOG("ADTSSampleConvertor(): aInfo.mProfile=%" PRIi8
" aInfo.mExtendedProfile=%" PRIi8,
aInfo.mProfile, aInfo.mExtendedProfile);
@ -56,17 +56,17 @@ class ADTSSampleConverter {
}
}
bool Convert(MediaRawData* aSample) const {
return ADTS::ConvertSample(mNumChannels, mFrequencyIndex, mProfile,
return Adts::ConvertSample(mNumChannels, mFrequencyIndex, mProfile,
aSample);
}
bool Revert(MediaRawData* aSample) const {
return ADTS::RevertSample(aSample);
return Adts::RevertSample(aSample);
}
private:
const uint32_t mNumChannels;
const uint8_t mProfile;
const uint8_t mFrequencyIndex{};
const uint8_t mFrequencyIndex;
};
class EMEDecryptor final : public MediaDataDecoder,
@ -124,7 +124,7 @@ class EMEDecryptor final : public MediaDataDecoder,
mThroughputLimiter->Throttle(aSample)
->Then(
mThread, __func__,
[self](const RefPtr<MediaRawData>& aSample) {
[self](RefPtr<MediaRawData> aSample) {
self->mThrottleRequest.Complete();
self->AttemptDecode(aSample);
},
@ -223,7 +223,7 @@ class EMEDecryptor final : public MediaDataDecoder,
mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
mThroughputLimiter->Flush();
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
auto* holder = iter.UserData();
auto holder = iter.UserData();
holder->DisconnectIfExists();
iter.Remove();
}
@ -240,7 +240,7 @@ class EMEDecryptor final : public MediaDataDecoder,
MOZ_ASSERT(mDecodePromise.IsEmpty() && !mDecodeRequest.Exists(),
"Must wait for decoding to complete");
for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
auto* holder = iter.UserData();
auto holder = iter.UserData();
holder->DisconnectIfExists();
iter.Remove();
}
@ -323,7 +323,7 @@ RefPtr<MediaDataDecoder::DecodePromise> EMEMediaDataDecoderProxy::Decode(
mSamplesWaitingForKey->WaitIfKeyNotUsable(sample)
->Then(
mThread, __func__,
[self, this](const RefPtr<MediaRawData>& aSample) {
[self, this](RefPtr<MediaRawData> aSample) {
mKeyRequest.Complete();
MediaDataDecoderProxy::Decode(aSample)

View File

@ -14,9 +14,6 @@
#include "mozilla/SyncRunnable.h"
#include "mozilla/UniquePtr.h"
#include "nsTArray.h"
#include "ADTSDemuxer.h"
#include <array>
#define LOG(...) DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)
#define LOGEX(_this, ...) \
@ -65,7 +62,6 @@ AppleATDecoder::~AppleATDecoder() {
RefPtr<MediaDataDecoder::InitPromise> AppleATDecoder::Init() {
if (!mFormatID) {
LOG("AppleATDecoder::Init failure: unknown format ID");
return InitPromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Non recognised format")),
@ -89,7 +85,6 @@ RefPtr<MediaDataDecoder::FlushPromise> AppleATDecoder::Flush() {
}
}
if (mErrored) {
LOG("Flush error");
mParsedFramesForAACMagicCookie = 0;
mMagicCookie.Clear();
ProcessShutdown();
@ -193,28 +188,18 @@ RefPtr<MediaDataDecoder::DecodePromise> AppleATDecoder::Decode(
MediaResult rv = NS_OK;
if (!mConverter) {
LOG("Lazily initing the decoder");
rv = SetupDecoder(aSample);
if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
LOG("Decoder not initialized");
return DecodePromise::CreateAndReject(rv, __func__);
}
}
if (mIsADTS) {
bool rv = ADTS::StripHeader(aSample);
if (!rv) {
LOG("Stripping the ADTS header in AppleATDecoder failed");
}
}
mQueuedSamples.AppendElement(aSample);
if (rv == NS_OK) {
for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
rv = DecodeSample(mQueuedSamples[i]);
if (NS_FAILED(rv)) {
LOG("Decoding error");
mErrored = true;
return DecodePromise::CreateAndReject(rv, __func__);
}
@ -292,7 +277,7 @@ MediaResult AppleATDecoder::DecodeSample(MediaRawData* aSample) {
}
size_t numFrames = outputData.Length() / channels;
int rate = AssertedCast<int>(mOutputFormat.mSampleRate);
int rate = mOutputFormat.mSampleRate;
media::TimeUnit duration(numFrames, rate);
if (!duration.IsValid()) {
NS_WARNING("Invalid count of accumulated audio samples");
@ -355,8 +340,8 @@ MediaResult AppleATDecoder::GetInputAudioDescription(
aDesc.mChannelsPerFrame = mConfig.mChannels;
aDesc.mSampleRate = mConfig.mRate;
UInt32 inputFormatSize = sizeof(aDesc);
OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0,
nullptr, &inputFormatSize, &aDesc);
OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
&inputFormatSize, &aDesc);
if (NS_WARN_IF(rv)) {
return MediaResult(
NS_ERROR_FAILURE,
@ -434,7 +419,7 @@ nsresult AppleATDecoder::SetupChannelLayout() {
UInt32 propertySize;
UInt32 size;
OSStatus status = AudioConverterGetPropertyInfo(
mConverter, kAudioConverterOutputChannelLayout, &propertySize, nullptr);
mConverter, kAudioConverterOutputChannelLayout, &propertySize, NULL);
if (status || !propertySize) {
LOG("Couldn't get channel layout property (%s)", FourCC2Str(status));
return NS_ERROR_FAILURE;
@ -519,36 +504,15 @@ MediaResult AppleATDecoder::SetupDecoder(MediaRawData* aSample) {
MOZ_ASSERT(mThread->IsOnCurrentThread());
static const uint32_t MAX_FRAMES = 2;
bool isADTS =
ADTS::FrameHeader::MatchesSync(Span{aSample->Data(), aSample->Size()});
if (isADTS) {
ADTS::FrameParser parser;
if (!parser.Parse(0, aSample->Data(), aSample->Data() + aSample->Size())) {
LOG("ADTS frame parsing error");
return NS_ERROR_NOT_INITIALIZED;
}
AudioCodecSpecificBinaryBlob blob;
ADTS::InitAudioSpecificConfig(parser.FirstFrame(), blob.mBinaryBlob);
mConfig.mCodecSpecificConfig = AudioCodecSpecificVariant{std::move(blob)};
mConfig.mProfile = mConfig.mExtendedProfile =
parser.FirstFrame().Header().mObjectType;
mIsADTS = true;
}
if (mFormatID == kAudioFormatMPEG4AAC && mConfig.mExtendedProfile == 2 &&
mParsedFramesForAACMagicCookie < MAX_FRAMES) {
LOG("Attempting to get implicit AAC magic cookie");
// Check for implicit SBR signalling if stream is AAC-LC
// This will provide us with an updated magic cookie for use with
// GetInputAudioDescription.
if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
!mMagicCookie.Length() && !isADTS) {
!mMagicCookie.Length()) {
// nothing found yet, will try again later
LOG("Getting implicit AAC magic cookie failed");
mParsedFramesForAACMagicCookie++;
LOG("Not initialized -- need magic cookie");
return NS_ERROR_NOT_INITIALIZED;
}
// An error occurred; fall back to using the default stream description
@ -574,7 +538,6 @@ MediaResult AppleATDecoder::SetupDecoder(MediaRawData* aSample) {
MediaResult rv = GetInputAudioDescription(inputFormat, magicCookie);
if (NS_FAILED(rv)) {
LOG("GetInputAudioDescription failure");
return rv;
}
// Fill in the output format manually.
@ -654,41 +617,28 @@ static void _SampleCallback(void* aSBR, UInt32 aNumBytes, UInt32 aNumPackets,
const void* aData,
AudioStreamPacketDescription* aPackets) {}
nsresult AppleATDecoder::GetImplicitAACMagicCookie(MediaRawData* aSample) {
nsresult AppleATDecoder::GetImplicitAACMagicCookie(
const MediaRawData* aSample) {
MOZ_ASSERT(mThread->IsOnCurrentThread());
bool isADTS =
ADTS::FrameHeader::MatchesSync(Span{aSample->Data(), aSample->Size()});
// Prepend ADTS header to AAC audio.
RefPtr<MediaRawData> adtssample(aSample->Clone());
if (!adtssample) {
return NS_ERROR_OUT_OF_MEMORY;
}
int8_t frequency_index = Adts::GetFrequencyIndex(mConfig.mRate);
RefPtr<MediaRawData> adtssample = aSample;
if (!isADTS) {
// Prepend ADTS header to AAC audio.
adtssample = aSample->Clone();
if (!adtssample) {
return NS_ERROR_OUT_OF_MEMORY;
}
auto frequency_index = ADTS::GetFrequencyIndex(mConfig.mRate);
if (frequency_index.isErr()) {
LOG("%d isn't a valid rate for AAC", mConfig.mRate);
return NS_ERROR_FAILURE;
}
// Arbitrarily pick main profile if not specified
int profile = mConfig.mProfile ? mConfig.mProfile : 1;
bool rv = ADTS::ConvertSample(mConfig.mChannels, frequency_index.unwrap(),
profile, adtssample);
if (!rv) {
LOG("Failed to apply ADTS header");
return NS_ERROR_FAILURE;
}
bool rv = Adts::ConvertSample(mConfig.mChannels, frequency_index,
mConfig.mProfile, adtssample);
if (!rv) {
NS_WARNING("Failed to apply ADTS header");
return NS_ERROR_FAILURE;
}
if (!mStream) {
OSStatus rv = AudioFileStreamOpen(this, _MetadataCallback, _SampleCallback,
kAudioFileAAC_ADTSType, &mStream);
if (rv) {
LOG("Couldn't open AudioFileStream");
NS_WARNING("Couldn't open AudioFileStream");
return NS_ERROR_FAILURE;
}
}
@ -696,7 +646,7 @@ nsresult AppleATDecoder::GetImplicitAACMagicCookie(MediaRawData* aSample) {
OSStatus status = AudioFileStreamParseBytes(
mStream, adtssample->Size(), adtssample->Data(), 0 /* discontinuity */);
if (status) {
LOG("Couldn't parse sample");
NS_WARNING("Couldn't parse sample");
}
if (status || mFileStreamError || mMagicCookie.Length()) {

View File

@ -38,7 +38,7 @@ class AppleATDecoder final : public MediaDataDecoder,
nsCString GetCodecName() const override;
// Callbacks also need access to the config.
AudioInfo mConfig;
const AudioInfo mConfig;
// Use to extract magic cookie for HE-AAC detection.
nsTArray<uint8_t> mMagicCookie;
@ -67,12 +67,11 @@ class AppleATDecoder final : public MediaDataDecoder,
// Setup AudioConverter once all information required has been gathered.
// Will return NS_ERROR_NOT_INITIALIZED if more data is required.
MediaResult SetupDecoder(MediaRawData* aSample);
nsresult GetImplicitAACMagicCookie(MediaRawData* aSample);
nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
nsresult SetupChannelLayout();
uint32_t mParsedFramesForAACMagicCookie;
uint32_t mEncoderDelay = 0;
uint64_t mTotalMediaFrames = 0;
bool mIsADTS = false;
bool mErrored;
};

View File

@ -164,7 +164,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(*data++);
*tmp++ = AudioSampleToFloat(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
@ -174,7 +174,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(data[channel][frame]);
*tmp++ = AudioSampleToFloat(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32) {
@ -183,7 +183,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int32_t* data = reinterpret_cast<int32_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(*data++);
*tmp++ = AudioSampleToFloat(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32P) {
@ -193,7 +193,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
int32_t** data = reinterpret_cast<int32_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(data[channel][frame]);
*tmp++ = AudioSampleToFloat(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8) {
@ -202,7 +202,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
uint8_t* data = reinterpret_cast<uint8_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(*data++);
*tmp++ = UInt8bitToAudioSample<AudioDataValue>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8P) {
@ -212,7 +212,7 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
uint8_t** data = reinterpret_cast<uint8_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = ConvertAudioSample<float>(data[channel][frame]);
*tmp++ = UInt8bitToAudioSample<AudioDataValue>(data[channel][frame]);
}
}
}

View File

@ -55,9 +55,6 @@ WMFAudioMFTManager::WMFAudioMFTManager(const AudioInfo& aConfig)
audioSpecConfig = audioCodecSpecificBinaryBlob->Elements();
configLength = audioCodecSpecificBinaryBlob->Length();
}
// If no extradata has been provided, assume this is ADTS. Otherwise,
// assume raw AAC packets.
mIsADTS = !configLength;
AACAudioSpecificConfigToUserData(aConfig.mExtendedProfile, audioSpecConfig,
configLength, mUserData);
}
@ -107,8 +104,7 @@ bool WMFAudioMFTManager::Init() {
NS_ENSURE_TRUE(SUCCEEDED(hr), false);
if (mStreamType == WMFStreamType::AAC) {
UINT32 payloadType = mIsADTS ? 1 : 0;
hr = inputType->SetUINT32(MF_MT_AAC_PAYLOAD_TYPE, payloadType);
hr = inputType->SetUINT32(MF_MT_AAC_PAYLOAD_TYPE, 0x0); // Raw AAC packet
NS_ENSURE_TRUE(SUCCEEDED(hr), false);
hr = inputType->SetBlob(MF_MT_USER_DATA, mUserData.Elements(),
@ -148,8 +144,7 @@ WMFAudioMFTManager::Input(MediaRawData* aSample) {
nsCString WMFAudioMFTManager::GetCodecName() const {
if (mStreamType == WMFStreamType::AAC) {
return "aac"_ns;
}
if (mStreamType == WMFStreamType::MP3) {
} else if (mStreamType == WMFStreamType::MP3) {
return "mp3"_ns;
}
return "unknown"_ns;
@ -182,8 +177,8 @@ WMFAudioMFTManager::UpdateOutputType() {
}
HRESULT
WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) {
aOutput = nullptr;
WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutData) {
aOutData = nullptr;
RefPtr<IMFSample> sample;
HRESULT hr;
int typeChangeCount = 0;
@ -247,8 +242,8 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) {
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
// Output is made of floats.
uint32_t numSamples = currentLength / sizeof(float);
uint32_t numFrames = numSamples / mAudioChannels;
int32_t numSamples = currentLength / sizeof(float);
int32_t numFrames = numSamples / mAudioChannels;
MOZ_ASSERT(numFrames >= 0);
MOZ_ASSERT(numSamples >= 0);
if (numFrames == 0) {
@ -280,10 +275,10 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) {
return MF_E_TRANSFORM_NEED_MORE_INPUT;
}
aOutput = new AudioData(aStreamOffset, pts, std::move(audioData),
mAudioChannels, mAudioRate, mChannelsMap);
MOZ_DIAGNOSTIC_ASSERT(duration == aOutput->mDuration, "must be equal");
mLastOutputDuration = aOutput->mDuration;
aOutData = new AudioData(aStreamOffset, pts, std::move(audioData),
mAudioChannels, mAudioRate, mChannelsMap);
MOZ_DIAGNOSTIC_ASSERT(duration == aOutData->mDuration, "must be equal");
mLastOutputDuration = aOutData->mDuration;
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

View File

@ -58,7 +58,6 @@ class WMFAudioMFTManager : public MFTManager {
media::TimeUnit mLastOutputDuration = media::TimeUnit::Zero();
bool mFirstFrame = true;
bool mIsADTS = false;
uint64_t mTotalMediaFrames = 0;
uint32_t mEncoderDelay = 0;

View File

@ -177,8 +177,7 @@ Maybe<gfx::YUVColorSpace> GetYUVColorSpace(IMFMediaType* aType) {
}
int32_t MFOffsetToInt32(const MFOffset& aOffset) {
return AssertedCast<int32_t>(AssertedCast<float>(aOffset.value) +
(AssertedCast<float>(aOffset.fract) / 65536.0f));
return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
}
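MFOffset is a Media Foundation fixed-point type: the integer part lives in value and the fraction, in 1/65536 units, in fract, so the conversion above computes value + fract/65536. A worked example:

MFOffset offset;
offset.value = 10;
offset.fract = 32768;                  // 32768 / 65536 = 0.5
int32_t px = MFOffsetToInt32(offset);  // 10 + 0.5, truncated to 10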
TimeUnit GetSampleDuration(IMFSample* aSample) {
@ -205,7 +204,7 @@ GetPictureRegion(IMFMediaType* aMediaType, gfx::IntRect& aOutPictureRegion) {
// Determine if "pan and scan" is enabled for this media. If it is, we
// only display a region of the video frame, not the entire frame.
BOOL panScan =
!!MFGetAttributeUINT32(aMediaType, MF_MT_PAN_SCAN_ENABLED, FALSE);
MFGetAttributeUINT32(aMediaType, MF_MT_PAN_SCAN_ENABLED, FALSE);
// If pan and scan mode is enabled. Try to get the display region.
HRESULT hr = E_FAIL;
@ -301,14 +300,11 @@ const char* MFTMessageTypeToStr(MFT_MESSAGE_TYPE aMsg) {
GUID AudioMimeTypeToMediaFoundationSubtype(const nsACString& aMimeType) {
if (aMimeType.EqualsLiteral("audio/mpeg")) {
return MFAudioFormat_MP3;
}
if (MP4Decoder::IsAAC(aMimeType)) {
} else if (MP4Decoder::IsAAC(aMimeType)) {
return MFAudioFormat_AAC;
}
if (aMimeType.EqualsLiteral("audio/vorbis")) {
} else if (aMimeType.EqualsLiteral("audio/vorbis")) {
return MFAudioFormat_Vorbis;
}
if (aMimeType.EqualsLiteral("audio/opus")) {
} else if (aMimeType.EqualsLiteral("audio/opus")) {
return MFAudioFormat_Opus;
}
NS_WARNING("Unsupport audio mimetype");
@ -318,19 +314,17 @@ GUID AudioMimeTypeToMediaFoundationSubtype(const nsACString& aMimeType) {
GUID VideoMimeTypeToMediaFoundationSubtype(const nsACString& aMimeType) {
if (MP4Decoder::IsH264(aMimeType)) {
return MFVideoFormat_H264;
}
if (VPXDecoder::IsVP8(aMimeType)) {
} else if (VPXDecoder::IsVP8(aMimeType)) {
return MFVideoFormat_VP80;
}
if (VPXDecoder::IsVP9(aMimeType)) {
} else if (VPXDecoder::IsVP9(aMimeType)) {
return MFVideoFormat_VP90;
}
#ifdef MOZ_AV1
if (AOMDecoder::IsAV1(aMimeType)) {
else if (AOMDecoder::IsAV1(aMimeType)) {
return MFVideoFormat_AV1;
}
#endif
if (MP4Decoder::IsHEVC(aMimeType)) {
else if (MP4Decoder::IsHEVC(aMimeType)) {
return MFVideoFormat_HEVC;
}
NS_WARNING("Unsupport video mimetype");
@ -374,9 +368,7 @@ void AACAudioSpecificConfigToUserData(uint8_t aAACProfileLevelIndication,
// the rest can be all 0x00.
BYTE heeInfo[heeInfoLen] = {0};
WORD* w = (WORD*)heeInfo;
// If extradata has been provided, assume raw AAC packets (0). Otherwise,
// assume ADTS (1)
w[0] = aConfigLength ? 0 : 1;
w[0] = 0x0; // Payload type raw AAC packet
w[1] = aAACProfileLevelIndication;
aOutUserData.AppendElements(heeInfo, heeInfoLen);
@ -385,10 +377,10 @@ void AACAudioSpecificConfigToUserData(uint8_t aAACProfileLevelIndication,
// The AudioSpecificConfig is TTTTTFFF|FCCCCGGG
// (T=ObjectType, F=Frequency, C=Channel, G=GASpecificConfig)
// If frequency = 0xf, then the frequency is explicitly defined on 24 bits.
uint8_t frequency =
int8_t frequency =
(aAudioSpecConfig[0] & 0x7) << 1 | (aAudioSpecConfig[1] & 0x80) >> 7;
uint8_t channels = (aAudioSpecConfig[1] & 0x78) >> 3;
uint8_t gasc = aAudioSpecConfig[1] & 0x7;
int8_t channels = (aAudioSpecConfig[1] & 0x78) >> 3;
int8_t gasc = aAudioSpecConfig[1] & 0x7;
if (frequency != 0xf && channels && !gasc) {
// We enter this condition if the AudioSpecificConfig should theoretically
// be 2 bytes long but it's not.
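A worked example of that TTTTTFFF|FCCCCGGG layout: AAC-LC (object type 2) at 44.1 kHz (frequency index 4), stereo, serializes to the two bytes 0x12 0x10, and the shifts above recover each field:

uint8_t config[2] = {0x12, 0x10};  // 00010 010 | 0 0010 000
uint8_t frequency = (config[0] & 0x7) << 1 | (config[1] & 0x80) >> 7;  // 4
uint8_t channels = (config[1] & 0x78) >> 3;                            // 2
uint8_t gasc = config[1] & 0x7;                                        // 0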

View File

@ -1,710 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/Assertions.h"
#include "mozilla/Logging.h"
#include "mozilla/dom/AudioData.h"
#include "mozilla/dom/AudioDataBinding.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/StructuredCloneTags.h"
#include "nsStringFwd.h"
#include <utility>
#include "AudioSampleFormat.h"
#include "WebCodecsUtils.h"
#include "js/StructuredClone.h"
#include "mozilla/Maybe.h"
#include "mozilla/Result.h"
extern mozilla::LazyLogModule gWebCodecsLog;
namespace mozilla::dom {
#ifdef LOG_INTERNAL
# undef LOG_INTERNAL
#endif // LOG_INTERNAL
#define LOG_INTERNAL(level, msg, ...) \
MOZ_LOG(gWebCodecsLog, LogLevel::level, (msg, ##__VA_ARGS__))
#ifdef LOGD
# undef LOGD
#endif // LOGD
#define LOGD(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__)
#ifdef LOGE
# undef LOGE
#endif // LOGE
#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__)
// Only needed for refcounted objects.
//
NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(AudioData)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioData)
tmp->CloseIfNeeded();
NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent)
NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioData)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTING_ADDREF(AudioData)
// AudioData should be released as soon as its refcount drops to zero,
// without waiting for async deletion by the cycle collector, since it may hold
// a large-size PCM buffer.
NS_IMPL_CYCLE_COLLECTING_RELEASE_WITH_LAST_RELEASE(AudioData, CloseIfNeeded())
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioData)
NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END
/*
* W3C Webcodecs AudioData implementation
*/
AudioData::AudioData(nsIGlobalObject* aParent,
const AudioDataSerializedData& aData)
: mParent(aParent),
mTimestamp(aData.mTimestamp),
mNumberOfChannels(aData.mNumberOfChannels),
mNumberOfFrames(aData.mNumberOfFrames),
mSampleRate(aData.mSampleRate),
mAudioSampleFormat(aData.mAudioSampleFormat),
// The resource is not copied, but referenced
mResource(aData.mResource) {
MOZ_ASSERT(mParent);
MOZ_ASSERT(mResource,
"Resource should always be present then receiving a transfer.");
}
AudioData::AudioData(const AudioData& aOther)
: mParent(aOther.mParent),
mTimestamp(aOther.mTimestamp),
mNumberOfChannels(aOther.mNumberOfChannels),
mNumberOfFrames(aOther.mNumberOfFrames),
mSampleRate(aOther.mSampleRate),
mAudioSampleFormat(aOther.mAudioSampleFormat),
// The resource is not copied, but referenced
mResource(aOther.mResource) {
MOZ_ASSERT(mParent);
}
Result<already_AddRefed<AudioDataResource>, nsresult>
AudioDataResource::Construct(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aInit) {
FallibleTArray<uint8_t> copied;
uint8_t* rv = ProcessTypedArraysFixed(
aInit, [&](const Span<uint8_t>& aData) -> uint8_t* {
return copied.AppendElements(aData.Elements(), aData.Length(),
fallible);
});
if (!rv) {
LOGE("AudioDataResource::Ctor: OOM");
return Err(NS_ERROR_OUT_OF_MEMORY);
}
return MakeAndAddRef<AudioDataResource>(std::move(copied));
}
AudioData::AudioData(
nsIGlobalObject* aParent,
already_AddRefed<mozilla::dom::AudioDataResource> aResource,
const AudioDataInit& aInit)
: mParent(aParent),
mTimestamp(aInit.mTimestamp),
mNumberOfChannels(aInit.mNumberOfChannels),
mNumberOfFrames(aInit.mNumberOfFrames),
mSampleRate(aInit.mSampleRate),
mAudioSampleFormat(Some(aInit.mFormat)),
mResource(std::move(aResource)) {
MOZ_ASSERT(mParent);
}
nsIGlobalObject* AudioData::GetParentObject() const {
AssertIsOnOwningThread();
return mParent.get();
}
JSObject* AudioData::WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) {
AssertIsOnOwningThread();
return AudioData_Binding::Wrap(aCx, this, aGivenProto);
}
uint32_t BytesPerSamples(const mozilla::dom::AudioSampleFormat& aFormat) {
switch (aFormat) {
case AudioSampleFormat::U8:
case AudioSampleFormat::U8_planar:
return sizeof(uint8_t);
case AudioSampleFormat::S16:
case AudioSampleFormat::S16_planar:
return sizeof(int16_t);
case AudioSampleFormat::S32:
case AudioSampleFormat::F32:
case AudioSampleFormat::S32_planar:
case AudioSampleFormat::F32_planar:
return sizeof(float);
}
}
Result<Ok, nsCString> IsValidAudioDataInit(const AudioDataInit& aInit) {
if (aInit.mSampleRate <= 0.0) {
auto msg = nsLiteralCString("sampleRate must be positive");
LOGD("%s", msg.get());
return Err(msg);
}
if (aInit.mNumberOfFrames == 0) {
auto msg = nsLiteralCString("mNumberOfFrames must be positive");
LOGD("%s", msg.get());
return Err(msg);
}
if (aInit.mNumberOfChannels == 0) {
auto msg = nsLiteralCString("mNumberOfChannels must be positive");
LOGD("%s", msg.get());
return Err(msg);
}
uint64_t totalSamples = uint64_t(aInit.mNumberOfFrames) * aInit.mNumberOfChannels;
uint32_t bytesPerSamples = BytesPerSamples(aInit.mFormat);
uint64_t totalSize = totalSamples * bytesPerSamples;
uint64_t arraySizeBytes = ProcessTypedArraysFixed(
aInit.mData, [&](const Span<uint8_t>& aData) -> uint64_t {
return aData.LengthBytes();
});
if (arraySizeBytes < totalSize) {
auto msg =
nsPrintfCString("Array of size %" PRIu64
" not big enough, should be at least %" PRIu64 " bytes",
arraySizeBytes, totalSize);
LOGD("%s", msg.get());
return Err(msg);
}
return Ok();
}
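For instance, an f32 init with 480 frames and 2 channels must supply at least 3840 bytes of data; anything smaller is rejected by the check above:

// Worked example (f32 = 4 bytes per sample):
uint64_t totalSamples = 480 * 2;                    // frames * channels
uint64_t totalSize = totalSamples * sizeof(float);  // 3840 bytes minimum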
const char* FormatToString(AudioSampleFormat aFormat) {
switch (aFormat) {
case AudioSampleFormat::U8:
return "u8";
case AudioSampleFormat::S16:
return "s16";
case AudioSampleFormat::S32:
return "s32";
case AudioSampleFormat::F32:
return "f32";
case AudioSampleFormat::U8_planar:
return "u8-planar";
case AudioSampleFormat::S16_planar:
return "s16-planar";
case AudioSampleFormat::S32_planar:
return "s32-planar";
case AudioSampleFormat::F32_planar:
return "f32-planar";
}
}
/* static */
already_AddRefed<AudioData> AudioData::Constructor(const GlobalObject& aGlobal,
const AudioDataInit& aInit,
ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
LOGD("[%p] AudioData(fmt: %s, rate: %f, ch: %" PRIu32 ", ts: %" PRId64 ")",
global.get(), FormatToString(aInit.mFormat), aInit.mSampleRate,
aInit.mNumberOfChannels, aInit.mTimestamp);
if (!global) {
LOGE("Global unavailable");
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
nsString errorMessage;
auto rv = IsValidAudioDataInit(aInit);
if (rv.isErr()) {
LOGD("AudioData::Constructor failure (IsValidAudioDataInit)");
aRv.ThrowTypeError(rv.inspectErr());
return nullptr;
}
auto resource = AudioDataResource::Construct(aInit.mData);
if (resource.isErr()) {
LOGD("AudioData::Constructor failure (OOM)");
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}
return MakeAndAddRef<mozilla::dom::AudioData>(global, resource.unwrap(),
aInit);
}
// https://w3c.github.io/webcodecs/#dom-audiodata-format
Nullable<mozilla::dom::AudioSampleFormat> AudioData::GetFormat() const {
AssertIsOnOwningThread();
return MaybeToNullable(mAudioSampleFormat);
}
// https://w3c.github.io/webcodecs/#dom-audiodata-samplerate
float AudioData::SampleRate() const {
AssertIsOnOwningThread();
return mSampleRate;
}
// https://w3c.github.io/webcodecs/#dom-audiodata-numberofframes
uint32_t AudioData::NumberOfFrames() const {
AssertIsOnOwningThread();
return mNumberOfFrames;
}
// https://w3c.github.io/webcodecs/#dom-audiodata-numberofchannels
uint32_t AudioData::NumberOfChannels() const {
AssertIsOnOwningThread();
return mNumberOfChannels;
}
// https://w3c.github.io/webcodecs/#dom-audiodata-duration
uint64_t AudioData::Duration() const {
AssertIsOnOwningThread();
// The spec isn't clear in which direction to convert to integer.
// https://github.com/w3c/webcodecs/issues/726
return static_cast<uint64_t>(
static_cast<float>(USECS_PER_S * mNumberOfFrames) / mSampleRate);
}
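A worked example of this conversion, as a standalone helper mirroring the expression above (USECS_PER_S = 1,000,000):

uint64_t DurationUs(uint32_t aFrames, float aRate) {
  // Microseconds, truncated toward zero by the integer cast.
  return static_cast<uint64_t>(
      static_cast<float>(uint64_t{1000000} * aFrames) / aRate);
}
// DurationUs(48000, 48000.f) == 1000000; DurationUs(1, 44100.f) == 22,
// illustrating the truncation question raised in the comment above.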
// https://w3c.github.io/webcodecs/#dom-audiodata-timestamp
int64_t AudioData::Timestamp() const {
AssertIsOnOwningThread();
return mTimestamp;
}
struct CopyToSpec {
CopyToSpec(uint32_t aFrameCount, uint32_t aFrameOffset, uint32_t aPlaneIndex,
AudioSampleFormat aFormat)
: mFrameCount(aFrameCount),
mFrameOffset(aFrameOffset),
mPlaneIndex(aPlaneIndex),
mFormat(aFormat) {}
const uint32_t mFrameCount;
const uint32_t mFrameOffset;
const uint32_t mPlaneIndex;
const AudioSampleFormat mFormat;
};
bool IsInterleaved(const AudioSampleFormat& aFormat) {
switch (aFormat) {
case AudioSampleFormat::U8:
case AudioSampleFormat::S16:
case AudioSampleFormat::S32:
case AudioSampleFormat::F32:
return true;
case AudioSampleFormat::U8_planar:
case AudioSampleFormat::S16_planar:
case AudioSampleFormat::S32_planar:
case AudioSampleFormat::F32_planar:
return false;
};
MOZ_ASSERT_UNREACHABLE("Invalid enum value");
return false;
}
size_t AudioData::ComputeCopyElementCount(
const AudioDataCopyToOptions& aOptions, ErrorResult& aRv) {
// https://w3c.github.io/webcodecs/#compute-copy-element-count
// 1, 2
auto destFormat = mAudioSampleFormat;
if (aOptions.mFormat.WasPassed()) {
destFormat = OptionalToMaybe(aOptions.mFormat);
}
// 3, 4
MOZ_ASSERT(destFormat.isSome());
if (IsInterleaved(destFormat.value())) {
if (aOptions.mPlaneIndex > 0) {
auto msg = "Interleaved format, but plane index > 0"_ns;
LOGD("%s", msg.get());
aRv.ThrowRangeError(msg);
return 0;
}
} else {
if (aOptions.mPlaneIndex >= mNumberOfChannels) {
auto msg = nsPrintfCString(
"Plane index %" PRIu32
" greater or equal than the number of channels %" PRIu32,
aOptions.mPlaneIndex, mNumberOfChannels);
LOGD("%s", msg.get());
aRv.ThrowRangeError(msg);
return 0;
}
}
// 5 -- conversion between all formats supported
// 6 -- all planes have the same number of frames, always
uint64_t frameCount = mNumberOfFrames;
// 7
if (aOptions.mFrameOffset >= frameCount) {
auto msg = nsPrintfCString("Frame offset of %" PRIu32
" greater or equal than frame count %" PRIu64,
aOptions.mFrameOffset, frameCount);
LOGD("%s", msg.get());
aRv.ThrowRangeError(msg);
return 0;
}
// 8, 9
uint64_t copyFrameCount = frameCount - aOptions.mFrameOffset;
if (aOptions.mFrameCount.WasPassed()) {
if (aOptions.mFrameCount.Value() > copyFrameCount) {
auto msg = nsPrintfCString(
"Passed copy frame count of %" PRIu32
" greater than available source frames for copy of %" PRIu64,
aOptions.mFrameCount.Value(), copyFrameCount);
LOGD("%s", msg.get());
aRv.ThrowRangeError(msg);
return 0;
}
copyFrameCount = aOptions.mFrameCount.Value();
}
// 10, 11
uint64_t elementCount = copyFrameCount;
if (IsInterleaved(destFormat.value())) {
elementCount *= mNumberOfChannels;
}
return elementCount;
}
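As a worked example: for a 2-channel, 1024-frame AudioData, a planar copy with planeIndex 1 and frameOffset 128 yields 1024 - 128 = 896 elements (one plane's worth), while an interleaved copy with the same offset yields (1024 - 128) * 2 = 1792 elements.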
// https://w3c.github.io/webcodecs/#dom-audiodata-allocationsize
// This method returns an int, that can be zero in case of success or error.
// Caller should check aRv to determine success or error.
uint32_t AudioData::AllocationSize(const AudioDataCopyToOptions& aOptions,
ErrorResult& aRv) {
AssertIsOnOwningThread();
if (!mResource) {
auto msg = "allocationSize called on detached AudioData"_ns;
LOGD("%s", msg.get());
aRv.ThrowInvalidStateError(msg);
return 0;
}
size_t copyElementCount = ComputeCopyElementCount(aOptions, aRv);
if (aRv.Failed()) {
LOGD("AudioData::AllocationSize failure");
// ComputeCopyElementCount has set the exception type.
return 0;
}
Maybe<mozilla::dom::AudioSampleFormat> destFormat = mAudioSampleFormat;
if (aOptions.mFormat.WasPassed()) {
destFormat = OptionalToMaybe(aOptions.mFormat);
}
if (destFormat.isNothing()) {
auto msg = "AudioData has an unknown format"_ns;
LOGD("%s", msg.get());
// See https://github.com/w3c/webcodecs/issues/727 -- it isn't clear yet
// what to do here
aRv.ThrowRangeError(msg);
return 0;
}
CheckedInt<size_t> bytesPerSample = BytesPerSamples(destFormat.ref());
auto res = bytesPerSample * copyElementCount;
if (res.isValid()) {
return res.value();
}
aRv.ThrowRangeError("Allocation size too large");
return 0;
}
template <typename S, typename D>
void CopySamples(Span<S> aSource, Span<D> aDest, uint32_t aSourceChannelCount,
const AudioSampleFormat aSourceFormat,
const CopyToSpec& aCopyToSpec) {
if (IsInterleaved(aSourceFormat) && IsInterleaved(aCopyToSpec.mFormat)) {
MOZ_ASSERT(aCopyToSpec.mPlaneIndex == 0);
MOZ_ASSERT(aDest.Length() >= aCopyToSpec.mFrameCount);
MOZ_ASSERT(aSource.Length() - aCopyToSpec.mFrameOffset >=
aCopyToSpec.mFrameCount);
// This turns into a regular memcpy if the types are in fact equal
ConvertAudioSamples(aSource.data() + aCopyToSpec.mFrameOffset, aDest.data(),
aCopyToSpec.mFrameCount * aSourceChannelCount);
return;
}
if (IsInterleaved(aSourceFormat) && !IsInterleaved(aCopyToSpec.mFormat)) {
DebugOnly<size_t> sourceFrameCount = aSource.Length() / aSourceChannelCount;
MOZ_ASSERT(aDest.Length() >= aCopyToSpec.mFrameCount);
MOZ_ASSERT(aSource.Length() - aCopyToSpec.mFrameOffset >=
aCopyToSpec.mFrameCount);
// Interleaved to planar -- only copy samples of the correct channel to the
// destination
size_t readIndex = aCopyToSpec.mFrameOffset * aSourceChannelCount +
aCopyToSpec.mPlaneIndex;
for (size_t i = 0; i < aCopyToSpec.mFrameCount; i++) {
aDest[i] = ConvertAudioSample<D>(aSource[readIndex]);
readIndex += aSourceChannelCount;
}
return;
}
if (!IsInterleaved(aSourceFormat) && IsInterleaved(aCopyToSpec.mFormat)) {
MOZ_CRASH("This should never be hit -- current spec doesn't support it");
// Planar to interleaved -- copy of all channels of the source into the
// destination buffer.
MOZ_ASSERT(aCopyToSpec.mPlaneIndex == 0);
MOZ_ASSERT(aDest.Length() >= aCopyToSpec.mFrameCount * aSourceChannelCount);
MOZ_ASSERT(aSource.Length() -
aCopyToSpec.mFrameOffset * aSourceChannelCount >=
aCopyToSpec.mFrameCount * aSourceChannelCount);
size_t writeIndex = 0;
// Scan the source linearly and put each sample at the right position in the
// destination interleaved buffer.
size_t readIndex = 0;
for (size_t channel = 0; channel < aSourceChannelCount; channel++) {
writeIndex = channel;
for (size_t i = 0; i < aCopyToSpec.mFrameCount; i++) {
aDest[writeIndex] = ConvertAudioSample<D>(aSource[readIndex]);
readIndex++;
writeIndex += aSourceChannelCount;
}
}
return;
}
if (!IsInterleaved(aSourceFormat) && !IsInterleaved(aCopyToSpec.mFormat)) {
// Planar to Planar / convert + copy from the right index in the source.
size_t offset =
aCopyToSpec.mPlaneIndex * aSource.Length() / aSourceChannelCount;
MOZ_ASSERT(aDest.Length() >= aSource.Length() / aSourceChannelCount -
aCopyToSpec.mFrameOffset);
for (uint32_t i = 0; i < aCopyToSpec.mFrameCount; i++) {
aDest[i] =
ConvertAudioSample<D>(aSource[offset + aCopyToSpec.mFrameOffset + i]);
}
}
}
nsCString AudioData::ToString() const {
if (!mResource) {
return nsCString("AudioData[detached]");
}
return nsPrintfCString("AudioData[%zu bytes %s %fHz %" PRIu32 "x%" PRIu32
"ch]",
mResource->Data().LengthBytes(),
FormatToString(mAudioSampleFormat.value()),
mSampleRate, mNumberOfFrames, mNumberOfChannels);
}
nsCString CopyToToString(size_t aDestBufSize,
const AudioDataCopyToOptions& aOptions) {
return nsPrintfCString(
"AudioDataCopyToOptions[data: %zu bytes %s frame count:%" PRIu32
" frame offset: %" PRIu32 " plane: %" PRIu32 "]",
aDestBufSize,
aOptions.mFormat.WasPassed() ? FormatToString(aOptions.mFormat.Value())
: "null",
aOptions.mFrameCount.WasPassed() ? aOptions.mFrameCount.Value() : 0,
aOptions.mFrameOffset, aOptions.mPlaneIndex);
}
using DataSpanType =
Variant<Span<uint8_t>, Span<int16_t>, Span<int32_t>, Span<float>>;
DataSpanType GetDataSpan(Span<uint8_t> aSpan, const AudioSampleFormat aFormat) {
const size_t Length = aSpan.Length() / BytesPerSamples(aFormat);
// TODO: Check size so Span can be reasonably constructed?
switch (aFormat) {
case AudioSampleFormat::U8:
case AudioSampleFormat::U8_planar:
return AsVariant(aSpan);
case AudioSampleFormat::S16:
case AudioSampleFormat::S16_planar:
return AsVariant(Span(reinterpret_cast<int16_t*>(aSpan.data()), Length));
case AudioSampleFormat::S32:
case AudioSampleFormat::S32_planar:
return AsVariant(Span(reinterpret_cast<int32_t*>(aSpan.data()), Length));
case AudioSampleFormat::F32:
case AudioSampleFormat::F32_planar:
return AsVariant(Span(reinterpret_cast<float*>(aSpan.data()), Length));
}
MOZ_ASSERT_UNREACHABLE("Invalid enum value");
return AsVariant(aSpan);
}
void CopySamples(DataSpanType& aSource, DataSpanType& aDest,
uint32_t aSourceChannelCount,
const AudioSampleFormat aSourceFormat,
const CopyToSpec& aCopyToSpec) {
aSource.match([&](auto& src) {
aDest.match([&](auto& dst) {
CopySamples(src, dst, aSourceChannelCount, aSourceFormat, aCopyToSpec);
});
});
}
void DoCopy(Span<uint8_t> aSource, Span<uint8_t> aDest,
const uint32_t aSourceChannelCount,
const AudioSampleFormat aSourceFormat,
const CopyToSpec& aCopyToSpec) {
DataSpanType source = GetDataSpan(aSource, aSourceFormat);
DataSpanType dest = GetDataSpan(aDest, aCopyToSpec.mFormat);
CopySamples(source, dest, aSourceChannelCount, aSourceFormat, aCopyToSpec);
}
// https://w3c.github.io/webcodecs/#dom-audiodata-copyto
void AudioData::CopyTo(
const MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aDestination,
const AudioDataCopyToOptions& aOptions, ErrorResult& aRv) {
AssertIsOnOwningThread();
size_t destLength = ProcessTypedArraysFixed(
aDestination, [&](const Span<uint8_t>& aData) -> size_t {
return aData.LengthBytes();
});
LOGD("AudioData::CopyTo %s -> %s", ToString().get(),
CopyToToString(destLength, aOptions).get());
if (!mResource) {
auto msg = "copyTo called on closed AudioData"_ns;
LOGD("%s", msg.get());
aRv.ThrowInvalidStateError(msg);
return;
}
uint64_t copyElementCount = ComputeCopyElementCount(aOptions, aRv);
if (aRv.Failed()) {
LOGD("AudioData::CopyTo failed in ComputeCopyElementCount");
return;
}
auto destFormat = mAudioSampleFormat;
if (aOptions.mFormat.WasPassed()) {
destFormat = OptionalToMaybe(aOptions.mFormat);
}
uint32_t bytesPerSample = BytesPerSamples(destFormat.value());
CheckedInt<uint32_t> copyLength = bytesPerSample;
copyLength *= copyElementCount;
if (copyLength.value() > destLength) {
auto msg = nsPrintfCString(
"destination buffer of length %zu too small for copying %" PRIu64
" elements",
destLength, bytesPerSample * copyElementCount);
LOGD("%s", msg.get());
aRv.ThrowRangeError(msg);
return;
}
uint32_t framesToCopy = mNumberOfFrames - aOptions.mFrameOffset;
if (aOptions.mFrameCount.WasPassed()) {
framesToCopy = aOptions.mFrameCount.Value();
}
CopyToSpec copyToSpec(framesToCopy, aOptions.mFrameOffset,
aOptions.mPlaneIndex, destFormat.value());
// Now a couple of layers of templated helpers to type the pointers and
// perform the actual copy.
ProcessTypedArraysFixed(aDestination, [&](const Span<uint8_t>& aData) {
DoCopy(mResource->Data(), aData, mNumberOfChannels,
mAudioSampleFormat.value(), copyToSpec);
});
}
// https://w3c.github.io/webcodecs/#dom-audiodata-clone
already_AddRefed<AudioData> AudioData::Clone(ErrorResult& aRv) {
AssertIsOnOwningThread();
if (!mResource) {
auto msg = "No media resource in the AudioData now"_ns;
LOGD("%s", msg.get());
aRv.ThrowInvalidStateError(msg);
return nullptr;
}
return MakeAndAddRef<AudioData>(*this);
}
// https://w3c.github.io/webcodecs/#close-audiodata
void AudioData::Close() {
AssertIsOnOwningThread();
mResource = nullptr;
mSampleRate = 0;
mNumberOfFrames = 0;
mNumberOfChannels = 0;
mAudioSampleFormat = Nothing();
}
// https://w3c.github.io/webcodecs/#ref-for-deserialization-steps%E2%91%A1
/* static */
JSObject* AudioData::ReadStructuredClone(JSContext* aCx,
nsIGlobalObject* aGlobal,
JSStructuredCloneReader* aReader,
const AudioDataSerializedData& aData) {
JS::Rooted<JS::Value> value(aCx, JS::NullValue());
// To avoid a rooting hazard error from returning a raw JSObject* before
// running the RefPtr destructor, RefPtr needs to be destructed before
// returning the raw JSObject*, which is why the RefPtr<AudioData> is created
// in the scope below. Otherwise, the static analysis infers the RefPtr cannot
// be safely destructed while the unrooted return JSObject* is on the stack.
{
RefPtr<AudioData> frame = MakeAndAddRef<AudioData>(aGlobal, aData);
if (!GetOrCreateDOMReflector(aCx, frame, &value) || !value.isObject()) {
LOGE("GetOrCreateDOMReflect failure");
return nullptr;
}
}
return value.toObjectOrNull();
}
// https://w3c.github.io/webcodecs/#ref-for-audiodata%E2%91%A2%E2%91%A2
bool AudioData::WriteStructuredClone(JSStructuredCloneWriter* aWriter,
StructuredCloneHolder* aHolder) const {
AssertIsOnOwningThread();
// AudioData closed
if (!mResource) {
LOGD("AudioData was already close in WriteStructuredClone");
return false;
}
const uint32_t index = aHolder->AudioData().Length();
// https://github.com/w3c/webcodecs/issues/717
// For now, serialization is only allowed in the same address space, it's OK
// to send a refptr here instead of copying the backing buffer.
aHolder->AudioData().AppendElement(AudioDataSerializedData(*this));
return !NS_WARN_IF(!JS_WriteUint32Pair(aWriter, SCTAG_DOM_AUDIODATA, index));
}
// https://w3c.github.io/webcodecs/#ref-for-transfer-steps
UniquePtr<AudioData::TransferredData> AudioData::Transfer() {
AssertIsOnOwningThread();
if (!mResource) {
// Closed
LOGD("AudioData was already close in Transfer");
return nullptr;
}
// This adds a ref to the resource
auto serialized = MakeUnique<AudioDataSerializedData>(*this);
// This removes the ref to the resource, effectively transferring the backing
// storage.
Close();
return serialized;
}
// https://w3c.github.io/webcodecs/#ref-for-transfer-receiving-steps
/* static */
already_AddRefed<AudioData> AudioData::FromTransferred(nsIGlobalObject* aGlobal,
TransferredData* aData) {
MOZ_ASSERT(aData);
return MakeAndAddRef<AudioData>(aGlobal, *aData);
}
void AudioData::CloseIfNeeded() {
if (mResource) {
mResource = nullptr;
}
}
#undef LOGD
#undef LOGE
#undef LOG_INTERNAL
} // namespace mozilla::dom

View File

@ -1,171 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_AudioData_h
#define mozilla_dom_AudioData_h
#include "MediaData.h"
#include "WebCodecsUtils.h"
#include "js/TypeDecls.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/Span.h"
#include "mozilla/dom/AudioDataBinding.h"
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/StructuredCloneHolder.h"
#include "nsCycleCollectionParticipant.h"
#include "nsWrapperCache.h"
class nsIGlobalObject;
class nsIURI;
namespace mozilla::dom {
class MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer;
class OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer;
class Promise;
struct AudioDataBufferInit;
struct AudioDataCopyToOptions;
struct AudioDataInit;
} // namespace mozilla::dom
namespace mozilla::dom {
class AudioData;
class AudioDataResource;
struct AudioDataSerializedData;
class AudioData final : public nsISupports, public nsWrapperCache {
public:
NS_DECL_CYCLE_COLLECTING_ISUPPORTS
NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(AudioData)
public:
AudioData(nsIGlobalObject* aParent, const AudioDataSerializedData& aData);
AudioData(nsIGlobalObject* aParent,
already_AddRefed<AudioDataResource> aResource,
const AudioDataInit& aInit);
AudioData(const AudioData& aOther);
protected:
~AudioData() = default;
public:
nsIGlobalObject* GetParentObject() const;
JSObject* WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) override;
static already_AddRefed<AudioData> Constructor(const GlobalObject& aGlobal,
const AudioDataInit& aInit,
ErrorResult& aRv);
Nullable<mozilla::dom::AudioSampleFormat> GetFormat() const;
float SampleRate() const;
uint32_t NumberOfFrames() const;
uint32_t NumberOfChannels() const;
uint64_t Duration() const; // microseconds
int64_t Timestamp() const; // microseconds
uint32_t AllocationSize(const AudioDataCopyToOptions& aOptions,
ErrorResult& aRv);
void CopyTo(
const MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aDestination,
const AudioDataCopyToOptions& aOptions, ErrorResult& aRv);
already_AddRefed<AudioData> Clone(ErrorResult& aRv);
void Close();
// [Serializable] implementations: {Read, Write}StructuredClone
static JSObject* ReadStructuredClone(JSContext* aCx, nsIGlobalObject* aGlobal,
JSStructuredCloneReader* aReader,
const AudioDataSerializedData& aData);
bool WriteStructuredClone(JSStructuredCloneWriter* aWriter,
StructuredCloneHolder* aHolder) const;
// [Transferable] implementations: Transfer, FromTransferred
using TransferredData = AudioDataSerializedData;
UniquePtr<TransferredData> Transfer();
static already_AddRefed<AudioData> FromTransferred(nsIGlobalObject* aGlobal,
TransferredData* aData);
private:
size_t ComputeCopyElementCount(const AudioDataCopyToOptions& aOptions,
ErrorResult& aRv);
nsCString ToString() const;
// AudioData can run on either main thread or worker thread.
void AssertIsOnOwningThread() const { NS_ASSERT_OWNINGTHREAD(AudioData); }
void CloseIfNeeded();
nsCOMPtr<nsIGlobalObject> mParent;
friend struct AudioDataSerializedData;
int64_t mTimestamp;
uint32_t mNumberOfChannels;
uint32_t mNumberOfFrames;
float mSampleRate;
Maybe<AudioSampleFormat> mAudioSampleFormat;
RefPtr<mozilla::dom::AudioDataResource> mResource;
};
class AudioDataResource final {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioDataResource);
explicit AudioDataResource(FallibleTArray<uint8_t>&& aData)
: mData(std::move(aData)) {}
explicit AudioDataResource() : mData() {}
static AudioDataResource* Create(const Span<uint8_t>& aData) {
AudioDataResource* resource = new AudioDataResource();
if (!resource->mData.AppendElements(aData, mozilla::fallible_t())) {
return nullptr;
}
return resource;
}
static Result<already_AddRefed<AudioDataResource>, nsresult> Construct(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aInit);
Span<uint8_t> Data() { return Span(mData.Elements(), mData.Length()); };
private:
~AudioDataResource() = default;
// It's always possible for the allocation to fail -- the size is
// controlled by script.
FallibleTArray<uint8_t> mData;
};
struct AudioDataSerializedData {
explicit AudioDataSerializedData(const AudioData& aFrom)
: mTimestamp(aFrom.Timestamp()),
mNumberOfChannels(aFrom.NumberOfChannels()),
mNumberOfFrames(aFrom.NumberOfFrames()),
mSampleRate(aFrom.SampleRate()),
mAudioSampleFormat(NullableToMaybe(aFrom.GetFormat())),
mResource(aFrom.mResource) {}
int64_t mTimestamp{};
uint32_t mNumberOfChannels{};
uint32_t mNumberOfFrames{};
float mSampleRate{};
Maybe<AudioSampleFormat> mAudioSampleFormat;
RefPtr<AudioDataResource> mResource;
};
} // namespace mozilla::dom
#endif // mozilla_dom_AudioData_h

View File

@ -1,472 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/dom/AudioDecoder.h"
#include "mozilla/dom/AudioDecoderBinding.h"
#include "DecoderTraits.h"
#include "MediaContainerType.h"
#include "MediaData.h"
#include "VideoUtils.h"
#include "mozilla/Assertions.h"
#include "mozilla/Logging.h"
#include "mozilla/Maybe.h"
#include "mozilla/Try.h"
#include "mozilla/Unused.h"
#include "mozilla/dom/AudioDataBinding.h"
#include "mozilla/dom/EncodedAudioChunk.h"
#include "mozilla/dom/EncodedAudioChunkBinding.h"
#include "mozilla/dom/ImageUtils.h"
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/WebCodecsUtils.h"
#include "nsPrintfCString.h"
#include "nsReadableUtils.h"
extern mozilla::LazyLogModule gWebCodecsLog;
namespace mozilla::dom {
#ifdef LOG_INTERNAL
# undef LOG_INTERNAL
#endif // LOG_INTERNAL
#define LOG_INTERNAL(level, msg, ...) \
MOZ_LOG(gWebCodecsLog, LogLevel::level, (msg, ##__VA_ARGS__))
#ifdef LOG
# undef LOG
#endif // LOG
#define LOG(msg, ...) LOG_INTERNAL(Debug, msg, ##__VA_ARGS__)
#ifdef LOGW
# undef LOGW
#endif // LOGW
#define LOGW(msg, ...) LOG_INTERNAL(Warning, msg, ##__VA_ARGS__)
#ifdef LOGE
# undef LOGE
#endif // LOGE
#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__)
#ifdef LOGV
# undef LOGV
#endif // LOGV
#define LOGV(msg, ...) LOG_INTERNAL(Verbose, msg, ##__VA_ARGS__)
NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDecoder, DOMEventTargetHelper,
mErrorCallback, mOutputCallback)
NS_IMPL_ADDREF_INHERITED(AudioDecoder, DOMEventTargetHelper)
NS_IMPL_RELEASE_INHERITED(AudioDecoder, DOMEventTargetHelper)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioDecoder)
NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
/*
* Below are helper classes
*/
AudioDecoderConfigInternal::AudioDecoderConfigInternal(
const nsAString& aCodec, uint32_t aSampleRate, uint32_t aNumberOfChannels,
Maybe<RefPtr<MediaByteBuffer>>&& aDescription)
: mCodec(aCodec),
mSampleRate(aSampleRate),
mNumberOfChannels(aNumberOfChannels),
mDescription(std::move(aDescription)) {}
/*static*/
UniquePtr<AudioDecoderConfigInternal> AudioDecoderConfigInternal::Create(
const AudioDecoderConfig& aConfig) {
nsCString errorMessage;
if (!AudioDecoderTraits::Validate(aConfig, errorMessage)) {
LOGE("Failed to create AudioDecoderConfigInternal: %s", errorMessage.get());
return nullptr;
}
Maybe<RefPtr<MediaByteBuffer>> description;
if (aConfig.mDescription.WasPassed()) {
auto rv = GetExtraDataFromArrayBuffer(aConfig.mDescription.Value());
if (rv.isErr()) { // Invalid description data.
nsCString error;
GetErrorName(rv.unwrapErr(), error);
LOGE(
"Failed to create AudioDecoderConfigInternal due to invalid "
"description data. Error: %s",
error.get());
return nullptr;
}
description.emplace(rv.unwrap());
}
return UniquePtr<AudioDecoderConfigInternal>(new AudioDecoderConfigInternal(
aConfig.mCodec, aConfig.mSampleRate, aConfig.mNumberOfChannels,
std::move(description)));
}
/*
* The following are helpers for AudioDecoder methods
*/
struct AudioMIMECreateParam {
explicit AudioMIMECreateParam(const AudioDecoderConfigInternal& aConfig)
: mParsedCodec(ParseCodecString(aConfig.mCodec).valueOr(EmptyString())) {}
explicit AudioMIMECreateParam(const AudioDecoderConfig& aConfig)
: mParsedCodec(ParseCodecString(aConfig.mCodec).valueOr(EmptyString())) {}
const nsString mParsedCodec;
};
// Map WebCodecs PCM codec strings to WAV codec numbers; all other codecs
// pass through unchanged.
nsCString ConvertCodecName(const nsCString& aContainer,
const nsCString& aCodec) {
if (!aContainer.EqualsLiteral("x-wav")) {
return aCodec;
}
if (aCodec.EqualsLiteral("ulaw")) {
return nsCString("7");
}
if (aCodec.EqualsLiteral("alaw")) {
return nsCString("6");
}
if (aCodec.Find("f32") != kNotFound) {
return nsCString("3");
}
// Linear PCM
return nsCString("1");
}
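The numbers above are the classic WAV format tags (1 = integer PCM, 3 = IEEE float, 6 = A-law, 7 = µ-law). A usage sketch:

ConvertCodecName("x-wav"_ns, "ulaw"_ns);     // -> "7"
ConvertCodecName("x-wav"_ns, "pcm-f32"_ns);  // -> "3"
ConvertCodecName("x-wav"_ns, "pcm-s16"_ns);  // -> "1"
ConvertCodecName("opus"_ns, "opus"_ns);      // -> "opus" (non-WAV: unchanged)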
static nsTArray<nsCString> GuessMIMETypes(const AudioMIMECreateParam& aParam) {
nsCString codec = NS_ConvertUTF16toUTF8(aParam.mParsedCodec);
nsTArray<nsCString> types;
for (const nsCString& container : GuessContainers(aParam.mParsedCodec)) {
codec = ConvertCodecName(container, codec);
nsPrintfCString mime("audio/%s; codecs=%s", container.get(), codec.get());
types.AppendElement(mime);
}
return types;
}
static bool IsSupportedAudioCodec(const nsAString& aCodec) {
LOG("IsSupportedAudioCodec: %s", NS_ConvertUTF16toUTF8(aCodec).get());
return aCodec.EqualsLiteral("flac") || aCodec.EqualsLiteral("mp3") ||
IsAACCodecString(aCodec) || aCodec.EqualsLiteral("opus") ||
aCodec.EqualsLiteral("ulaw") || aCodec.EqualsLiteral("alaw") ||
aCodec.EqualsLiteral("pcm-u8") || aCodec.EqualsLiteral("pcm-s16") ||
aCodec.EqualsLiteral("pcm-s24") || aCodec.EqualsLiteral("pcm-s32") ||
aCodec.EqualsLiteral("pcm-f32");
}
// https://w3c.github.io/webcodecs/#check-configuration-support
template <typename Config>
static bool CanDecodeAudio(const Config& aConfig) {
auto param = AudioMIMECreateParam(aConfig);
if (!IsSupportedAudioCodec(param.mParsedCodec)) {
return false;
}
if (IsOnAndroid() && IsAACCodecString(param.mParsedCodec)) {
return false;
}
// TODO: Instead of calling CanHandleContainerType with the guessed
// containers, DecoderTraits should provide an API to tell if a codec is
// decodable or not.
for (const nsCString& mime : GuessMIMETypes(param)) {
if (Maybe<MediaContainerType> containerType =
MakeMediaExtendedMIMEType(mime)) {
if (DecoderTraits::CanHandleContainerType(
*containerType, nullptr /* DecoderDoctorDiagnostics */) !=
CANPLAY_NO) {
return true;
}
}
}
return false;
}
static nsTArray<UniquePtr<TrackInfo>> GetTracksInfo(
const AudioDecoderConfigInternal& aConfig) {
// TODO: Instead of calling GetTracksInfo with the guessed containers,
// DecoderTraits should provide an API to create the TrackInfo directly.
for (const nsCString& mime : GuessMIMETypes(AudioMIMECreateParam(aConfig))) {
if (Maybe<MediaContainerType> containerType =
MakeMediaExtendedMIMEType(mime)) {
if (nsTArray<UniquePtr<TrackInfo>> tracks =
DecoderTraits::GetTracksInfo(*containerType);
!tracks.IsEmpty()) {
return tracks;
}
}
}
return {};
}
static Result<Ok, nsresult> CloneConfiguration(
RootedDictionary<AudioDecoderConfig>& aDest, JSContext* aCx,
const AudioDecoderConfig& aConfig) {
aDest.mCodec = aConfig.mCodec;
if (aConfig.mDescription.WasPassed()) {
aDest.mDescription.Construct();
MOZ_TRY(CloneBuffer(aCx, aDest.mDescription.Value(),
aConfig.mDescription.Value()));
}
aDest.mNumberOfChannels = aConfig.mNumberOfChannels;
aDest.mSampleRate = aConfig.mSampleRate;
return Ok();
}
// https://w3c.github.io/webcodecs/#create-a-audiodata
static RefPtr<AudioData> CreateAudioData(nsIGlobalObject* aGlobalObject,
mozilla::AudioData* aData) {
MOZ_ASSERT(aGlobalObject);
MOZ_ASSERT(aData);
mozilla::dom::AudioDataInit init;
init.mFormat = mozilla::dom::AudioSampleFormat::F32;
init.mNumberOfChannels = aData->mChannels;
init.mSampleRate = AssertedCast<float>(aData->mRate);
init.mTimestamp = aData->mTime.ToMicroseconds();
auto buf = aData->MoveableData();
init.mNumberOfFrames = buf.Length() / init.mNumberOfChannels;
RefPtr<AudioDataResource> resource = AudioDataResource::Create(Span{
reinterpret_cast<uint8_t*>(buf.Data()), buf.Length() * sizeof(float)});
return MakeRefPtr<AudioData>(aGlobalObject, resource.forget(), init);
}
/* static */
bool AudioDecoderTraits::IsSupported(
const AudioDecoderConfigInternal& aConfig) {
return CanDecodeAudio(aConfig);
}
/* static */
Result<UniquePtr<TrackInfo>, nsresult> AudioDecoderTraits::CreateTrackInfo(
const AudioDecoderConfigInternal& aConfig) {
LOG("Create a AudioInfo from %s config",
NS_ConvertUTF16toUTF8(aConfig.mCodec).get());
nsTArray<UniquePtr<TrackInfo>> tracks = GetTracksInfo(aConfig);
if (tracks.Length() != 1 || tracks[0]->GetType() != TrackInfo::kAudioTrack) {
LOGE("Failed to get TrackInfo");
return Err(NS_ERROR_INVALID_ARG);
}
UniquePtr<TrackInfo> track(std::move(tracks[0]));
AudioInfo* ai = track->GetAsAudioInfo();
if (!ai) {
LOGE("Failed to get AudioInfo");
return Err(NS_ERROR_INVALID_ARG);
}
if (aConfig.mDescription.isSome()) {
RefPtr<MediaByteBuffer> buf;
buf = aConfig.mDescription.value();
if (buf) {
LOG("The given config has %zu bytes of description data", buf->Length());
ai->mCodecSpecificConfig =
AudioCodecSpecificVariant{AudioCodecSpecificBinaryBlob{buf}};
}
}
ai->mChannels = aConfig.mNumberOfChannels;
ai->mRate = aConfig.mSampleRate;
LOG("Created AudioInfo %s (%" PRIu32 "ch %" PRIu32
"Hz - with extra-data: %s)",
NS_ConvertUTF16toUTF8(aConfig.mCodec).get(), ai->mChannels, ai->mRate,
aConfig.mDescription.isSome() ? "yes" : "no");
return track;
}
// https://w3c.github.io/webcodecs/#valid-audiodecoderconfig
/* static */
bool AudioDecoderTraits::Validate(const AudioDecoderConfig& aConfig,
nsCString& aErrorMessage) {
Maybe<nsString> codec = ParseCodecString(aConfig.mCodec);
if (!codec || codec->IsEmpty()) {
LOGE("Validating AudioDecoderConfig: invalid codec string");
aErrorMessage.AppendPrintf("Invalid codec string %s",
NS_ConvertUTF16toUTF8(aConfig.mCodec).get());
return false;
}
LOG("Validating AudioDecoderConfig: codec: %s %uch %uHz %s extradata",
NS_ConvertUTF16toUTF8(codec.value()).get(), aConfig.mNumberOfChannels,
aConfig.mSampleRate, aConfig.mDescription.WasPassed() ? "w/" : "no");
if (aConfig.mNumberOfChannels == 0) {
aErrorMessage.AppendPrintf("Invalid number of channels of %u",
aConfig.mNumberOfChannels);
return false;
}
if (aConfig.mSampleRate == 0) {
aErrorMessage.AppendPrintf("Invalid sample-rate of %u",
aConfig.mSampleRate);
return false;
}
return true;
}
/* static */
UniquePtr<AudioDecoderConfigInternal> AudioDecoderTraits::CreateConfigInternal(
const AudioDecoderConfig& aConfig) {
return AudioDecoderConfigInternal::Create(aConfig);
}
/* static */
bool AudioDecoderTraits::IsKeyChunk(const EncodedAudioChunk& aInput) {
return aInput.Type() == EncodedAudioChunkType::Key;
}
/* static */
UniquePtr<EncodedAudioChunkData> AudioDecoderTraits::CreateInputInternal(
const EncodedAudioChunk& aInput) {
return aInput.Clone();
}
/*
* Below is the AudioDecoder implementation
*/
AudioDecoder::AudioDecoder(nsIGlobalObject* aParent,
RefPtr<WebCodecsErrorCallback>&& aErrorCallback,
RefPtr<AudioDataOutputCallback>&& aOutputCallback)
: DecoderTemplate(aParent, std::move(aErrorCallback),
std::move(aOutputCallback)) {
MOZ_ASSERT(mErrorCallback);
MOZ_ASSERT(mOutputCallback);
LOG("AudioDecoder %p ctor", this);
}
AudioDecoder::~AudioDecoder() {
LOG("AudioDecoder %p dtor", this);
Unused << ResetInternal(NS_ERROR_DOM_ABORT_ERR);
}
JSObject* AudioDecoder::WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) {
AssertIsOnOwningThread();
return AudioDecoder_Binding::Wrap(aCx, this, aGivenProto);
}
// https://w3c.github.io/webcodecs/#dom-audiodecoder-audiodecoder
/* static */
already_AddRefed<AudioDecoder> AudioDecoder::Constructor(
const GlobalObject& aGlobal, const AudioDecoderInit& aInit,
ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
return MakeAndAddRef<AudioDecoder>(
global.get(), RefPtr<WebCodecsErrorCallback>(aInit.mError),
RefPtr<AudioDataOutputCallback>(aInit.mOutput));
}
// https://w3c.github.io/webcodecs/#dom-audiodecoder-isconfigsupported
/* static */
already_AddRefed<Promise> AudioDecoder::IsConfigSupported(
const GlobalObject& aGlobal, const AudioDecoderConfig& aConfig,
ErrorResult& aRv) {
LOG("AudioDecoder::IsConfigSupported, config: %s",
NS_ConvertUTF16toUTF8(aConfig.mCodec).get());
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
RefPtr<Promise> p = Promise::Create(global.get(), aRv);
if (NS_WARN_IF(aRv.Failed())) {
return p.forget();
}
nsCString errorMessage;
if (!AudioDecoderTraits::Validate(aConfig, errorMessage)) {
p->MaybeRejectWithTypeError(errorMessage);
return p.forget();
}
// TODO: Move the following work to another thread to unblock the current
// thread, as the spec suggests.
RootedDictionary<AudioDecoderConfig> config(aGlobal.Context());
auto r = CloneConfiguration(config, aGlobal.Context(), aConfig);
if (r.isErr()) {
nsresult e = r.unwrapErr();
nsCString error;
GetErrorName(e, error);
LOGE("Failed to clone AudioDecoderConfig. Error: %s", error.get());
p->MaybeRejectWithTypeError("Failed to clone AudioDecoderConfig");
aRv.Throw(e);
return p.forget();
}
bool canDecode = CanDecodeAudio(config);
RootedDictionary<AudioDecoderSupport> s(aGlobal.Context());
s.mConfig.Construct(std::move(config));
s.mSupported.Construct(canDecode);
p->MaybeResolve(s);
return p.forget();
}
already_AddRefed<MediaRawData> AudioDecoder::InputDataToMediaRawData(
UniquePtr<EncodedAudioChunkData>&& aData, TrackInfo& aInfo,
const AudioDecoderConfigInternal& aConfig) {
AssertIsOnOwningThread();
MOZ_ASSERT(aInfo.GetAsAudioInfo());
if (!aData) {
LOGE("No data for conversion");
return nullptr;
}
RefPtr<MediaRawData> sample = aData->TakeData();
if (!sample) {
LOGE("Take no data for conversion");
return nullptr;
}
LOGV(
"EncodedAudioChunkData %p converted to %zu-byte MediaRawData - time: "
"%" PRIi64 "us, timecode: %" PRIi64 "us, duration: %" PRIi64
"us, key-frame: %s",
aData.get(), sample->Size(), sample->mTime.ToMicroseconds(),
sample->mTimecode.ToMicroseconds(), sample->mDuration.ToMicroseconds(),
sample->mKeyframe ? "yes" : "no");
return sample.forget();
}
nsTArray<RefPtr<AudioData>> AudioDecoder::DecodedDataToOutputType(
nsIGlobalObject* aGlobalObject, const nsTArray<RefPtr<MediaData>>&& aData,
AudioDecoderConfigInternal& aConfig) {
AssertIsOnOwningThread();
nsTArray<RefPtr<AudioData>> frames;
for (const RefPtr<MediaData>& data : aData) {
MOZ_RELEASE_ASSERT(data->mType == MediaData::Type::AUDIO_DATA);
RefPtr<mozilla::AudioData> d(data->As<mozilla::AudioData>());
frames.AppendElement(CreateAudioData(aGlobalObject, d.get()));
}
return frames;
}
#undef LOG
#undef LOGW
#undef LOGE
#undef LOGV
#undef LOG_INTERNAL
} // namespace mozilla::dom

View File

@ -1,83 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_AudioDecoder_h
#define mozilla_dom_AudioDecoder_h
#include "js/TypeDecls.h"
#include "mozilla/Attributes.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/dom/AudioData.h"
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/DecoderTemplate.h"
#include "mozilla/dom/DecoderTypes.h"
#include "mozilla/dom/RootedDictionary.h"
#include "nsCycleCollectionParticipant.h"
#include "nsWrapperCache.h"
class nsIGlobalObject;
namespace mozilla {
namespace dom {
class AudioDataOutputCallback;
class EncodedAudioChunk;
class EncodedAudioChunkData;
class EventHandlerNonNull;
class GlobalObject;
class Promise;
class WebCodecsErrorCallback;
struct AudioDecoderConfig;
struct AudioDecoderInit;
} // namespace dom
} // namespace mozilla
namespace mozilla::dom {
class AudioDecoder final : public DecoderTemplate<AudioDecoderTraits> {
public:
NS_DECL_ISUPPORTS_INHERITED
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioDecoder, DOMEventTargetHelper)
public:
AudioDecoder(nsIGlobalObject* aParent,
RefPtr<WebCodecsErrorCallback>&& aErrorCallback,
RefPtr<AudioDataOutputCallback>&& aOutputCallback);
protected:
~AudioDecoder();
public:
JSObject* WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) override;
static already_AddRefed<AudioDecoder> Constructor(
const GlobalObject& aGlobal, const AudioDecoderInit& aInit,
ErrorResult& aRv);
static already_AddRefed<Promise> IsConfigSupported(
const GlobalObject& aGlobal, const AudioDecoderConfig& aConfig,
ErrorResult& aRv);
protected:
virtual already_AddRefed<MediaRawData> InputDataToMediaRawData(
UniquePtr<EncodedAudioChunkData>&& aData, TrackInfo& aInfo,
const AudioDecoderConfigInternal& aConfig) override;
virtual nsTArray<RefPtr<AudioData>> DecodedDataToOutputType(
nsIGlobalObject* aGlobalObject, const nsTArray<RefPtr<MediaData>>&& aData,
AudioDecoderConfigInternal& aConfig) override;
};
} // namespace mozilla::dom
#endif // mozilla_dom_AudioDecoder_h

View File

@ -6,6 +6,8 @@
#include "DecoderAgent.h"
#include <atomic>
#include "ImageContainer.h"
#include "MediaDataDecoderProxy.h"
#include "PDMFactory.h"

View File

@ -139,8 +139,8 @@ void DecoderTemplate<DecoderType>::Configure(const ConfigType& aConfig,
nsCString errorMessage;
if (!DecoderType::Validate(aConfig, errorMessage)) {
LOG("Configure: Validate error: %s", errorMessage.get());
aRv.ThrowTypeError(errorMessage);
aRv.ThrowTypeError(
nsPrintfCString("config is invalid: %s", errorMessage.get()));
return;
}
@ -322,13 +322,13 @@ void DecoderTemplate<DecoderType>::OutputDecodedData(
MOZ_ASSERT(mState == CodecState::Configured);
MOZ_ASSERT(mActiveConfig);
nsTArray<RefPtr<OutputType>> frames = DecodedDataToOutputType(
nsTArray<RefPtr<VideoFrame>> frames = DecodedDataToOutputType(
GetParentObject(), std::move(aData), *mActiveConfig);
RefPtr<OutputCallbackType> cb(mOutputCallback);
for (RefPtr<OutputType>& frame : frames) {
RefPtr<VideoFrameOutputCallback> cb(mOutputCallback);
for (RefPtr<VideoFrame>& frame : frames) {
LOG("Outputing decoded data: ts: %" PRId64, frame->Timestamp());
RefPtr<OutputType> f = frame;
cb->Call((OutputType&)(*f));
RefPtr<VideoFrame> f = frame;
cb->Call((VideoFrame&)(*f));
}
}
@ -881,7 +881,6 @@ void DecoderTemplate<DecoderType>::DestroyDecoderAgentIfAny() {
}
template class DecoderTemplate<VideoDecoderTraits>;
template class DecoderTemplate<AudioDecoderTraits>;
#undef LOG
#undef LOGW

View File

@ -9,9 +9,6 @@
#include "MediaData.h"
#include "mozilla/Maybe.h"
#include "mozilla/dom/AudioData.h"
#include "mozilla/dom/AudioDecoderBinding.h"
#include "mozilla/dom/EncodedAudioChunk.h"
#include "mozilla/dom/EncodedVideoChunk.h"
#include "mozilla/dom/VideoColorSpaceBinding.h"
#include "mozilla/dom/VideoDecoderBinding.h"
@ -61,17 +58,17 @@ class VideoDecoderConfigInternal {
bool Equals(const VideoDecoderConfigInternal& aOther) const {
if (mDescription.isSome() != aOther.mDescription.isSome()) {
return false;
}
if (mDescription.isSome() && aOther.mDescription.isSome()) {
auto lhs = mDescription.value();
auto rhs = aOther.mDescription.value();
if (lhs->Length() != rhs->Length()) {
return false;
}
if (!ArrayEqual(lhs->Elements(), rhs->Elements(), lhs->Length())) {
return false;
}
}
return mCodec.Equals(aOther.mCodec) &&
mCodedHeight == aOther.mCodedHeight &&
@ -114,49 +111,6 @@ class VideoDecoderTraits {
const InputType& aInput);
};
class AudioDecoderConfigInternal {
public:
static UniquePtr<AudioDecoderConfigInternal> Create(
const AudioDecoderConfig& aConfig);
~AudioDecoderConfigInternal() = default;
nsString mCodec;
uint32_t mSampleRate;
uint32_t mNumberOfChannels;
Maybe<RefPtr<MediaByteBuffer>> mDescription;
// Compilation fix, should be abstracted by DecoderAgent since those are not
// supported
HardwareAcceleration mHardwareAcceleration =
HardwareAcceleration::No_preference;
Maybe<bool> mOptimizeForLatency;
private:
AudioDecoderConfigInternal(const nsAString& aCodec, uint32_t aSampleRate,
uint32_t aNumberOfChannels,
Maybe<RefPtr<MediaByteBuffer>>&& aDescription);
};
class AudioDecoderTraits {
public:
static constexpr nsLiteralCString Name = "AudioDecoder"_ns;
using ConfigType = AudioDecoderConfig;
using ConfigTypeInternal = AudioDecoderConfigInternal;
using InputType = EncodedAudioChunk;
using InputTypeInternal = EncodedAudioChunkData;
using OutputType = AudioData;
using OutputCallbackType = AudioDataOutputCallback;
static bool IsSupported(const ConfigTypeInternal& aConfig);
static Result<UniquePtr<TrackInfo>, nsresult> CreateTrackInfo(
const ConfigTypeInternal& aConfig);
static bool Validate(const ConfigType& aConfig, nsCString& aErrorMessage);
static UniquePtr<ConfigTypeInternal> CreateConfigInternal(
const ConfigType& aConfig);
static bool IsKeyChunk(const InputType& aInput);
static UniquePtr<InputTypeInternal> CreateInputInternal(
const InputType& aInput);
};
} // namespace dom
} // namespace mozilla

View File

@ -1,260 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/dom/EncodedAudioChunk.h"
#include "mozilla/dom/EncodedAudioChunkBinding.h"
#include <utility>
#include "MediaData.h"
#include "TimeUnits.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/Logging.h"
#include "mozilla/PodOperations.h"
#include "mozilla/dom/StructuredCloneHolder.h"
#include "mozilla/dom/StructuredCloneTags.h"
#include "mozilla/dom/WebCodecsUtils.h"
extern mozilla::LazyLogModule gWebCodecsLog;
using mozilla::media::TimeUnit;
namespace mozilla::dom {
#ifdef LOG_INTERNAL
# undef LOG_INTERNAL
#endif // LOG_INTERNAL
#define LOG_INTERNAL(level, msg, ...) \
MOZ_LOG(gWebCodecsLog, LogLevel::level, (msg, ##__VA_ARGS__))
#ifdef LOGW
# undef LOGW
#endif // LOGW
#define LOGW(msg, ...) LOG_INTERNAL(Warning, msg, ##__VA_ARGS__)
#ifdef LOGE
# undef LOGE
#endif // LOGE
#define LOGE(msg, ...) LOG_INTERNAL(Error, msg, ##__VA_ARGS__)
// Only needed for refcounted objects.
NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(EncodedAudioChunk, mParent)
NS_IMPL_CYCLE_COLLECTING_ADDREF(EncodedAudioChunk)
NS_IMPL_CYCLE_COLLECTING_RELEASE(EncodedAudioChunk)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(EncodedAudioChunk)
NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END
EncodedAudioChunkData::EncodedAudioChunkData(
already_AddRefed<MediaAlignedByteBuffer> aBuffer,
const EncodedAudioChunkType& aType, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration)
: mBuffer(aBuffer),
mType(aType),
mTimestamp(aTimestamp),
mDuration(aDuration) {
MOZ_ASSERT(mBuffer);
MOZ_ASSERT(mBuffer->Length() == mBuffer->Size());
MOZ_ASSERT(mBuffer->Length() <=
static_cast<size_t>(std::numeric_limits<uint32_t>::max()));
}
UniquePtr<EncodedAudioChunkData> EncodedAudioChunkData::Clone() const {
if (!mBuffer) {
LOGE("No buffer in EncodedAudioChunkData %p to clone!", this);
return nullptr;
}
// Since EncodedAudioChunkData can be zero-sized, cloning a zero-sized chunk
// is allowed.
if (mBuffer->Size() == 0) {
LOGW("Cloning an empty EncodedAudioChunkData %p", this);
}
auto buffer =
MakeRefPtr<MediaAlignedByteBuffer>(mBuffer->Data(), mBuffer->Length());
if (!buffer || buffer->Size() != mBuffer->Size()) {
LOGE("OOM to copy EncodedAudioChunkData %p", this);
return nullptr;
}
return MakeUnique<EncodedAudioChunkData>(buffer.forget(), mType, mTimestamp,
Maybe<uint64_t>(mDuration));
}
already_AddRefed<MediaRawData> EncodedAudioChunkData::TakeData() {
if (!mBuffer || !(*mBuffer)) {
LOGE("EncodedAudioChunkData %p has no data!", this);
return nullptr;
}
RefPtr<MediaRawData> sample(new MediaRawData(std::move(*mBuffer)));
sample->mKeyframe = mType == EncodedAudioChunkType::Key;
sample->mTime = TimeUnit::FromMicroseconds(mTimestamp);
sample->mTimecode = TimeUnit::FromMicroseconds(mTimestamp);
if (mDuration) {
CheckedInt64 duration(*mDuration);
if (!duration.isValid()) {
LOGE("EncodedAudioChunkData %p 's duration exceeds TimeUnit's limit",
this);
return nullptr;
}
sample->mDuration = TimeUnit::FromMicroseconds(duration.value());
}
return sample.forget();
}
EncodedAudioChunk::EncodedAudioChunk(
nsIGlobalObject* aParent, already_AddRefed<MediaAlignedByteBuffer> aBuffer,
const EncodedAudioChunkType& aType, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration)
: EncodedAudioChunkData(std::move(aBuffer), aType, aTimestamp,
std::move(aDuration)),
mParent(aParent) {}
EncodedAudioChunk::EncodedAudioChunk(nsIGlobalObject* aParent,
const EncodedAudioChunkData& aData)
: EncodedAudioChunkData(aData), mParent(aParent) {}
nsIGlobalObject* EncodedAudioChunk::GetParentObject() const {
AssertIsOnOwningThread();
return mParent.get();
}
JSObject* EncodedAudioChunk::WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) {
AssertIsOnOwningThread();
return EncodedAudioChunk_Binding::Wrap(aCx, this, aGivenProto);
}
// https://w3c.github.io/webcodecs/#encodedaudiochunk-constructors
/* static */
already_AddRefed<EncodedAudioChunk> EncodedAudioChunk::Constructor(
const GlobalObject& aGlobal, const EncodedAudioChunkInit& aInit,
ErrorResult& aRv) {
nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
if (!global) {
aRv.Throw(NS_ERROR_FAILURE);
return nullptr;
}
auto buffer = ProcessTypedArrays(
aInit.mData,
[&](const Span<uint8_t>& aData,
JS::AutoCheckCannotGC&&) -> RefPtr<MediaAlignedByteBuffer> {
// Make sure it's in uint32_t's range.
CheckedUint32 byteLength(aData.Length());
if (!byteLength.isValid()) {
aRv.Throw(NS_ERROR_INVALID_ARG);
return nullptr;
}
if (aData.Length() == 0) {
LOGW("Buffer for constructing EncodedAudioChunk is empty!");
}
RefPtr<MediaAlignedByteBuffer> buf = MakeRefPtr<MediaAlignedByteBuffer>(
aData.Elements(), aData.Length());
// Instead of checking *buf, a size comparison is used to allow
// constructing a zero-sized EncodedAudioChunk.
if (!buf || buf->Size() != aData.Length()) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}
return buf;
});
RefPtr<EncodedAudioChunk> chunk(new EncodedAudioChunk(
global, buffer.forget(), aInit.mType, aInit.mTimestamp,
OptionalToMaybe(aInit.mDuration)));
return aRv.Failed() ? nullptr : chunk.forget();
}
EncodedAudioChunkType EncodedAudioChunk::Type() const {
AssertIsOnOwningThread();
return mType;
}
int64_t EncodedAudioChunk::Timestamp() const {
AssertIsOnOwningThread();
return mTimestamp;
}
Nullable<uint64_t> EncodedAudioChunk::GetDuration() const {
AssertIsOnOwningThread();
return MaybeToNullable(mDuration);
}
uint32_t EncodedAudioChunk::ByteLength() const {
AssertIsOnOwningThread();
MOZ_ASSERT(mBuffer);
return static_cast<uint32_t>(mBuffer->Length());
}
// https://w3c.github.io/webcodecs/#dom-encodedaudiochunk-copyto
void EncodedAudioChunk::CopyTo(
const MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aDestination,
ErrorResult& aRv) {
AssertIsOnOwningThread();
ProcessTypedArraysFixed(aDestination, [&](const Span<uint8_t>& aData) {
if (mBuffer->Size() > aData.size_bytes()) {
aRv.ThrowTypeError(
"Destination ArrayBuffer smaller than source EncodedAudioChunk");
return;
}
PodCopy(aData.data(), mBuffer->Data(), mBuffer->Size());
});
}
// https://w3c.github.io/webcodecs/#ref-for-deserialization-steps
/* static */
JSObject* EncodedAudioChunk::ReadStructuredClone(
JSContext* aCx, nsIGlobalObject* aGlobal, JSStructuredCloneReader* aReader,
const EncodedAudioChunkData& aData) {
JS::Rooted<JS::Value> value(aCx, JS::NullValue());
// To avoid a rooting hazard error from returning a raw JSObject* before
// running the RefPtr destructor, RefPtr needs to be destructed before
// returning the raw JSObject*, which is why the RefPtr<EncodedAudioChunk> is
// created in the scope below. Otherwise, the static analysis infers the
// RefPtr cannot be safely destructed while the unrooted return JSObject* is
// on the stack.
{
auto frame = MakeRefPtr<EncodedAudioChunk>(aGlobal, aData);
if (!GetOrCreateDOMReflector(aCx, frame, &value) || !value.isObject()) {
return nullptr;
}
}
return value.toObjectOrNull();
}
// https://w3c.github.io/webcodecs/#ref-for-serialization-steps
bool EncodedAudioChunk::WriteStructuredClone(
JSStructuredCloneWriter* aWriter, StructuredCloneHolder* aHolder) const {
AssertIsOnOwningThread();
// Index the chunk and send the index to the receiver.
const uint32_t index =
static_cast<uint32_t>(aHolder->EncodedAudioChunks().Length());
// The serialization is limited to the same process scope so it's ok to
// serialize a reference instead of a copy.
aHolder->EncodedAudioChunks().AppendElement(EncodedAudioChunkData(*this));
return !NS_WARN_IF(
!JS_WriteUint32Pair(aWriter, SCTAG_DOM_ENCODEDAUDIOCHUNK, index));
}
#undef LOGW
#undef LOGE
#undef LOG_INTERNAL
} // namespace mozilla::dom
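Because WriteStructuredClone() above records only an index into the holder, a same-process postMessage() round-trip is cheap: the receiver is handed an equivalent chunk backed by the EncodedAudioChunkData stored in the holder. A minimal script-side sketch (the worker URL is hypothetical):

// Hypothetical worker script URL; any dedicated worker will do.
const worker = new Worker('chunk-worker.js');
const chunk = new EncodedAudioChunk({
  type: 'key',
  timestamp: 0,                        // microseconds
  data: new Uint8Array([1, 2, 3, 4]),
});
// Structured clone (the serialization steps above); the worker receives an
// EncodedAudioChunk with the same type, timestamp, and bytes.
worker.postMessage(chunk);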

View File

@ -1,117 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_EncodedAudioChunk_h
#define mozilla_dom_EncodedAudioChunk_h
#include "js/TypeDecls.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/Maybe.h"
#include "mozilla/dom/BindingDeclarations.h"
#include "nsCycleCollectionParticipant.h"
#include "nsWrapperCache.h"
class nsIGlobalObject;
namespace mozilla {
class MediaAlignedByteBuffer;
class MediaRawData;
namespace dom {
class MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer;
class OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer;
class StructuredCloneHolder;
enum class EncodedAudioChunkType : uint8_t;
struct EncodedAudioChunkInit;
} // namespace dom
} // namespace mozilla
namespace mozilla::dom {
class EncodedAudioChunkData {
public:
EncodedAudioChunkData(already_AddRefed<MediaAlignedByteBuffer> aBuffer,
const EncodedAudioChunkType& aType, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration);
EncodedAudioChunkData(const EncodedAudioChunkData& aData) = default;
~EncodedAudioChunkData() = default;
UniquePtr<EncodedAudioChunkData> Clone() const;
already_AddRefed<MediaRawData> TakeData();
protected:
// mBuffer's byte length is guaranteed to be smaller than UINT32_MAX.
RefPtr<MediaAlignedByteBuffer> mBuffer;
EncodedAudioChunkType mType;
int64_t mTimestamp;
Maybe<uint64_t> mDuration;
};
class EncodedAudioChunk final : public EncodedAudioChunkData,
public nsISupports,
public nsWrapperCache {
public:
NS_DECL_CYCLE_COLLECTING_ISUPPORTS
NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(EncodedAudioChunk)
public:
EncodedAudioChunk(nsIGlobalObject* aParent,
already_AddRefed<MediaAlignedByteBuffer> aBuffer,
const EncodedAudioChunkType& aType, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration);
EncodedAudioChunk(nsIGlobalObject* aParent,
const EncodedAudioChunkData& aData);
protected:
~EncodedAudioChunk() = default;
public:
nsIGlobalObject* GetParentObject() const;
JSObject* WrapObject(JSContext* aCx,
JS::Handle<JSObject*> aGivenProto) override;
static already_AddRefed<EncodedAudioChunk> Constructor(
const GlobalObject& aGlobal, const EncodedAudioChunkInit& aInit,
ErrorResult& aRv);
EncodedAudioChunkType Type() const;
int64_t Timestamp() const;
Nullable<uint64_t> GetDuration() const;
uint32_t ByteLength() const;
void CopyTo(
const MaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aDestination,
ErrorResult& aRv);
// [Serializable] implementations: {Read, Write}StructuredClone
static JSObject* ReadStructuredClone(JSContext* aCx, nsIGlobalObject* aGlobal,
JSStructuredCloneReader* aReader,
const EncodedAudioChunkData& aData);
bool WriteStructuredClone(JSStructuredCloneWriter* aWriter,
StructuredCloneHolder* aHolder) const;
private:
// EncodedAudioChunk can run on either main thread or worker thread.
void AssertIsOnOwningThread() const {
NS_ASSERT_OWNINGTHREAD(EncodedAudioChunk);
}
nsCOMPtr<nsIGlobalObject> mParent;
};
} // namespace mozilla::dom
#endif // mozilla_dom_EncodedAudioChunk_h

View File

@ -15,6 +15,8 @@
#include "MediaData.h"
#include "VideoUtils.h"
#include "mozilla/Assertions.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Logging.h"
#include "mozilla/Maybe.h"
#include "mozilla/Try.h"
@ -28,6 +30,7 @@
#include "mozilla/dom/WebCodecsUtils.h"
#include "nsPrintfCString.h"
#include "nsReadableUtils.h"
#include "nsThreadUtils.h"
#ifdef XP_MACOSX
# include "MacIOSurfaceImage.h"
@ -93,6 +96,9 @@ VideoColorSpaceInit VideoColorSpaceInternal::ToColorSpaceInit() const {
return init;
};
static Result<RefPtr<MediaByteBuffer>, nsresult> GetExtraData(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aBuffer);
VideoDecoderConfigInternal::VideoDecoderConfigInternal(
const nsAString& aCodec, Maybe<uint32_t>&& aCodedHeight,
Maybe<uint32_t>&& aCodedWidth, Maybe<VideoColorSpaceInternal>&& aColorSpace,
@ -122,7 +128,7 @@ UniquePtr<VideoDecoderConfigInternal> VideoDecoderConfigInternal::Create(
Maybe<RefPtr<MediaByteBuffer>> description;
if (aConfig.mDescription.WasPassed()) {
auto rv = GetExtraDataFromArrayBuffer(aConfig.mDescription.Value());
auto rv = GetExtraData(aConfig.mDescription.Value());
if (rv.isErr()) { // Invalid description data.
LOGE(
"Failed to create VideoDecoderConfigInternal due to invalid "
@ -270,6 +276,15 @@ static nsTArray<UniquePtr<TrackInfo>> GetTracksInfo(
return {};
}
static Result<RefPtr<MediaByteBuffer>, nsresult> GetExtraData(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aBuffer) {
RefPtr<MediaByteBuffer> data = MakeRefPtr<MediaByteBuffer>();
if (!AppendTypedArrayDataTo(aBuffer, *data)) {
return Err(NS_ERROR_OUT_OF_MEMORY);
}
return data->Length() > 0 ? data : nullptr;
}
static Result<Ok, nsresult> CloneConfiguration(
RootedDictionary<VideoDecoderConfig>& aDest, JSContext* aCx,
const VideoDecoderConfig& aConfig) {
@ -849,7 +864,7 @@ already_AddRefed<Promise> VideoDecoder::IsConfigSupported(
nsCString errorMessage;
if (!VideoDecoderTraits::Validate(aConfig, errorMessage)) {
p->MaybeRejectWithTypeError(nsPrintfCString(
"IsConfigSupported: config is invalid: %s", errorMessage.get()));
"VideoDecoderConfig is invalid: %s", errorMessage.get()));
return p.forget();
}
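A minimal sketch of the rejection path exercised by the Validate() call above; an empty codec string is one invalid configuration under the WebCodecs validity rules:

VideoDecoder.isConfigSupported({ codec: '' })  // invalid on purpose
  .catch((e) => {
    // Rejected with a TypeError; the message is built from one of the two
    // nsPrintfCString variants shown in the hunk above.
    console.log(e.name);  // "TypeError"
  });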

View File

@ -37,7 +37,7 @@ std::atomic<WebCodecsId> sNextId = 0;
namespace mozilla::dom {
/*
* The following are helpers for AudioDecoder and VideoDecoder methods
* The following are helpers for VideoDecoder methods
*/
nsTArray<nsCString> GuessContainers(const nsAString& aCodec) {
@ -57,29 +57,6 @@ nsTArray<nsCString> GuessContainers(const nsAString& aCodec) {
return {"mp4"_ns, "3gpp"_ns, "3gpp2"_ns, "3gp2"_ns};
}
if (IsAACCodecString(aCodec)) {
return {"adts"_ns, "mp4"_ns};
}
if (aCodec.EqualsLiteral("vorbis") || aCodec.EqualsLiteral("opus")) {
return {"ogg"_ns};
}
if (aCodec.EqualsLiteral("flac")) {
return {"flac"_ns};
}
if (aCodec.EqualsLiteral("mp3")) {
return {"mp3"_ns};
}
if (aCodec.EqualsLiteral("ulaw") || aCodec.EqualsLiteral("alaw") ||
aCodec.EqualsLiteral("pcm-u8") || aCodec.EqualsLiteral("pcm-s16") ||
aCodec.EqualsLiteral("pcm-s24") || aCodec.EqualsLiteral("pcm-s32") ||
aCodec.EqualsLiteral("pcm-f32")) {
return {"x-wav"_ns};
}
return {};
}
@ -318,6 +295,13 @@ Maybe<VideoPixelFormat> ImageBitmapFormatToVideoPixelFormat(
return Nothing();
}
Result<RefPtr<MediaByteBuffer>, nsresult> GetExtraDataFromArrayBuffer(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aBuffer) {
RefPtr<MediaByteBuffer> data = MakeRefPtr<MediaByteBuffer>();
Unused << AppendTypedArrayDataTo(aBuffer, *data);
return data->Length() > 0 ? data : nullptr;
}
bool IsOnAndroid() {
#if defined(ANDROID)
return true;
@ -575,13 +559,4 @@ nsString ConfigToString(const VideoDecoderConfig& aConfig) {
return internal->ToString();
}
Result<RefPtr<MediaByteBuffer>, nsresult> GetExtraDataFromArrayBuffer(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aBuffer) {
RefPtr<MediaByteBuffer> data = MakeRefPtr<MediaByteBuffer>();
if (!AppendTypedArrayDataTo(aBuffer, *data)) {
return Err(NS_ERROR_OUT_OF_MEMORY);
}
return data->Length() > 0 ? data : nullptr;
}
} // namespace mozilla::dom
}; // namespace mozilla::dom

View File

@ -8,7 +8,6 @@
#define MOZILLA_DOM_WEBCODECS_WEBCODECSUTILS_H
#include "ErrorList.h"
#include "MediaData.h"
#include "js/TypeDecls.h"
#include "mozilla/Maybe.h"
#include "mozilla/MozPromise.h"
@ -89,9 +88,6 @@ Result<Ok, nsresult> CloneBuffer(
OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aDest,
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aSrc);
Result<RefPtr<MediaByteBuffer>, nsresult> GetExtraDataFromArrayBuffer(
const OwningMaybeSharedArrayBufferViewOrMaybeSharedArrayBuffer& aBuffer);
/*
* The following are utilities to convert between VideoColorSpace values to
* gfx's values.

View File

@ -21,11 +21,8 @@ EXPORTS.mozilla += [
]
EXPORTS.mozilla.dom += [
"AudioData.h",
"AudioDecoder.h",
"DecoderTemplate.h",
"DecoderTypes.h",
"EncodedAudioChunk.h",
"EncodedVideoChunk.h",
"EncoderAgent.h",
"EncoderTemplate.h",
@ -38,11 +35,8 @@ EXPORTS.mozilla.dom += [
]
UNIFIED_SOURCES += [
"AudioData.cpp",
"AudioDecoder.cpp",
"DecoderAgent.cpp",
"DecoderTemplate.cpp",
"EncodedAudioChunk.cpp",
"EncodedVideoChunk.cpp",
"EncoderAgent.cpp",
"EncoderTemplate.cpp",

View File

@ -1,63 +0,0 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
* The origin of this IDL file is
* https://w3c.github.io/webcodecs/#audiodata
*/
// [Serializable, Transferable] are implemented without adding attributes here,
// but directly with {Read,Write}StructuredClone and Transfer/FromTransfered.
[Exposed=(Window,DedicatedWorker)]
interface AudioData {
[Throws]
constructor(AudioDataInit init);
readonly attribute AudioSampleFormat? format;
readonly attribute float sampleRate;
readonly attribute unsigned long numberOfFrames;
readonly attribute unsigned long numberOfChannels;
readonly attribute unsigned long long duration; // microseconds
readonly attribute long long timestamp; // microseconds
[Throws]
unsigned long allocationSize(AudioDataCopyToOptions options);
[Throws]
undefined copyTo(
// bug 1696216: Should be `copyTo(AllowSharedBufferSource destination, ...)`
([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) destination,
AudioDataCopyToOptions options);
[Throws]
AudioData clone();
undefined close();
};
dictionary AudioDataInit {
required AudioSampleFormat format;
required float sampleRate;
required [EnforceRange] unsigned long numberOfFrames;
required [EnforceRange] unsigned long numberOfChannels;
required [EnforceRange] long long timestamp; // microseconds
// bug 1696216: Should be AllowSharedBufferSource
required ([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) data;
sequence<ArrayBuffer> transfer = [];
};
enum AudioSampleFormat {
"u8",
"s16",
"s32",
"f32",
"u8-planar",
"s16-planar",
"s32-planar",
"f32-planar",
};
dictionary AudioDataCopyToOptions {
required [EnforceRange] unsigned long planeIndex;
[EnforceRange] unsigned long frameOffset = 0;
[EnforceRange] unsigned long frameCount;
AudioSampleFormat format;
};
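A minimal script-side sketch of the interface defined above; all numbers are illustrative. allocationSize() reports the byte count copyTo() needs for the requested plane (a single plane here, since 'f32' is interleaved):

const frames = 4;
const channels = 2;
const data = new AudioData({
  format: 'f32',                              // interleaved float32
  sampleRate: 48000,
  numberOfFrames: frames,
  numberOfChannels: channels,
  timestamp: 0,                               // microseconds
  data: new Float32Array(frames * channels),  // silence, interleaved
});
const bytes = data.allocationSize({ planeIndex: 0 });
const dest = new ArrayBuffer(bytes);
data.copyTo(dest, { planeIndex: 0 });
data.close();                                 // release the media resource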

View File

@ -1,53 +0,0 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
* The origin of this IDL file is
* https://w3c.github.io/webcodecs/#audiodecoder
*/
[Exposed=(Window,DedicatedWorker), SecureContext, Pref="dom.media.webcodecs.enabled"]
interface AudioDecoder : EventTarget {
[Throws]
constructor(AudioDecoderInit init);
readonly attribute CodecState state;
readonly attribute unsigned long decodeQueueSize;
attribute EventHandler ondequeue;
[Throws]
undefined configure(AudioDecoderConfig config);
[Throws]
undefined decode(EncodedAudioChunk chunk);
[NewObject, Throws]
Promise<undefined> flush();
[Throws]
undefined reset();
[Throws]
undefined close();
[NewObject, Throws]
static Promise<AudioDecoderSupport> isConfigSupported(AudioDecoderConfig config);
};
dictionary AudioDecoderInit {
required AudioDataOutputCallback output;
required WebCodecsErrorCallback error;
};
callback AudioDataOutputCallback = undefined(AudioData output);
dictionary AudioDecoderSupport {
boolean supported;
AudioDecoderConfig config;
};
dictionary AudioDecoderConfig {
required DOMString codec;
required [EnforceRange] unsigned long sampleRate;
required [EnforceRange] unsigned long numberOfChannels;
// Bug 1696216: Should be AllowSharedBufferSource
([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) description;
};
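A minimal usage sketch of the callback-based shape defined above; the codec parameters are illustrative and opusPacketBytes is a hypothetical Uint8Array holding one encoded packet:

const decoder = new AudioDecoder({
  output: (audioData) => audioData.close(),   // consume, then release
  error: (e) => console.error(e),
});
decoder.configure({ codec: 'opus', sampleRate: 48000, numberOfChannels: 2 });
decoder.decode(new EncodedAudioChunk({
  type: 'key',
  timestamp: 0,                               // microseconds
  data: opusPacketBytes,                      // hypothetical encoded packet
}));
decoder.flush().then(() => decoder.close());  // resolves once decoding settles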

View File

@ -1,38 +0,0 @@
/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/.
*
* The origin of this IDL file is
* https://w3c.github.io/webcodecs/#encodedaudiochunk
*/
// [Serializable] is implemented without adding an attribute here.
[Exposed=(Window,DedicatedWorker), Pref="dom.media.webcodecs.enabled"]
interface EncodedAudioChunk {
[Throws]
constructor(EncodedAudioChunkInit init);
readonly attribute EncodedAudioChunkType type;
readonly attribute long long timestamp; // microseconds
readonly attribute unsigned long long? duration; // microseconds
readonly attribute unsigned long byteLength;
[Throws]
undefined copyTo(
// bug 1696216: Should be `copyTo(AllowSharedBufferSource destination, ...)`
([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) destination);
};
dictionary EncodedAudioChunkInit {
required EncodedAudioChunkType type;
required [EnforceRange] long long timestamp; // microseconds
[EnforceRange] unsigned long long duration; // microseconds
// bug 1696216: Should be AllowSharedBufferSource
required ([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) data;
sequence<ArrayBuffer> transfer = [];
};
enum EncodedAudioChunkType {
"key",
"delta"
};
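A minimal sketch of the copyTo() contract defined above; as in the C++ CopyTo() earlier in this commit, a destination smaller than byteLength throws a TypeError (chunk contents illustrative):

const chunk = new EncodedAudioChunk({
  type: 'delta',
  timestamp: 2000,                    // microseconds
  data: new Uint8Array([0, 1, 2, 3]),
});
const dest = new Uint8Array(chunk.byteLength);
chunk.copyTo(dest);                   // fits: copies all 4 bytes
try {
  chunk.copyTo(new Uint8Array(2));    // too small for byteLength == 4
} catch (e) {
  console.log(e.name);                // "TypeError"
}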

View File

@ -45,7 +45,7 @@ dictionary VideoDecoderSupport {
dictionary VideoDecoderConfig {
required DOMString codec;
// Bug 1696216: Should be [AllowShared] BufferSource description;
([AllowShared] ArrayBufferView or [AllowShared] ArrayBuffer) description;
[EnforceRange] unsigned long codedWidth;
[EnforceRange] unsigned long codedHeight;

View File

@ -416,8 +416,6 @@ WEBIDL_FILES = [
"AudioBuffer.webidl",
"AudioBufferSourceNode.webidl",
"AudioContext.webidl",
"AudioData.webidl",
"AudioDecoder.webidl",
"AudioDestinationNode.webidl",
"AudioListener.webidl",
"AudioNode.webidl",
@ -528,7 +526,6 @@ WEBIDL_FILES = [
"DynamicsCompressorNode.webidl",
"Element.webidl",
"ElementInternals.webidl",
"EncodedAudioChunk.webidl",
"EncodedVideoChunk.webidl",
"Event.webidl",
"EventHandler.webidl",

View File

@ -0,0 +1,11 @@
[audio-data-serialization.any.html]
expected:
if asan and not fission: [OK, CRASH]
[Verify closing AudioData does not propagate accross contexts.]
expected: FAIL
[Verify posting closed AudioData throws.]
expected: FAIL
[Verify transferring audio data closes them.]
expected: FAIL

View File

@ -0,0 +1,50 @@
[audio-data.any.html]
[Verify AudioData constructors]
expected: FAIL
[Verify closing and cloning AudioData]
expected: FAIL
[Test we can construct AudioData with a negative timestamp.]
expected: FAIL
[Test conversion of uint8 data to float32]
expected: FAIL
[Test conversion of int16 data to float32]
expected: FAIL
[Test conversion of int32 data to float32]
expected: FAIL
[Test conversion of float32 data to float32]
expected: FAIL
[Test copying out planar and interleaved data]
expected: FAIL
[audio-data.any.worker.html]
[Verify AudioData constructors]
expected: FAIL
[Verify closing and cloning AudioData]
expected: FAIL
[Test we can construct AudioData with a negative timestamp.]
expected: FAIL
[Test conversion of uint8 data to float32]
expected: FAIL
[Test conversion of int16 data to float32]
expected: FAIL
[Test conversion of int32 data to float32]
expected: FAIL
[Test conversion of float32 data to float32]
expected: FAIL
[Test copying out planar and interleaved data]
expected: FAIL

View File

@ -0,0 +1,9 @@
[audio-data.crossOriginIsolated.https.any.html]
expected:
if (os == "android") and not swgl and debug: [OK, TIMEOUT]
if (os == "android") and swgl: [OK, TIMEOUT]
[Test construction and copyTo() using a SharedArrayBuffer]
expected: FAIL
[Test construction and copyTo() using a Uint8Array(SharedArrayBuffer)]
expected: FAIL

View File

@ -1,18 +1,17 @@
[audio-decoder.crossOriginIsolated.https.any.html]
expected:
if (os == "android") and debug and not swgl: [OK, TIMEOUT]
if (os == "android") and debug and swgl: [OK, TIMEOUT]
[Test isConfigSupported() and configure() using a SharedArrayBuffer]
expected: FAIL
[Test isConfigSupported() and configure() using a Uint8Array(SharedArrayBuffer)]
expected: FAIL
[audio-decoder.crossOriginIsolated.https.any.worker.html]
[Test isConfigSupported() and configure() using a SharedArrayBuffer]
expected:
if os == "android": PRECONDITION_FAILED
expected: FAIL
[Test isConfigSupported() and configure() using a Uint8Array(SharedArrayBuffer)]
expected:
if os == "android": PRECONDITION_FAILED
[audio-decoder.crossOriginIsolated.https.any.html]
[Test isConfigSupported() and configure() using a SharedArrayBuffer]
expected:
if os == "android": PRECONDITION_FAILED
[Test isConfigSupported() and configure() using a Uint8Array(SharedArrayBuffer)]
expected:
if os == "android": PRECONDITION_FAILED
expected: FAIL

View File

@ -0,0 +1,158 @@
[audio-decoder.https.any.html]
[Test AudioDecoder construction]
expected: FAIL
[Verify unconfigured AudioDecoder operations]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Empty codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing sampleRate]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing numberOfChannels]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Zero sampleRate]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Zero channels]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing codec]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Empty codec]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing sampleRate]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing numberOfChannels]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Zero sampleRate]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Zero channels]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Unrecognized codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Video codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Ambiguous codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Codec with MIME type]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Possible future opus codec string]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Possible future aac codec string]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Unrecognized codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Video codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Ambiguous codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Codec with MIME type]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Possible future opus codec string]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Possible future aac codec string]
expected: FAIL
[audio-decoder.https.any.worker.html]
[Test AudioDecoder construction]
expected: FAIL
[Verify unconfigured AudioDecoder operations]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Empty codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing sampleRate]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Missing numberOfChannels]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Zero sampleRate]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() rejects invalid config: Zero channels]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing codec]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Empty codec]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing sampleRate]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Missing numberOfChannels]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Zero sampleRate]
expected: FAIL
[Test that AudioDecoder.configure() rejects invalid config: Zero channels]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Unrecognized codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Video codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Ambiguous codec]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Codec with MIME type]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Possible future opus codec string]
expected: FAIL
[Test that AudioDecoder.isConfigSupported() doesn't support config: Possible future aac codec string]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Unrecognized codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Video codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Ambiguous codec]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Codec with MIME type]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Possible future opus codec string]
expected: FAIL
[Test that AudioDecoder.configure() doesn't support config: Possible future aac codec string]
expected: FAIL

View File

@ -1,6 +1,7 @@
[audio-encoder.https.any.html]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Simple audio encoding]
expected: FAIL
@ -9,66 +10,3 @@
[Test reset during flush]
expected: FAIL
[Channel number variation: 1]
expected: FAIL
[Channel number variation: 2]
expected: FAIL
[Sample rate variation: 3000]
expected: FAIL
[Sample rate variation: 13000]
expected: FAIL
[Sample rate variation: 23000]
expected: FAIL
[Sample rate variation: 33000]
expected: FAIL
[Sample rate variation: 43000]
expected: FAIL
[Sample rate variation: 53000]
expected: FAIL
[Sample rate variation: 63000]
expected: FAIL
[Sample rate variation: 73000]
expected: FAIL
[Sample rate variation: 83000]
expected: FAIL
[Sample rate variation: 93000]
expected: FAIL
[Encoding and decoding]
expected: FAIL
[Emit decoder config and extra data.]
expected: FAIL
[encodeQueueSize test]
expected: FAIL
[Test encoding Opus with additional parameters: Empty Opus config]
expected: FAIL
[Test encoding Opus with additional parameters: Opus with frameDuration]
expected: FAIL
[Test encoding Opus with additional parameters: Opus with complexity]
expected: FAIL
[Test encoding Opus with additional parameters: Opus with useinbandfec]
expected: FAIL
[Test encoding Opus with additional parameters: Opus with usedtx]
expected: FAIL
[Test encoding Opus with additional parameters: Opus mixed parameters]
expected: FAIL

View File

@ -1,193 +1,378 @@
[audioDecoder-codec-specific.https.any.html?adts_aac]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
if os == "android": ERROR
expected: ERROR
[Test isConfigSupported()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test configure()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding a with negative timestamp]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding after flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test reset during flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?pcm_mulaw]
expected: ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.html?mp3]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?adts_aac]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
if os == "android": ERROR
ERROR
[Test isConfigSupported()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test configure()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding a with negative timestamp]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding after flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test reset during flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?mp4_aac]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
if os == "android": ERROR
ERROR
[Test isConfigSupported()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test configure()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding a with negative timestamp]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding after flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test reset during flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[audioDecoder-codec-specific.https.any.html?opus]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?mp3]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.html?mp4_aac]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
if os == "android": ERROR
ERROR
[Test isConfigSupported()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test configure()]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding a with negative timestamp]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test decoding after flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[Test reset during flush]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected:
if os == "android": NOTRUN
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?opus]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.html?pcm_mulaw]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.worker.html?pcm_alaw]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN
[audioDecoder-codec-specific.https.any.html?pcm_alaw]
expected:
if (os == "android") and fission: [ERROR, TIMEOUT]
ERROR
[Test isConfigSupported()]
expected: NOTRUN
[Test that AudioDecoder.isConfigSupported() returns a parsed configuration]
expected: NOTRUN
[Test configure()]
expected: NOTRUN
[Verify closed AudioDecoder operations]
expected: NOTRUN
[Test decoding]
expected: NOTRUN
[Test decoding a with negative timestamp]
expected: NOTRUN
[Test decoding after flush]
expected: NOTRUN
[Test reset during flush]
expected: NOTRUN
[AudioDecoder decodeQueueSize test]
expected: NOTRUN

View File

@ -1,8 +1,7 @@
prefs: [dom.media.webcodecs.enabled:true]
[chunk-serialization.any.html]
[Verify EncodedAudioChunk is serializable.]
expected:
if (os == "android") and not debug: [PASS, FAIL]
expected: FAIL
[Verify EncodedVideoChunk is serializable.]
expected:

View File

@ -0,0 +1,18 @@
[encoded-audio-chunk.any.html]
expected:
if (os == "android") and fission: [OK, TIMEOUT]
[Test we can construct an EncodedAudioChunk.]
expected: FAIL
[Test copyTo() exception if destination invalid]
expected: FAIL
[encoded-audio-chunk.any.worker.html]
expected:
if (os == "android") and fission: [OK, TIMEOUT]
[Test we can construct an EncodedAudioChunk.]
expected: FAIL
[Test copyTo() exception if destination invalid]
expected: FAIL

View File

@ -0,0 +1,18 @@
[encoded-audio-chunk.crossOriginIsolated.https.any.worker.html]
expected:
if (os == "android") and debug and not swgl: [OK, TIMEOUT]
[Test construction and copyTo() using a SharedArrayBuffer]
expected: FAIL
[Test construction and copyTo() using a Uint8Array(SharedArrayBuffer)]
expected: FAIL
[encoded-audio-chunk.crossOriginIsolated.https.any.html]
expected:
if (os == "android") and debug and not swgl: [OK, TIMEOUT]
[Test construction and copyTo() using a SharedArrayBuffer]
expected: FAIL
[Test construction and copyTo() using a Uint8Array(SharedArrayBuffer)]
expected: FAIL

View File

@ -1,5 +1,50 @@
prefs: [dom.media.webcodecs.enabled:true]
[idlharness.https.any.html]
[AudioDecoder interface: existence and properties of interface object]
expected: FAIL
[AudioDecoder interface object length]
expected: FAIL
[AudioDecoder interface object name]
expected: FAIL
[AudioDecoder interface: existence and properties of interface prototype object]
expected: FAIL
[AudioDecoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[AudioDecoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[AudioDecoder interface: attribute state]
expected: FAIL
[AudioDecoder interface: attribute decodeQueueSize]
expected: FAIL
[AudioDecoder interface: attribute ondequeue]
expected: FAIL
[AudioDecoder interface: operation configure(AudioDecoderConfig)]
expected: FAIL
[AudioDecoder interface: operation decode(EncodedAudioChunk)]
expected: FAIL
[AudioDecoder interface: operation flush()]
expected: FAIL
[AudioDecoder interface: operation reset()]
expected: FAIL
[AudioDecoder interface: operation close()]
expected: FAIL
[AudioDecoder interface: operation isConfigSupported(AudioDecoderConfig)]
expected: FAIL
[AudioEncoder interface: existence and properties of interface object]
expected: FAIL
@ -45,6 +90,81 @@ prefs: [dom.media.webcodecs.enabled:true]
[AudioEncoder interface: operation isConfigSupported(AudioEncoderConfig)]
expected: FAIL
[EncodedAudioChunk interface: existence and properties of interface object]
expected: FAIL
[EncodedAudioChunk interface object length]
expected: FAIL
[EncodedAudioChunk interface object name]
expected: FAIL
[EncodedAudioChunk interface: existence and properties of interface prototype object]
expected: FAIL
[EncodedAudioChunk interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[EncodedAudioChunk interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[EncodedAudioChunk interface: attribute type]
expected: FAIL
[EncodedAudioChunk interface: attribute timestamp]
expected: FAIL
[EncodedAudioChunk interface: attribute duration]
expected: FAIL
[EncodedAudioChunk interface: attribute byteLength]
expected: FAIL
[AudioData interface: existence and properties of interface object]
expected: FAIL
[AudioData interface object length]
expected: FAIL
[AudioData interface object name]
expected: FAIL
[AudioData interface: existence and properties of interface prototype object]
expected: FAIL
[AudioData interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[AudioData interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[AudioData interface: attribute format]
expected: FAIL
[AudioData interface: attribute sampleRate]
expected: FAIL
[AudioData interface: attribute numberOfFrames]
expected: FAIL
[AudioData interface: attribute numberOfChannels]
expected: FAIL
[AudioData interface: attribute duration]
expected: FAIL
[AudioData interface: attribute timestamp]
expected: FAIL
[AudioData interface: operation allocationSize(AudioDataCopyToOptions)]
expected: FAIL
[AudioData interface: operation clone()]
expected: FAIL
[AudioData interface: operation close()]
expected: FAIL
[VideoFrame interface: operation metadata()]
expected: FAIL
@ -156,207 +276,10 @@ prefs: [dom.media.webcodecs.enabled:true]
[idl_test setup]
expected: FAIL
[idlharness.https.any.worker.html]
[AudioEncoder interface: existence and properties of interface object]
expected: FAIL
[AudioEncoder interface object length]
expected: FAIL
[AudioEncoder interface object name]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[AudioEncoder interface: attribute state]
expected: FAIL
[AudioEncoder interface: attribute encodeQueueSize]
expected: FAIL
[AudioEncoder interface: attribute ondequeue]
expected: FAIL
[AudioEncoder interface: operation configure(AudioEncoderConfig)]
expected: FAIL
[AudioEncoder interface: operation encode(AudioData)]
expected: FAIL
[AudioEncoder interface: operation flush()]
expected: FAIL
[AudioEncoder interface: operation reset()]
expected: FAIL
[AudioEncoder interface: operation close()]
expected: FAIL
[AudioEncoder interface: operation isConfigSupported(AudioEncoderConfig)]
expected: FAIL
[VideoEncoder interface: existence and properties of interface object]
expected: FAIL
[VideoEncoder interface object length]
expected: FAIL
[VideoEncoder interface object name]
expected: FAIL
[VideoEncoder interface: existence and properties of interface prototype object]
expected: FAIL
[VideoEncoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[VideoEncoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[VideoEncoder interface: attribute state]
expected: FAIL
[VideoEncoder interface: attribute encodeQueueSize]
expected: FAIL
[VideoEncoder interface: attribute ondequeue]
expected: FAIL
[VideoEncoder interface: operation configure(VideoEncoderConfig)]
expected: FAIL
[VideoEncoder interface: operation encode(VideoFrame, optional VideoEncoderEncodeOptions)]
expected: FAIL
[VideoEncoder interface: operation flush()]
expected: FAIL
[VideoEncoder interface: operation reset()]
expected: FAIL
[VideoEncoder interface: operation close()]
expected: FAIL
[VideoEncoder interface: operation isConfigSupported(VideoEncoderConfig)]
expected: FAIL
[VideoFrame interface: operation metadata()]
expected: FAIL
[ImageDecoder interface: existence and properties of interface object]
expected: FAIL
[ImageDecoder interface object length]
expected: FAIL
[ImageDecoder interface object name]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageDecoder interface: attribute type]
expected: FAIL
[ImageDecoder interface: attribute complete]
expected: FAIL
[ImageDecoder interface: attribute completed]
expected: FAIL
[ImageDecoder interface: attribute tracks]
expected: FAIL
[ImageDecoder interface: operation decode(optional ImageDecodeOptions)]
expected: FAIL
[ImageDecoder interface: operation reset()]
expected: FAIL
[ImageDecoder interface: operation close()]
expected: FAIL
[ImageDecoder interface: operation isTypeSupported(DOMString)]
expected: FAIL
[ImageTrackList interface: existence and properties of interface object]
expected: FAIL
[ImageTrackList interface object length]
expected: FAIL
[ImageTrackList interface object name]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageTrackList interface: attribute ready]
expected: FAIL
[ImageTrackList interface: attribute length]
expected: FAIL
[ImageTrackList interface: attribute selectedIndex]
expected: FAIL
[ImageTrackList interface: attribute selectedTrack]
expected: FAIL
[ImageTrack interface: existence and properties of interface object]
expected: FAIL
[ImageTrack interface object length]
expected: FAIL
[ImageTrack interface object name]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageTrack interface: attribute animated]
expected: FAIL
[ImageTrack interface: attribute frameCount]
expected: FAIL
[ImageTrack interface: attribute repetitionCount]
expected: FAIL
[ImageTrack interface: attribute selected]
expected: FAIL
[idl_test setup]
[EncodedAudioChunk interface: operation copyTo(AllowSharedBufferSource)]
expected: FAIL
[idl_test setup]
[AudioData interface: operation copyTo(AllowSharedBufferSource, AudioDataCopyToOptions)]
expected: FAIL
@ -642,158 +565,3 @@ prefs: [dom.media.webcodecs.enabled:true]
[AudioData interface: operation copyTo(AllowSharedBufferSource, AudioDataCopyToOptions)]
expected: FAIL
[idlharness.https.any.worker.html]
[AudioEncoder interface: existence and properties of interface object]
expected: FAIL
[AudioEncoder interface object length]
expected: FAIL
[AudioEncoder interface object name]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[AudioEncoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[AudioEncoder interface: attribute state]
expected: FAIL
[AudioEncoder interface: attribute encodeQueueSize]
expected: FAIL
[AudioEncoder interface: attribute ondequeue]
expected: FAIL
[AudioEncoder interface: operation configure(AudioEncoderConfig)]
expected: FAIL
[AudioEncoder interface: operation encode(AudioData)]
expected: FAIL
[AudioEncoder interface: operation flush()]
expected: FAIL
[AudioEncoder interface: operation reset()]
expected: FAIL
[AudioEncoder interface: operation close()]
expected: FAIL
[AudioEncoder interface: operation isConfigSupported(AudioEncoderConfig)]
expected: FAIL
[VideoFrame interface: operation metadata()]
expected: FAIL
[ImageDecoder interface: existence and properties of interface object]
expected: FAIL
[ImageDecoder interface object length]
expected: FAIL
[ImageDecoder interface object name]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageDecoder interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageDecoder interface: attribute type]
expected: FAIL
[ImageDecoder interface: attribute complete]
expected: FAIL
[ImageDecoder interface: attribute completed]
expected: FAIL
[ImageDecoder interface: attribute tracks]
expected: FAIL
[ImageDecoder interface: operation decode(optional ImageDecodeOptions)]
expected: FAIL
[ImageDecoder interface: operation reset()]
expected: FAIL
[ImageDecoder interface: operation close()]
expected: FAIL
[ImageDecoder interface: operation isTypeSupported(DOMString)]
expected: FAIL
[ImageTrackList interface: existence and properties of interface object]
expected: FAIL
[ImageTrackList interface object length]
expected: FAIL
[ImageTrackList interface object name]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageTrackList interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageTrackList interface: attribute ready]
expected: FAIL
[ImageTrackList interface: attribute length]
expected: FAIL
[ImageTrackList interface: attribute selectedIndex]
expected: FAIL
[ImageTrackList interface: attribute selectedTrack]
expected: FAIL
[ImageTrack interface: existence and properties of interface object]
expected: FAIL
[ImageTrack interface object length]
expected: FAIL
[ImageTrack interface object name]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object's "constructor" property]
expected: FAIL
[ImageTrack interface: existence and properties of interface prototype object's @@unscopables property]
expected: FAIL
[ImageTrack interface: attribute animated]
expected: FAIL
[ImageTrack interface: attribute frameCount]
expected: FAIL
[ImageTrack interface: attribute repetitionCount]
expected: FAIL
[ImageTrack interface: attribute selected]
expected: FAIL
[idl_test setup]
expected: FAIL


@ -2,20 +2,19 @@
// META: script=/common/media.js
// META: script=/webcodecs/utils.js
var defaultInit = {
  timestamp: 1234,
  channels: 2,
  sampleRate: 8000,
  frames: 100,
};
function createDefaultAudioData() {
  return make_audio_data(
      defaultInit.timestamp, defaultInit.channels, defaultInit.sampleRate,
      defaultInit.frames);
}
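// An illustrative sketch, not from the original patch: typical use of the
// helper above, assuming make_audio_data() from /webcodecs/utils.js
// synchronously returns an AudioData built from defaultInit.
test(t => {
  let data = createDefaultAudioData();
  assert_equals(data.numberOfFrames, defaultInit.frames);
  data.close();
}, 'Illustrative: createDefaultAudioData yields the default frame count');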
test(t => {
@ -129,302 +128,230 @@ test(t => {
data.close();
}, 'Test we can construct AudioData with a negative timestamp.');
// Each test vector represents two channels of data in the following arbitrary
// layout: <min, zero, max, min, max / 2, min / 2, zero, max, zero, zero>.
const testVectorFrames = 5;
const testVectorChannels = 2;
const testVectorInterleavedResult =
[[-1.0, 1.0, 0.5, 0.0, 0.0], [0.0, -1.0, -0.5, 1.0, 0.0]];
const testVectorPlanarResult =
[[-1.0, 0.0, 1.0, -1.0, 0.5], [-0.5, 0.0, 1.0, 0.0, 0.0]];
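// An illustrative sketch, not from the original patch: with two channels,
// interleaved data alternates ch0/ch1 per frame, which is how the expected
// vectors above are derived from the canonical layout.
test(t => {
  const interleaved = [-1.0, 0.0, 1.0, -1.0, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0];
  const ch0 = interleaved.filter((_, i) => i % 2 == 0);
  const ch1 = interleaved.filter((_, i) => i % 2 == 1);
  assert_array_equals(ch0, testVectorInterleavedResult[0], 'channel 0');
  assert_array_equals(ch1, testVectorInterleavedResult[1], 'channel 1');
}, 'Illustrative: deriving the interleaved test vectors');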
test(t => {
let audio_data_init = {
timestamp: 0,
data: new Float32Array([1,2,3,4,5,6,7,8]),
numberOfFrames: 4,
numberOfChannels: 2,
sampleRate: 44100,
format: 'f32',
};
let audioData = new AudioData(audio_data_init);
let dest = new Float32Array(8);
assert_throws_js(
RangeError, () => audioData.copyTo(dest, {planeIndex: 1}),
'copyTo from interleaved data with non-zero planeIndex throws');
audioData.close();
}, 'Test that copyTo throws if copying from interleaved with a non-zero planeIndex');
const INT8_MIN = (-0x7f - 1);
const INT8_MAX = 0x7f;
const UINT8_MAX = 0xff;
// Indices to pick a particular specific value in a specific sample-format
const MIN = 0; // Minimum sample value, max amplitude
const MAX = 1; // Maximum sample value, max amplitude
const HALF = 2; // Half the maximum sample value, positive
const NEGATIVE_HALF = 3; // Half the maximum sample value, negative
const BIAS = 4; // Center of the range, silence
const DISCRETE_STEPS = 5; // Number of different values for a type.
const testVectorUint8 = [
0, -INT8_MIN, UINT8_MAX, 0, INT8_MAX / 2 + 128, INT8_MIN / 2 + 128,
-INT8_MIN, UINT8_MAX, -INT8_MIN, -INT8_MIN
];
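// An illustrative sketch, not from the original patch: u8 samples are
// unsigned with silence biased to 128, so the canonical layout encodes as
// min -> 0, zero -> 128 (-INT8_MIN), max -> 255 (UINT8_MAX).
test(t => {
  assert_equals(testVectorUint8[0], 0, 'min');
  assert_equals(testVectorUint8[1], 128, 'zero / bias');
  assert_equals(testVectorUint8[2], 255, 'max');
}, 'Illustrative: u8 encoding of the canonical layout');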
function pow2(p) {
return 2 ** p;
}
// Rounding operations for conversion, currently always floor (round towards
// negative infinity).
let r = Math.floor.bind(this);
const TEST_VALUES = {
u8: [0, 255, 191, 64, 128, 256],
s16: [
-pow2(15),
pow2(15) - 1,
r((pow2(15) - 1) / 2),
r(-pow2(15) / 2),
0,
pow2(16),
],
s32: [
-pow2(31),
pow2(31) - 1,
r((pow2(31) - 1) / 2),
r(-pow2(31) / 2),
0,
pow2(32),
],
f32: [-1.0, 1.0, 0.5, -0.5, 0, pow2(24)],
};
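// An illustrative sketch, not from the original patch: the symbolic indices
// defined earlier select one value from each row above, and the final
// DISCRETE_STEPS entry is the number of representable steps per type.
test(t => {
  assert_equals(TEST_VALUES.u8[BIAS], 128, 'u8 silence');
  assert_equals(TEST_VALUES.s16[MAX], 32767, 's16 max');
  assert_equals(TEST_VALUES.u8[DISCRETE_STEPS], 256, 'u8 has 2^8 steps');
  assert_equals(TEST_VALUES.f32[DISCRETE_STEPS], pow2(24), 'f32 mantissa steps');
}, 'Illustrative: reading TEST_VALUES with the symbolic indices');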
const TEST_TEMPLATE = {
channels: 2,
frames: 5,
// Each test is run with an element of the cartesian product of a pair of
// elements of the set of types in [u8, s16, s32, f32].
// For each test, this template is copied and the symbolic values replaced
// with the concrete values for that particular type.
testInput: [MIN, BIAS, MAX, MIN, HALF, NEGATIVE_HALF, BIAS, MAX, BIAS, BIAS],
testVectorInterleavedResult: [
[MIN, MAX, HALF, BIAS, BIAS],
[BIAS, MIN, NEGATIVE_HALF, MAX, BIAS],
],
testVectorPlanarResult: [
[MIN, BIAS, MAX, MIN, HALF],
[NEGATIVE_HALF, BIAS, MAX, BIAS, BIAS],
],
};
function isInteger(type) {
switch (type) {
case "u8":
case "s16":
case "s32":
return true;
case "f32":
return false;
default:
throw "invalid type";
}
}
// This is the complex part: carefully select an acceptable error value
// depending on various factors: expected destination value, source type,
// destination type. This is designed to be strict but achievable with a
// simple sample format conversion (no dithering or other complex processing).
function epsilon(expectedDestValue, sourceType, destType) {
// Strict comparison if not converting
if (sourceType == destType) {
return 0.0;
}
// There are three cases in which the maximum value cannot be reached, when
// converting from a smaller integer sample type to a wider integer sample
// type:
// - u8 to s16
// - u8 to s32
// - s16 to s32
if (expectedDestValue == TEST_VALUES[destType][MAX]) {
if (sourceType == "u8" && destType == "s16") {
return expectedDestValue - 32511; // INT16_MAX - (2 << 7)
} else if (sourceType == "u8" && destType == "s32") {
return expectedDestValue - 2130706432; // INT32_MAX - (2 << 23) + 1
} else if (sourceType == "s16" && destType == "s32") {
return expectedDestValue - 2147418112; // INT32_MAX - UINT16_MAX
}
}
// Min and bias value are correctly mapped for all integer sample-types
if (isInteger(sourceType) && isInteger(destType)) {
if (expectedDestValue == TEST_VALUES[destType][MIN] ||
expectedDestValue == TEST_VALUES[destType][BIAS]) {
return 0.0;
}
}
// If converting from float32 to u8 or s16, allow choosing the rounding
// direction. s32 has higher resolution than f32 in [-1.0,1.0] (24 bits of
// mantissa)
if (!isInteger(sourceType) && isInteger(destType) && destType != "s32") {
return 1.0;
}
// In all other cases, expect an accuracy that depends on the source type and
// the destination type.
// The resolution of the source type.
var sourceResolution = TEST_VALUES[sourceType][DISCRETE_STEPS];
// The resolution of the destination type.
var destResolution = TEST_VALUES[destType][DISCRETE_STEPS];
// Computations should be exact if going from high resolution to low resolution.
if (sourceResolution > destResolution) {
return 0.0;
} else {
// Something that approaches the precision imbalance
return destResolution / sourceResolution;
}
}
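// An illustrative sketch, not from the original patch: a worked example of
// the epsilon selection above. A simple shift conversion maps the u8 maximum
// (255) to (255 - 128) * 256 = 32512 in s16, so the tolerance against
// INT16_MAX must be one u8 step in s16 units (256), while the minimum and
// bias values map exactly.
test(t => {
  assert_equals(epsilon(TEST_VALUES.s16[MAX], 'u8', 's16'), 256, 'u8 -> s16 max');
  assert_equals(epsilon(TEST_VALUES.s16[MIN], 'u8', 's16'), 0, 'u8 -> s16 min');
  assert_equals(epsilon(0.5, 'f32', 'f32'), 0, 'same type is exact');
}, 'Illustrative: epsilon for u8 -> s16 conversion');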
// Fill the template above with the values for a particular type
function get_type_values(type) {
let cloned = structuredClone(TEST_TEMPLATE);
cloned.testInput = Array.from(
cloned.testInput,
idx => TEST_VALUES[type][idx]
);
cloned.testVectorInterleavedResult = Array.from(
cloned.testVectorInterleavedResult,
c => {
return Array.from(c, idx => {
return TEST_VALUES[type][idx];
});
}
);
cloned.testVectorPlanarResult = Array.from(
cloned.testVectorPlanarResult,
c => {
return Array.from(c, idx => {
return TEST_VALUES[type][idx];
});
}
);
return cloned;
}
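// An illustrative sketch, not from the original patch: what the instantiated
// template looks like for s16 once the symbolic indices are resolved.
test(t => {
  const s16 = get_type_values('s16');
  assert_equals(s16.testInput[0], -32768, 'MIN');
  assert_equals(s16.testVectorPlanarResult[0][2], 32767, 'MAX');
  assert_equals(s16.testVectorInterleavedResult[1][0], 0, 'BIAS');
}, 'Illustrative: instantiating TEST_TEMPLATE for s16');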
function typeToArrayType(type) {
switch (type) {
case "u8":
return Uint8Array;
case "s16":
return Int16Array;
case "s32":
return Int32Array;
case "f32":
return Float32Array;
default:
throw "Unexpected";
}
}
function arrayTypeToType(array) {
switch (array.constructor) {
case Uint8Array:
return "u8";
case Int16Array:
return "s16";
case Int32Array:
return "s32";
case Float32Array:
return "f32";
default:
throw "Unexpected";
}
}
function check_array_equality(values, expected, sourceType, message, assert_func) {
if (values.length != expected.length) {
throw "Array not of the same length";
}
for (var i = 0; i < values.length; i++) {
var eps = epsilon(expected[i], sourceType, arrayTypeToType(values));
assert_func(
Math.abs(expected[i] - values[i]) <= eps,
`Got ${values[i]} but expected result ${
expected[i]
} at index ${i} when converting from ${sourceType} to ${arrayTypeToType(
values
)}, epsilon ${eps}`
);
}
assert_func(
true,
`${values} is equal to ${expected} when converting from ${sourceType} to ${arrayTypeToType(
values
)}`
);
}
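// An illustrative sketch, not from the original patch: using the comparison
// helper directly, assuming the implementation maps a u8 sample v to the s16
// value (v - 128) * 256, so every element lands within one u8 step (256).
test(t => {
  const converted = new Int16Array([-32768, 0, 32512, -32768, 16128]);
  const expected = get_type_values('s16').testVectorPlanarResult[0];
  check_array_equality(converted, expected, 'u8', 'planar channel 0', assert_true);
}, 'Illustrative: check_array_equality tolerates one u8 step of error');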
function conversionTest(sourceType, destinationType) {
test(function (t) {
var test = get_type_values(sourceType);
var result = get_type_values(destinationType);
var sourceArrayCtor = typeToArrayType(sourceType);
var destArrayCtor = typeToArrayType(destinationType);
let data = new AudioData({
timestamp: defaultInit.timestamp,
data: new sourceArrayCtor(test.testInput),
numberOfFrames: test.frames,
numberOfChannels: test.channels,
sampleRate: defaultInit.sampleRate,
format: sourceType,
});
// All conversions can be supported, but conversion of any type to f32-planar
// MUST be supported.
var assert_func = destinationType == "f32" ? assert_true : assert_implements_optional;
let dest = new destArrayCtor(data.numberOfFrames);
data.copyTo(dest, { planeIndex: 0, format: destinationType + "-planar" });
check_array_equality(
dest,
result.testVectorInterleavedResult[0],
sourceType,
"interleaved channel 0",
assert_func
);
data.copyTo(dest, { planeIndex: 1, format: destinationType + "-planar" });
check_array_equality(
dest,
result.testVectorInterleavedResult[1],
sourceType,
"interleaved channel 0",
assert_func
);
let destInterleaved = new destArrayCtor(data.numberOfFrames * data.numberOfChannels);
data.copyTo(destInterleaved, { planeIndex: 0, format: destinationType });
check_array_equality(
destInterleaved,
result.testInput,
sourceType,
"copyTo from interleaved to interleaved (conversion only)",
assert_implements_optional
);
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new sourceArrayCtor(test.testInput),
numberOfFrames: test.frames,
numberOfChannels: test.channels,
sampleRate: defaultInit.sampleRate,
format: sourceType + "-planar",
});
data.copyTo(dest, { planeIndex: 0, format: destinationType + "-planar" });
check_array_equality(
dest,
result.testVectorPlanarResult[0],
sourceType,
"planar channel 0",
assert_func,
);
data.copyTo(dest, { planeIndex: 1, format: destinationType + "-planar" });
check_array_equality(
dest,
result.testVectorPlanarResult[1],
sourceType,
"planar channel 1",
assert_func
);
// Planar to interleaved isn't supported
}, `Test conversion of ${sourceType} to ${destinationType}`);
}
const TYPES = ["u8", "s16", "s32", "f32"];
TYPES.forEach(sourceType => {
  TYPES.forEach(destinationType => {
    conversionTest(sourceType, destinationType);
  });
});
test(t => {
  let data = new AudioData({
    timestamp: defaultInit.timestamp,
    data: new Uint8Array(testVectorUint8),
    numberOfFrames: testVectorFrames,
    numberOfChannels: testVectorChannels,
    sampleRate: defaultInit.sampleRate,
    format: 'u8'
  });
  const epsilon = 1.0 / (UINT8_MAX - 1);
let dest = new Float32Array(data.numberOfFrames);
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[0], epsilon, 'interleaved channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[1], epsilon, 'interleaved channel 1');
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Uint8Array(testVectorUint8),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 'u8-planar'
});
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[0], epsilon, 'planar channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[1], epsilon, 'planar channel 1');
}, 'Test conversion of uint8 data to float32');
test(t => {
const INT16_MIN = (-0x7fff - 1);
const INT16_MAX = 0x7fff;
const testVectorInt16 = [
INT16_MIN, 0, INT16_MAX, INT16_MIN, INT16_MAX / 2, INT16_MIN / 2, 0,
INT16_MAX, 0, 0
];
let data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Int16Array(testVectorInt16),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 's16'
});
const epsilon = 1.0 / (INT16_MAX + 1);
let dest = new Float32Array(data.numberOfFrames);
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[0], epsilon, 'interleaved channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[1], epsilon, 'interleaved channel 1');
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Int16Array(testVectorInt16),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 's16-planar'
});
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[0], epsilon, 'planar channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[1], epsilon, 'planar channel 1');
}, 'Test conversion of int16 data to float32');
test(t => {
const INT32_MIN = (-0x7fffffff - 1);
const INT32_MAX = 0x7fffffff;
const testVectorInt32 = [
INT32_MIN, 0, INT32_MAX, INT32_MIN, INT32_MAX / 2, INT32_MIN / 2, 0,
INT32_MAX, 0, 0
];
let data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Int32Array(testVectorInt32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 's32'
});
const epsilon = 1.0 / INT32_MAX;
let dest = new Float32Array(data.numberOfFrames);
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[0], epsilon, 'interleaved channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[1], epsilon, 'interleaved channel 1');
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Int32Array(testVectorInt32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 's32-planar'
});
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[0], epsilon, 'planar channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[1], epsilon, 'planar channel 1');
}, 'Test conversion of int32 data to float32');
test(t => {
const testVectorFloat32 =
[-1.0, 0.0, 1.0, -1.0, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0];
let data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Float32Array(testVectorFloat32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 'f32'
});
const epsilon = 0;
let dest = new Float32Array(data.numberOfFrames);
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[0], epsilon, 'interleaved channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorInterleavedResult[1], epsilon, 'interleaved channel 1');
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Float32Array(testVectorFloat32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 'f32-planar'
});
data.copyTo(dest, {planeIndex: 0, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[0], epsilon, 'planar channel 0');
data.copyTo(dest, {planeIndex: 1, format: 'f32-planar'});
assert_array_approx_equals(
dest, testVectorPlanarResult[1], epsilon, 'planar channel 1');
}, 'Test conversion of float32 data to float32');
test(t => {
const testVectorFloat32 =
[-1.0, 0.0, 1.0, -1.0, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0];
let data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Float32Array(testVectorFloat32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 'f32'
});
const epsilon = 0;
// Call copyTo() without specifying a format, for interleaved data.
let dest = new Float32Array(data.numberOfFrames * testVectorChannels);
data.copyTo(dest, {planeIndex: 0});
assert_array_approx_equals(
dest, testVectorFloat32, epsilon, 'interleaved data');
assert_throws_js(RangeError, () => {
data.copyTo(dest, {planeIndex: 1});
}, 'Interleaved AudioData cannot copy out planeIndex > 0');
data = new AudioData({
timestamp: defaultInit.timestamp,
data: new Float32Array(testVectorFloat32),
numberOfFrames: testVectorFrames,
numberOfChannels: testVectorChannels,
sampleRate: defaultInit.sampleRate,
format: 'f32-planar'
});
// Call copyTo() without specifying a format, for planar data.
dest = new Float32Array(data.numberOfFrames);
data.copyTo(dest, {planeIndex: 0});
assert_array_approx_equals(
dest, testVectorPlanarResult[0], epsilon, 'planar channel 0');
data.copyTo(dest, {planeIndex: 1});
assert_array_approx_equals(
dest, testVectorPlanarResult[1], epsilon, 'planar channel 1');
}, 'Test copying out planar and interleaved data');