Bug 1319987: P9. More coding style fixes. r=gerald

MozReview-Commit-ID: DhFRqkWQZny

--HG--
extra : rebase_source : 03ed44efc83fe9cab7fc975229ac4e5746aff96b
Jean-Yves Avenard 2017-01-27 13:20:37 +01:00
parent 8dcd7e8a30
commit b7844bd5c3
26 changed files with 889 additions and 671 deletions


@ -17,14 +17,13 @@
#include "nsDataHashtable.h"
#include "nsThreadUtils.h"
namespace mozilla
{
namespace mozilla {
namespace layers
{
class ImageContainer;
class KnowsCompositor;
namespace layers {
class ImageContainer;
class KnowsCompositor;
} // namespace layers
class AbstractThread;
class MediaResource;
class ReentrantMonitor;
@ -35,7 +34,8 @@ class GMPCrashHelper;
typedef nsDataHashtable<nsCStringHashKey, nsCString> MetadataTags;
static inline bool IsCurrentThread(nsIThread* aThread) {
static inline bool IsCurrentThread(nsIThread* aThread)
{
return NS_GetCurrentThread() == aThread;
}
@ -59,7 +59,10 @@ public:
// Can be called on any thread.
virtual void NotifyDecodedFrames(const FrameStatisticsData& aStats) = 0;
virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() { return nullptr; };
virtual AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull()
{
return nullptr;
};
// Return an event that will be notified when data arrives in MediaResource.
// MediaDecoderReader will register with this event to receive notifications
@ -74,7 +77,8 @@ public:
// and we might have a new compositor. If this new compositor requires us to
// recreate our decoders, then we expect the existing decoders to return an
// error independently of this.
virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>* CompositorUpdatedEvent()
virtual MediaEventSource<RefPtr<layers::KnowsCompositor>>*
CompositorUpdatedEvent()
{
return nullptr;
}
@ -82,7 +86,7 @@ public:
// Notify the media decoder that a decryption key is required before emitting
// further output. This only needs to be overridden for decoders that expect
// encryption, such as the MediaSource decoder.
virtual void NotifyWaitingForKey() {}
virtual void NotifyWaitingForKey() { }
// Return an event that will be notified when a decoder is waiting for a
// decryption key before it can return more output.
@ -95,13 +99,12 @@ public:
virtual AbstractThread* AbstractMainThread() const = 0;
protected:
virtual void UpdateEstimatedMediaDuration(int64_t aDuration) {};
virtual void UpdateEstimatedMediaDuration(int64_t aDuration) { };
public:
void DispatchUpdateEstimatedMediaDuration(int64_t aDuration)
{
NS_DispatchToMainThread(NewRunnableMethod<int64_t>(this,
&AbstractMediaDecoder::UpdateEstimatedMediaDuration,
aDuration));
NS_DispatchToMainThread(NewRunnableMethod<int64_t>(
this, &AbstractMediaDecoder::UpdateEstimatedMediaDuration, aDuration));
}
virtual VideoFrameContainer* GetVideoFrameContainer() = 0;
@ -112,19 +115,22 @@ public:
virtual MediaDecoderOwner* GetOwner() const = 0;
// Set by Reader if the current audio track can be offloaded
virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) {}
virtual void SetPlatformCanOffloadAudio(bool aCanOffloadAudio) { }
virtual already_AddRefed<GMPCrashHelper> GetCrashHelper() { return nullptr; }
// Stack based class to assist in notifying the frame statistics of
// parsed and decoded frames. Use inside video demux & decode functions
// to ensure all parsed and decoded frames are reported on all return paths.
class AutoNotifyDecoded {
class AutoNotifyDecoded
{
public:
explicit AutoNotifyDecoded(AbstractMediaDecoder* aDecoder)
: mDecoder(aDecoder)
{}
~AutoNotifyDecoded() {
{
}
~AutoNotifyDecoded()
{
if (mDecoder) {
mDecoder->NotifyDecodedFrames(mStats);
}
@ -138,8 +144,12 @@ public:
// Classes directly inheriting from AbstractMediaDecoder do not support
// Observe and it should never be called directly.
NS_IMETHOD Observe(nsISupports *aSubject, const char * aTopic, const char16_t * aData) override
{ MOZ_CRASH("Forbidden method"); return NS_OK; }
NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
const char16_t* aData) override
{
MOZ_CRASH("Forbidden method");
return NS_OK;
}
};
} // namespace mozilla
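
As context for the stack-based AutoNotifyDecoded helper above, a minimal sketch of the intended usage pattern; the surrounding function and DecodeOneFrame() are hypothetical, while AutoNotifyDecoded, its public mStats member, and NotifyDecodedFrames() come from this header:

  bool DecodeOneFrame(); // hypothetical helper, declared elsewhere

  // Hypothetical decode step: statistics accumulated into mStats are
  // reported via NotifyDecodedFrames() when the scope is left, on every
  // return path.
  void DecodeStep(mozilla::AbstractMediaDecoder* aDecoder)
  {
    mozilla::AbstractMediaDecoder::AutoNotifyDecoded a(aDecoder);
    a.mStats.mParsedFrames += 1;  // one frame demuxed
    if (!DecodeOneFrame()) {
      return;                     // stats are still reported here
    }
    a.mStats.mDecodedFrames += 1; // one frame decoded
  }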


@ -24,7 +24,7 @@ class Benchmark;
class BenchmarkPlayback : public QueueObject
{
friend class Benchmark;
explicit BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
BenchmarkPlayback(Benchmark* aMainThreadState, MediaDataDemuxer* aDemuxer);
void DemuxSamples();
void DemuxNextSample();
void MainThreadShutdown();
@ -61,7 +61,9 @@ public:
Parameters()
: mFramesToMeasure(-1)
, mStartupFrame(1)
, mTimeout(TimeDuration::Forever()) {}
, mTimeout(TimeDuration::Forever())
{
}
Parameters(int32_t aFramesToMeasure,
uint32_t aStartupFrame,
@ -70,7 +72,9 @@ public:
: mFramesToMeasure(aFramesToMeasure)
, mStartupFrame(aStartupFrame)
, mStopAtFrame(Some(aStopAtFrame))
, mTimeout(aTimeout) {}
, mTimeout(aTimeout)
{
}
const int32_t mFramesToMeasure;
const uint32_t mStartupFrame;
@ -80,7 +84,8 @@ public:
typedef MozPromise<uint32_t, bool, /* IsExclusive = */ true> BenchmarkPromise;
explicit Benchmark(MediaDataDemuxer* aDemuxer, const Parameters& aParameters = Parameters());
explicit Benchmark(MediaDataDemuxer* aDemuxer,
const Parameters& aParameters = Parameters());
RefPtr<BenchmarkPromise> Run();
static void Init();
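
A sketch of driving the Benchmark class above, assuming a demuxer for the reference content and an AbstractThread to resolve on (both hypothetical here); Parameters, Run(), and BenchmarkPromise are from this header:

  RefPtr<Benchmark> estimator = new Benchmark(
    demuxer, // hypothetical MediaDataDemuxer for the reference content
    Benchmark::Parameters(40 /* frames to measure */,
                          10 /* startup frame */,
                          100 /* stop at frame */,
                          TimeDuration::FromSeconds(10) /* timeout */));
  estimator->Run()->Then(
    thread, __func__, // hypothetical AbstractThread*
    [](uint32_t aDecodeFps) { /* resolved: measured frames per second */ },
    [](bool) { /* rejected: the benchmark failed or timed out */ });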


@ -724,7 +724,7 @@ DOMMediaStream::CloneInternal(TrackForwardingOption aForwarding)
LOG(LogLevel::Info, ("DOMMediaStream %p created clone %p, forwarding %s tracks",
this, newStream.get(),
aForwarding == TrackForwardingOption::ALL
? "all" : "current"));
? "all" : "current"));
MOZ_RELEASE_ASSERT(mPlaybackStream);
MOZ_RELEASE_ASSERT(mPlaybackStream->Graph());


@ -67,8 +67,8 @@ void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)",
aPreviousDriver,
aPreviousDriver->AsAudioCallbackDriver()
? "AudioCallbackDriver"
: "SystemClockDriver"));
? "AudioCallbackDriver"
: "SystemClockDriver"));
SetPreviousDriver(aPreviousDriver);
}


@ -1526,8 +1526,8 @@ MediaCache::AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData,
bo->mLastUseTime = now;
stream->mBlocks[streamBlockIndex] = blockIndex;
if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) {
bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK
? PLAYED_BLOCK : METADATA_BLOCK;
bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK ? PLAYED_BLOCK
: METADATA_BLOCK;
// This must be the most-recently-used block, since we
// marked it as used now (which may be slightly bogus, but we'll
// treat it as used for simplicity).
@ -1648,7 +1648,8 @@ MediaCache::NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex,
GetListForBlock(bo)->RemoveBlock(aBlockIndex);
bo->mClass =
(aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK)
? METADATA_BLOCK : PLAYED_BLOCK;
? METADATA_BLOCK
: PLAYED_BLOCK;
// Since this is just being used now, it can definitely be at the front
// of mMetadataBlocks or mPlayedBlocks
GetListForBlock(bo)->AddFirstBlock(aBlockIndex);


@ -270,12 +270,14 @@ typedef AlignedBuffer<int16_t> AlignedShortBuffer;
typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
// Container that holds media samples.
class MediaData {
class MediaData
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaData)
enum Type {
enum Type
{
AUDIO_DATA = 0,
VIDEO_DATA,
RAW_DATA,
@ -294,7 +296,8 @@ public:
, mDuration(aDuration)
, mFrames(aFrames)
, mKeyframe(false)
{}
{
}
// Type of contained data.
const Type mType;
@ -348,7 +351,8 @@ protected:
, mDuration(0)
, mFrames(aFrames)
, mKeyframe(false)
{}
{
}
virtual ~MediaData() {}
@ -356,7 +360,8 @@ protected:
// NullData is for a decoder generating a sample which doesn't need to be
// rendered.
class NullData : public MediaData {
class NullData : public MediaData
{
public:
NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
: MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
@ -366,7 +371,8 @@ public:
};
// Holds a chunk of decoded audio frames.
class AudioData : public MediaData {
class AudioData : public MediaData
{
public:
AudioData(int64_t aOffset,
@ -411,7 +417,7 @@ public:
AlignedAudioBuffer mAudioData;
protected:
~AudioData() {}
~AudioData() { }
};
namespace layers {
@ -422,7 +428,8 @@ class PlanarYCbCrImage;
class VideoInfo;
// Holds a decoded video frame, in YCbCr format. These are queued in the reader.
class VideoData : public MediaData {
class VideoData : public MediaData
{
public:
typedef gfx::IntRect IntRect;
typedef gfx::IntSize IntSize;
@ -437,8 +444,10 @@ public:
// 0 = Y
// 1 = Cb
// 2 = Cr
struct YCbCrBuffer {
struct Plane {
struct YCbCrBuffer
{
struct Plane
{
uint8_t* mData;
uint32_t mWidth;
uint32_t mHeight;
@ -451,7 +460,8 @@ public:
YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
};
class Listener {
class Listener
{
public:
virtual void OnSentToCompositor() = 0;
virtual ~Listener() {}
@ -469,44 +479,48 @@ public:
// Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
// to allocate an Image to hold the copied data.
static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
const YCbCrBuffer::Plane &aAlphaPlane,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const YCbCrBuffer &aBuffer,
const YCbCrBuffer::Plane &aAlphaPlane,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
layers::TextureClient* aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
layers::TextureClient* aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateFromImage(const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateFromImage(
const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
const RefPtr<Image>& aImage,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
// Initialize PlanarYCbCrImage. Only when aCopyData is true is the
// video data copied to the PlanarYCbCrImage.
@ -553,7 +567,7 @@ protected:
class CryptoTrack
{
public:
CryptoTrack() : mValid(false), mMode(0), mIVSize(0) {}
CryptoTrack() : mValid(false), mMode(0), mIVSize(0) { }
bool mValid;
int32_t mMode;
int32_t mIVSize;
@ -620,7 +634,8 @@ private:
MediaRawData* mTarget;
};
class MediaRawData : public MediaData {
class MediaRawData : public MediaData
{
public:
MediaRawData();
MediaRawData(const uint8_t* aData, size_t aSize);
@ -676,10 +691,10 @@ private:
class MediaByteBuffer : public nsTArray<uint8_t> {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
MediaByteBuffer() = default;
explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) {}
explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) { }
private:
~MediaByteBuffer() {}
~MediaByteBuffer() { }
};
} // namespace mozilla
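
As context for the VideoData factory methods above, a sketch of creating a frame from a filled YCbCrBuffer. The plane data, VideoInfo, ImageContainer, and the remaining arguments are assumed to exist in the caller, and mPlanes names the buffer's plane array (elided from the hunk above):

  VideoData::YCbCrBuffer buffer;
  // Plane 0 = Y, 1 = Cb, 2 = Cr; pointers and dimensions assumed valid.
  buffer.mPlanes[0].mData = yPlane;
  buffer.mPlanes[0].mWidth = width;
  buffer.mPlanes[0].mHeight = height;
  // ... mPlanes[1] and mPlanes[2] filled the same way ...

  RefPtr<VideoData> frame = VideoData::CreateAndCopyData(
    info,           // VideoInfo describing the stream
    imageContainer, // may be used to allocate the copied Image
    offset,         // byte offset within the resource
    time,           // presentation time in microseconds
    duration,       // frame duration in microseconds
    buffer,
    true,           // keyframe
    timecode,
    pictureRect);   // visible region within the frame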


@ -55,8 +55,8 @@ public:
// aTrackNumber must be constrained between 0 and GetNumberTracks(aType) - 1
// The actual Track ID is to be retrieved by calling
// MediaTrackDemuxer::TrackInfo.
virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(TrackInfo::TrackType aType,
uint32_t aTrackNumber) = 0;
virtual already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(
TrackInfo::TrackType aType, uint32_t aTrackNumber) = 0;
// Returns true if the underlying resource allows seeking.
virtual bool IsSeekable() const = 0;
@ -101,15 +101,17 @@ class MediaTrackDemuxer
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaTrackDemuxer)
class SamplesHolder {
class SamplesHolder
{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder)
nsTArray<RefPtr<MediaRawData>> mSamples;
private:
~SamplesHolder() {}
~SamplesHolder() { }
};
class SkipFailureHolder {
class SkipFailureHolder
{
public:
SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped)
: mFailure(aFailure)
@ -119,9 +121,13 @@ public:
uint32_t mSkipped;
};
typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true> SeekPromise;
typedef MozPromise<RefPtr<SamplesHolder>, MediaResult, /* IsExclusive = */ true> SamplesPromise;
typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true> SkipAccessPointPromise;
typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true>
SeekPromise;
typedef MozPromise<RefPtr<SamplesHolder>, MediaResult,
/* IsExclusive = */ true>
SamplesPromise;
typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true>
SkipAccessPointPromise;
// Returns the TrackInfo (a.k.a. Track Description) for this track.
// The TrackInfo returned will be:
@ -207,7 +213,7 @@ public:
}
protected:
virtual ~MediaTrackDemuxer() {}
virtual ~MediaTrackDemuxer() { }
};
} // namespace mozilla
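
A sketch of consuming the promise typedefs above, assuming a track demuxer obtained via GetTrackDemuxer() and a target task queue; GetSamples() is the MediaTrackDemuxer method (elided from these hunks) that returns a SamplesPromise:

  trackDemuxer->GetSamples(1)->Then(
    taskQueue, __func__, // hypothetical target thread
    [](RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
      for (const RefPtr<MediaRawData>& sample : aSamples->mSamples) {
        // feed each demuxed sample to the decoder
      }
    },
    [](const MediaResult& aError) {
      // e.g. NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA or end of stream
    });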


@ -92,7 +92,8 @@ class MediaMemoryTracker : public nsIMemoryReporter
static StaticRefPtr<MediaMemoryTracker> sUniqueInstance;
static MediaMemoryTracker* UniqueInstance() {
static MediaMemoryTracker* UniqueInstance()
{
if (!sUniqueInstance) {
sUniqueInstance = new MediaMemoryTracker();
sUniqueInstance->InitMemoryReporter();
@ -101,7 +102,8 @@ class MediaMemoryTracker : public nsIMemoryReporter
}
typedef nsTArray<MediaDecoder*> DecodersArray;
static DecodersArray& Decoders() {
static DecodersArray& Decoders()
{
return UniqueInstance()->mDecoders;
}
@ -441,7 +443,8 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
mWatchManager.Watch(mStateMachineDuration, &MediaDecoder::DurationChanged);
// mStateMachineIsShutdown
mWatchManager.Watch(mStateMachineIsShutdown, &MediaDecoder::ShutdownBitChanged);
mWatchManager.Watch(mStateMachineIsShutdown,
&MediaDecoder::ShutdownBitChanged);
// readyState
mWatchManager.Watch(mPlayState, &MediaDecoder::UpdateReadyState);
@ -458,7 +461,8 @@ MediaDecoder::MediaDecoder(MediaDecoderOwner* aOwner)
// mIgnoreProgressData
mWatchManager.Watch(mLogicallySeeking, &MediaDecoder::SeekingChanged);
mWatchManager.Watch(mIsAudioDataAudible, &MediaDecoder::NotifyAudibleStateChanged);
mWatchManager.Watch(mIsAudioDataAudible,
&MediaDecoder::NotifyAudibleStateChanged);
MediaShutdownManager::Instance().Register(this);
}
@ -648,8 +652,9 @@ MediaDecoder::SetStateMachineParameters()
mAbstractMainThread, this, &MediaDecoder::OnMetadataUpdate);
mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::MetadataLoaded);
mFirstFrameLoadedListener = mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
mFirstFrameLoadedListener =
mDecoderStateMachine->FirstFrameLoadedEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::FirstFrameLoaded);
mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect(
mAbstractMainThread, this, &MediaDecoder::OnPlaybackEvent);
@ -695,7 +700,8 @@ MediaDecoder::Play()
}
nsresult
MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType, dom::Promise* aPromise /*=nullptr*/)
MediaDecoder::Seek(double aTime, SeekTarget::Type aSeekType,
dom::Promise* aPromise /*=nullptr*/)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
@ -813,7 +819,8 @@ MediaDecoder::MetadataLoaded(nsAutoPtr<MediaInfo> aInfo,
// our new size.
if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
mFiredMetadataLoaded = true;
GetOwner()->MetadataLoaded(mInfo, nsAutoPtr<const MetadataTags>(aTags.forget()));
GetOwner()->MetadataLoaded(mInfo,
nsAutoPtr<const MetadataTags>(aTags.forget()));
}
// Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
// dimensions retrieved from the video frame container. The video frame
@ -838,15 +845,17 @@ MediaDecoder::EnsureTelemetryReported()
}
nsTArray<nsCString> codecs;
if (mInfo->HasAudio() && !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
if (mInfo->HasAudio()
&& !mInfo->mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) {
codecs.AppendElement(mInfo->mAudio.GetAsAudioInfo()->mMimeType);
}
if (mInfo->HasVideo() && !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
if (mInfo->HasVideo()
&& !mInfo->mVideo.GetAsVideoInfo()->mMimeType.IsEmpty()) {
codecs.AppendElement(mInfo->mVideo.GetAsVideoInfo()->mMimeType);
}
if (codecs.IsEmpty()) {
codecs.AppendElement(nsPrintfCString("resource; %s",
mResource->GetContentType().OriginalString().Data()));
codecs.AppendElement(nsPrintfCString(
"resource; %s", mResource->GetContentType().OriginalString().Data()));
}
for (const nsCString& codec : codecs) {
DECODER_LOG("Telemetry MEDIA_CODEC_USED= '%s'", codec.get());
@ -870,9 +879,10 @@ MediaDecoder::FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d mPlayState=%s",
aInfo->mAudio.mChannels, aInfo->mAudio.mRate,
aInfo->HasAudio(), aInfo->HasVideo(), PlayStateStr());
DECODER_LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d "
"mPlayState=%s",
aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
aInfo->HasVideo(), PlayStateStr());
mInfo = aInfo.forget();
@ -962,8 +972,10 @@ already_AddRefed<GMPCrashHelper>
MediaDecoder::GetCrashHelper()
{
MOZ_ASSERT(NS_IsMainThread());
return GetOwner()->GetMediaElement() ?
MakeAndAddRef<MediaElementGMPCrashHelper>(GetOwner()->GetMediaElement()) : nullptr;
return GetOwner()->GetMediaElement()
? MakeAndAddRef<MediaElementGMPCrashHelper>(
GetOwner()->GetMediaElement())
: nullptr;
}
bool
@ -999,8 +1011,8 @@ MediaDecoder::PlaybackEnded()
InvalidateWithFlags(VideoFrameContainer::INVALIDATE_FORCE);
GetOwner()->PlaybackEnded();
// This must be called after |GetOwner()->PlaybackEnded()| call above, in order
// to fire the required durationchange.
// This must be called after |GetOwner()->PlaybackEnded()| call above, in
// order to fire the required durationchange.
if (IsInfinite()) {
SetInfinite(false);
}
@ -1013,7 +1025,8 @@ MediaDecoder::GetStatistics()
MOZ_ASSERT(mResource);
MediaStatistics result;
result.mDownloadRate = mResource->GetDownloadRate(&result.mDownloadRateReliable);
result.mDownloadRate =
mResource->GetDownloadRate(&result.mDownloadRateReliable);
result.mDownloadPosition = mResource->GetCachedDataEnd(mDecoderPosition);
result.mTotalBytes = mResource->GetLength();
result.mPlaybackRate = mPlaybackBytesPerSecond;
@ -1030,7 +1043,8 @@ MediaDecoder::ComputePlaybackRate()
MOZ_ASSERT(mResource);
int64_t length = mResource->GetLength();
if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration) && length >= 0) {
if (!IsNaN(mDuration) && !mozilla::IsInfinite<double>(mDuration)
&& length >= 0) {
mPlaybackRateReliable = true;
mPlaybackBytesPerSecond = length / mDuration;
return;
@ -1199,7 +1213,8 @@ MediaDecoder::UpdateLogicalPositionInternal()
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
double currentPosition = static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
double currentPosition =
static_cast<double>(CurrentPosition()) / static_cast<double>(USECS_PER_S);
if (mPlayState == PLAY_STATE_ENDED) {
currentPosition = std::max(currentPosition, mDuration);
}
@ -1243,8 +1258,9 @@ MediaDecoder::DurationChanged()
// See https://www.w3.org/Bugs/Public/show_bug.cgi?id=28822 for a discussion
// of whether we should fire durationchange on explicit infinity.
if (mFiredMetadataLoaded &&
(!mozilla::IsInfinite<double>(mDuration) || mExplicitDuration.Ref().isSome())) {
if (mFiredMetadataLoaded
&& (!mozilla::IsInfinite<double>(mDuration)
|| mExplicitDuration.Ref().isSome())) {
GetOwner()->DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
}
@ -1282,8 +1298,10 @@ MediaDecoder::UpdateEstimatedMediaDuration(int64_t aDuration)
// the current estimate, as the incoming duration is an estimate and so
// often is unstable as more data is read and the estimate is updated.
// Can result in a durationchange event. aDuration is in microseconds.
if (mEstimatedDuration.Ref().isSome() &&
mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds() - aDuration) < ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
if (mEstimatedDuration.Ref().isSome()
&& mozilla::Abs(mEstimatedDuration.Ref().ref().ToMicroseconds()
- aDuration)
< ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
return;
}
@ -1327,9 +1345,9 @@ MediaDecoder::GetSeekable()
} else {
return media::TimeIntervals(
media::TimeInterval(media::TimeUnit::FromMicroseconds(0),
IsInfinite() ?
media::TimeUnit::FromInfinity() :
media::TimeUnit::FromSeconds(GetDuration())));
IsInfinite()
? media::TimeUnit::FromInfinity()
: media::TimeUnit::FromSeconds(GetDuration())));
}
}
@ -1338,7 +1356,8 @@ MediaDecoder::SetFragmentEndTime(double aTime)
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
mDecoderStateMachine->DispatchSetFragmentEndTime(static_cast<int64_t>(aTime * USECS_PER_S));
mDecoderStateMachine->DispatchSetFragmentEndTime(
static_cast<int64_t>(aTime * USECS_PER_S));
}
}
@ -1443,7 +1462,8 @@ MediaDecoder::SetStateMachine(MediaDecoderStateMachine* aStateMachine)
ImageContainer*
MediaDecoder::GetImageContainer()
{
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer() : nullptr;
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
: nullptr;
}
void
@ -1465,13 +1485,15 @@ MediaDecoder::Invalidate()
// Constructs the time ranges representing what segments of the media
// are buffered and playable.
media::TimeIntervals
MediaDecoder::GetBuffered() {
MediaDecoder::GetBuffered()
{
MOZ_ASSERT(NS_IsMainThread());
return mBuffered.Ref();
}
size_t
MediaDecoder::SizeOfVideoQueue() {
MediaDecoder::SizeOfVideoQueue()
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
return mDecoderStateMachine->SizeOfVideoQueue();
@ -1480,7 +1502,8 @@ MediaDecoder::SizeOfVideoQueue() {
}
size_t
MediaDecoder::SizeOfAudioQueue() {
MediaDecoder::SizeOfAudioQueue()
{
MOZ_ASSERT(NS_IsMainThread());
if (mDecoderStateMachine) {
return mDecoderStateMachine->SizeOfAudioQueue();
@ -1488,15 +1511,18 @@ MediaDecoder::SizeOfAudioQueue() {
return 0;
}
void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes) {
void MediaDecoder::AddSizeOfResources(ResourceSizes* aSizes)
{
MOZ_ASSERT(NS_IsMainThread());
if (GetResource()) {
aSizes->mByteSize += GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
aSizes->mByteSize +=
GetResource()->SizeOfIncludingThis(aSizes->mMallocSizeOf);
}
}
void
MediaDecoder::NotifyDataArrived() {
MediaDecoder::NotifyDataArrived()
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
mDataArrivedEvent.Notify();
@ -1504,7 +1530,8 @@ MediaDecoder::NotifyDataArrived() {
// Provide access to the state machine object
MediaDecoderStateMachine*
MediaDecoder::GetStateMachine() const {
MediaDecoder::GetStateMachine() const
{
MOZ_ASSERT(NS_IsMainThread());
return mDecoderStateMachine;
}
@ -1592,9 +1619,9 @@ MediaDecoder::IsWebMEnabled()
bool
MediaDecoder::IsAndroidMediaPluginEnabled()
{
return AndroidBridge::Bridge() &&
AndroidBridge::Bridge()->GetAPIVersion() < 16 &&
Preferences::GetBool("media.plugins.enabled");
return AndroidBridge::Bridge()
&& AndroidBridge::Bridge()->GetAPIVersion() < 16
&& Preferences::GetBool("media.plugins.enabled");
}
#endif
@ -1602,8 +1629,6 @@ NS_IMETHODIMP
MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize)
{
int64_t video = 0, audio = 0;
// NB: When resourceSizes' ref count goes to 0 the promise will report the
// resources' memory and finish the asynchronous memory report.
RefPtr<MediaDecoder::ResourceSizes> resourceSizes =
@ -1613,7 +1638,8 @@ MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
nsCOMPtr<nsISupports> data = aData;
resourceSizes->Promise()->Then(
// Non-DocGroup version of AbstractThread::MainThread is fine for memory report.
// Non-DocGroup version of AbstractThread::MainThread is fine for memory
// report.
AbstractThread::MainThread(),
__func__,
[handleReport, data] (size_t size) {
@ -1633,6 +1659,8 @@ MediaMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport,
},
[] (size_t) { /* unused reject function */ });
int64_t video = 0;
int64_t audio = 0;
DecodersArray& decoders = Decoders();
for (size_t i = 0; i < decoders.Length(); ++i) {
MediaDecoder* decoder = decoders[i];
@ -1731,11 +1759,13 @@ MediaDecoder::NextFrameBufferedStatus()
// Use the buffered range to consider if we have the next frame available.
media::TimeUnit currentPosition =
media::TimeUnit::FromMicroseconds(CurrentPosition());
media::TimeInterval interval(currentPosition,
currentPosition + media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
media::TimeInterval interval(
currentPosition,
currentPosition
+ media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
return GetBuffered().Contains(interval)
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
nsCString
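
As context for NextFrameBufferedStatus above, a sketch of the containment test it performs; the 250 ms lookahead is a hypothetical stand-in for DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED:

  // Does the buffered range cover [aPosition, aPosition + lookahead]?
  static bool HaveNextFrameBuffered(const media::TimeIntervals& aBuffered,
                                    const media::TimeUnit& aPosition)
  {
    media::TimeInterval window(
      aPosition, aPosition + media::TimeUnit::FromMicroseconds(250000));
    return aBuffered.Contains(window);
  }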


@ -62,7 +62,8 @@ class MediaDecoder : public AbstractMediaDecoder
public:
// Used to register with MediaResource to receive notifications which will
// be forwarded to MediaDecoder.
class ResourceCallback : public MediaResourceCallback {
class ResourceCallback : public MediaResourceCallback
{
// Throttle calls to MediaDecoder::NotifyDataArrived()
// to be at most once per 500ms.
static const uint32_t sDelay = 500;
@ -96,12 +97,15 @@ public:
const RefPtr<AbstractThread> mAbstractMainThread;
};
typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */, /* IsExclusive = */ true> SeekPromise;
typedef MozPromise<bool /* aIgnored */, bool /* aIgnored */,
/* IsExclusive = */ true>
SeekPromise;
NS_DECL_THREADSAFE_ISUPPORTS
// Enumeration for the valid play states (see mPlayState)
enum PlayState {
enum PlayState
{
PLAY_STATE_START,
PLAY_STATE_LOADING,
PLAY_STATE_PAUSED,
@ -207,7 +211,8 @@ public:
// Add an output stream. All decoder output will be sent to the stream.
// The stream is initially blocked. The decoder is responsible for unblocking
// it while it is playing back.
virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
virtual void AddOutputStream(ProcessedMediaStream* aStream,
bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
virtual void RemoveOutputStream(MediaStream* aStream);
@ -430,7 +435,9 @@ private:
return mAbstractMainThread;
}
typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */, /* IsExclusive = */ true> CDMProxyPromise;
typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */,
/* IsExclusive = */ true>
CDMProxyPromise;
// Resolved when a CDMProxy is available and the capabilities are known or
// rejected when this decoder is about to shut down.
@ -476,12 +483,15 @@ private:
GetOwner()->UpdateReadyState();
}
virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus() { return mNextFrameStatus; }
virtual MediaDecoderOwner::NextFrameStatus NextFrameStatus()
{
return mNextFrameStatus;
}
virtual MediaDecoderOwner::NextFrameStatus NextFrameBufferedStatus();
// Returns a string describing the state of the media player's internal
// data. Used for debugging purposes.
virtual void GetMozDebugReaderData(nsACString& aString) {}
virtual void GetMozDebugReaderData(nsACString& aString) { }
virtual void DumpDebugInfo();
@ -787,45 +797,46 @@ protected:
public:
AbstractCanonical<media::NullableTimeUnit>* CanonicalDurationOrNull() override;
AbstractCanonical<double>* CanonicalVolume() {
return &mVolume;
}
AbstractCanonical<bool>* CanonicalPreservesPitch() {
AbstractCanonical<double>* CanonicalVolume() { return &mVolume; }
AbstractCanonical<bool>* CanonicalPreservesPitch()
{
return &mPreservesPitch;
}
AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration() {
AbstractCanonical<media::NullableTimeUnit>* CanonicalEstimatedDuration()
{
return &mEstimatedDuration;
}
AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration() {
AbstractCanonical<Maybe<double>>* CanonicalExplicitDuration()
{
return &mExplicitDuration;
}
AbstractCanonical<PlayState>* CanonicalPlayState() {
return &mPlayState;
}
AbstractCanonical<PlayState>* CanonicalNextPlayState() {
return &mNextState;
}
AbstractCanonical<bool>* CanonicalLogicallySeeking() {
AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
AbstractCanonical<PlayState>* CanonicalNextPlayState() { return &mNextState; }
AbstractCanonical<bool>* CanonicalLogicallySeeking()
{
return &mLogicallySeeking;
}
AbstractCanonical<bool>* CanonicalSameOriginMedia() {
AbstractCanonical<bool>* CanonicalSameOriginMedia()
{
return &mSameOriginMedia;
}
AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle() {
AbstractCanonical<PrincipalHandle>* CanonicalMediaPrincipalHandle()
{
return &mMediaPrincipalHandle;
}
AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond() {
AbstractCanonical<double>* CanonicalPlaybackBytesPerSecond()
{
return &mPlaybackBytesPerSecond;
}
AbstractCanonical<bool>* CanonicalPlaybackRateReliable() {
AbstractCanonical<bool>* CanonicalPlaybackRateReliable()
{
return &mPlaybackRateReliable;
}
AbstractCanonical<int64_t>* CanonicalDecoderPosition() {
AbstractCanonical<int64_t>* CanonicalDecoderPosition()
{
return &mDecoderPosition;
}
AbstractCanonical<bool>* CanonicalIsVisible() {
return &mIsVisible;
}
AbstractCanonical<bool>* CanonicalIsVisible() { return &mIsVisible; }
private:
// Notify owner when the audible state changed


@ -29,13 +29,16 @@ class MediaDecoderReader;
struct WaitForDataRejectValue
{
enum Reason {
enum Reason
{
SHUTDOWN,
CANCELED
};
WaitForDataRejectValue(MediaData::Type aType, Reason aReason)
:mType(aType), mReason(aReason) {}
:mType(aType), mReason(aReason)
{
}
MediaData::Type mType;
Reason mReason;
};
@ -43,11 +46,11 @@ struct WaitForDataRejectValue
struct SeekRejectValue
{
MOZ_IMPLICIT SeekRejectValue(const MediaResult& aError)
: mType(MediaData::NULL_DATA), mError(aError) {}
: mType(MediaData::NULL_DATA), mError(aError) { }
MOZ_IMPLICIT SeekRejectValue(nsresult aResult)
: mType(MediaData::NULL_DATA), mError(aResult) {}
: mType(MediaData::NULL_DATA), mError(aResult) { }
SeekRejectValue(MediaData::Type aType, const MediaResult& aError)
: mType(aType), mError(aError) {}
: mType(aType), mError(aError) { }
MediaData::Type mType;
MediaResult mError;
};
@ -60,7 +63,7 @@ public:
nsAutoPtr<MetadataTags> mTags;
private:
virtual ~MetadataHolder() {}
virtual ~MetadataHolder() { }
};
// Encapsulates the decoding and reading of media data. Reading can either
@ -69,7 +72,8 @@ private:
// callback.
// Unless otherwise specified, methods and fields of this class can only
// be accessed on the decode task queue.
class MediaDecoderReader {
class MediaDecoderReader
{
friend class ReRequestVideoWithSkipTask;
friend class ReRequestAudioTask;
@ -104,7 +108,7 @@ public:
// Called by MDSM in dormant state to release resources allocated by this
// reader. The reader can resume decoding by calling Seek() to a specific
// position.
virtual void ReleaseResources() {}
virtual void ReleaseResources() { }
// Destroys the decoding state. The reader cannot be made usable again.
// This is different from ReleaseMediaResources() as it is irreversible,
@ -128,8 +132,9 @@ public:
//
// aTracks is a set of TrackInfo::TrackType enums specifying which
// queues need to be reset, defaulting to both audio and video tracks.
virtual nsresult ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
TrackInfo::kVideoTrack));
virtual nsresult ResetDecode(
TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
TrackInfo::kVideoTrack));
// Requests one audio sample from the reader.
//
@ -258,7 +263,7 @@ public:
// Switch the video decoder to BlankDecoderModule. It might take effect
// a few samples later, depending on how many demuxed samples are already
// queued in the original video decoder.
virtual void SetVideoBlankDecode(bool aIsBlankDecode) {}
virtual void SetVideoBlankDecode(bool aIsBlankDecode) { }
protected:
virtual ~MediaDecoderReader();
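
As context, a sketch of the promise-based reading that the class comment above describes; the reader and owner thread are assumed, and the resolve/reject types follow MediaDataPromise (RefPtr<MediaData> on resolve, MediaResult on reject) as defined in the full header:

  reader->RequestVideoData(false /* aSkipToNextKeyframe */,
                           0 /* aTimeThreshold, microseconds */)
    ->Then(ownerThread, __func__, // hypothetical AbstractThread*
           [](RefPtr<MediaData> aVideo) {
             // a decoded frame; aVideo->mTime is its presentation time
           },
           [](const MediaResult& aError) {
             // e.g. NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA
           });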

(File diff suppressed because it is too large.)


@ -114,7 +114,8 @@ class TaskQueue;
extern LazyLogModule gMediaDecoderLog;
extern LazyLogModule gMediaSampleLog;
enum class MediaEventType : int8_t {
enum class MediaEventType : int8_t
{
PlaybackStarted,
PlaybackStopped,
PlaybackEnded,
@ -150,7 +151,8 @@ public:
nsresult Init(MediaDecoder* aDecoder);
// Enumeration for the valid decoding states
enum State {
enum State
{
DECODER_STATE_DECODING_METADATA,
DECODER_STATE_WAIT_FOR_CDM,
DECODER_STATE_DORMANT,
@ -430,7 +432,8 @@ protected:
// [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
// not start at 0. Note this is different from the "current playback position",
// which is in the range [0,duration].
int64_t GetMediaTime() const {
int64_t GetMediaTime() const
{
MOZ_ASSERT(OnTaskQueue());
return mCurrentPosition;
}
@ -494,7 +497,11 @@ private:
UniquePtr<StateObject> mStateObj;
media::TimeUnit Duration() const { MOZ_ASSERT(OnTaskQueue()); return mDuration.Ref().ref(); }
media::TimeUnit Duration() const
{
MOZ_ASSERT(OnTaskQueue());
return mDuration.Ref().ref();
}
// Recomputes the canonical duration from various sources.
void RecomputeDuration();
@ -515,8 +522,8 @@ private:
bool IsLogicallyPlaying()
{
MOZ_ASSERT(OnTaskQueue());
return mPlayState == MediaDecoder::PLAY_STATE_PLAYING ||
mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
return mPlayState == MediaDecoder::PLAY_STATE_PLAYING
|| mNextPlayState == MediaDecoder::PLAY_STATE_PLAYING;
}
// Media Fragment end time in microseconds. Access controlled by decoder monitor.
@ -751,22 +758,25 @@ private:
public:
AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration()
{
return &mDuration;
}
AbstractCanonical<bool>* CanonicalIsShutdown() {
return &mIsShutdown;
}
AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus() {
AbstractCanonical<bool>* CanonicalIsShutdown() { return &mIsShutdown; }
AbstractCanonical<NextFrameStatus>* CanonicalNextFrameStatus()
{
return &mNextFrameStatus;
}
AbstractCanonical<int64_t>* CanonicalCurrentPosition() {
AbstractCanonical<int64_t>* CanonicalCurrentPosition()
{
return &mCurrentPosition;
}
AbstractCanonical<int64_t>* CanonicalPlaybackOffset() {
AbstractCanonical<int64_t>* CanonicalPlaybackOffset()
{
return &mPlaybackOffset;
}
AbstractCanonical<bool>* CanonicalIsAudioDataAudible() {
AbstractCanonical<bool>* CanonicalIsAudioDataAudible()
{
return &mIsAudioDataAudible;
}
};


@ -102,9 +102,7 @@ StaticMutex DecoderAllocPolicy::sMutex;
class DecoderAllocPolicy::AutoDeallocToken : public Token
{
public:
explicit AutoDeallocToken(TrackType aTrack)
: mTrack(aTrack)
{}
explicit AutoDeallocToken(TrackType aTrack) : mTrack(aTrack) { }
private:
~AutoDeallocToken()
@ -120,7 +118,8 @@ DecoderAllocPolicy::DecoderAllocPolicy(TrackType aTrack)
, mDecoderLimit(MediaPrefs::MediaDecoderLimit())
, mTrack(aTrack)
{
// Non DocGroup-version AbstractThread::MainThread is fine for ClearOnShutdown().
// Non DocGroup-version AbstractThread::MainThread is fine for
// ClearOnShutdown().
AbstractThread::MainThread()->Dispatch(NS_NewRunnableFunction([this] () {
ClearOnShutdown(this, ShutdownPhase::ShutdownThreads);
}));
@ -197,13 +196,13 @@ class MediaFormatReader::DecoderFactory
using Token = DecoderAllocPolicy::Token;
public:
explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) {}
explicit DecoderFactory(MediaFormatReader* aOwner) : mOwner(aOwner) { }
void CreateDecoder(TrackType aTrack);
// Shut down any decoder pending initialization.
RefPtr<ShutdownPromise> ShutdownDecoder(TrackType aTrack)
{
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
aTrack == TrackInfo::kVideoTrack);
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
|| aTrack == TrackInfo::kVideoTrack);
auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
data.mTokenRequest.DisconnectIfExists();
data.mInitRequest.DisconnectIfExists();
@ -253,8 +252,8 @@ private:
void
MediaFormatReader::DecoderFactory::CreateDecoder(TrackType aTrack)
{
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
aTrack == TrackInfo::kVideoTrack);
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
|| aTrack == TrackInfo::kVideoTrack);
RunStage(aTrack);
}
@ -370,7 +369,8 @@ MediaFormatReader::DecoderFactory::DoCreateDecoder(TrackType aTrack)
auto& data = aTrack == TrackInfo::kAudioTrack ? mAudio : mVideo;
auto decoderCreatingError = "error creating audio decoder";
MediaResult result = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
MediaResult result =
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, decoderCreatingError);
if (!mOwner->mPlatform) {
mOwner->mPlatform = new PDMFactory();
@ -475,10 +475,10 @@ class MediaFormatReader::DemuxerProxy
class Wrapper;
public:
explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* mainThread)
explicit DemuxerProxy(MediaDataDemuxer* aDemuxer, AbstractThread* aMainThread)
: mTaskQueue(new AutoTaskQueue(
GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
mainThread))
aMainThread))
, mData(new Data(aDemuxer))
{
MOZ_COUNT_CTOR(DemuxerProxy);
@ -579,7 +579,8 @@ private:
explicit Data(MediaDataDemuxer* aDemuxer)
: mInitDone(false)
, mDemuxer(aDemuxer)
{ }
{
}
Atomic<bool> mInitDone;
// Only ever accessed over mTaskQueue once.
@ -609,7 +610,8 @@ public:
, mGetSamplesMayBlock(aTrackDemuxer->GetSamplesMayBlock())
, mInfo(aTrackDemuxer->GetInfo())
, mTrackDemuxer(aTrackDemuxer)
{ }
{
}
UniquePtr<TrackInfo> GetInfo() const override
{
@ -1045,7 +1047,8 @@ public:
, mInitDataType(aInitDataType)
{
}
NS_IMETHOD Run() override {
NS_IMETHOD Run() override
{
// Note: Null check the owner, as the decoder could have been shut down
// since this event was dispatched.
MediaDecoderOwner* owner = mDecoder->GetOwner();
@ -1121,8 +1124,8 @@ MediaFormatReader::OnDemuxerInitDone(nsresult)
}
// To decode, we need valid video and a place to put it.
bool videoActive = !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) &&
GetImageContainer();
bool videoActive =
!!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) && GetImageContainer();
if (videoActive) {
// We currently only handle the first video track.
@ -1135,7 +1138,8 @@ MediaFormatReader::OnDemuxerInitDone(nsresult)
UniquePtr<TrackInfo> videoInfo = mVideo.mTrackDemuxer->GetInfo();
videoActive = videoInfo && videoInfo->IsValid();
if (videoActive) {
if (platform && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
if (platform
&& !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) {
// We have no decoder for this track. Error.
mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
return;
@ -1162,9 +1166,11 @@ MediaFormatReader::OnDemuxerInitDone(nsresult)
UniquePtr<TrackInfo> audioInfo = mAudio.mTrackDemuxer->GetInfo();
// We actively ignore audio tracks that we know we can't play.
audioActive = audioInfo && audioInfo->IsValid() &&
(!platform ||
platform->SupportsMimeType(audioInfo->mMimeType, nullptr));
audioActive =
audioInfo
&& audioInfo->IsValid()
&& (!platform || platform->SupportsMimeType(audioInfo->mMimeType,
nullptr));
if (audioActive) {
mInfo.mAudio = *audioInfo->GetAsAudioInfo();
@ -1184,7 +1190,8 @@ MediaFormatReader::OnDemuxerInitDone(nsresult)
// Try to dispatch 'encrypted'. It won't be sent if the ready state is
// still HAVE_NOTHING.
for (uint32_t i = 0; i < crypto->mInitDatas.Length(); i++) {
NS_DispatchToMainThread(
new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData, crypto->mInitDatas[i].mType));
new DispatchKeyNeededEvent(mDecoder, crypto->mInitDatas[i].mInitData,
crypto->mInitDatas[i].mType));
}
mInfo.mCrypto = *crypto;
}
@ -1234,8 +1241,8 @@ MediaFormatReader::MaybeResolveMetadataPromise()
{
MOZ_ASSERT(OnTaskQueue());
if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing()) ||
(HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
if ((HasAudio() && mAudio.mFirstDemuxedSampleTime.isNothing())
|| (HasVideo() && mVideo.mFirstDemuxedSampleTime.isNothing())) {
return;
}
@ -1262,8 +1269,8 @@ MediaFormatReader::MaybeResolveMetadataPromise()
bool
MediaFormatReader::IsEncrypted() const
{
return (HasAudio() && mInfo.mAudio.mCrypto.mValid) ||
(HasVideo() && mInfo.mVideo.mCrypto.mValid);
return (HasAudio() && mInfo.mAudio.mCrypto.mValid)
|| (HasVideo() && mInfo.mVideo.mCrypto.mValid);
}
void
@ -1282,8 +1289,8 @@ MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo)
MediaFormatReader::DecoderData&
MediaFormatReader::GetDecoderData(TrackType aTrack)
{
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack ||
aTrack == TrackInfo::kVideoTrack);
MOZ_ASSERT(aTrack == TrackInfo::kAudioTrack
|| aTrack == TrackInfo::kVideoTrack);
if (aTrack == TrackInfo::kAudioTrack) {
return mAudio;
}
@ -1291,7 +1298,8 @@ MediaFormatReader::GetDecoderData(TrackType aTrack)
}
bool
MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold)
MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe,
media::TimeUnit aTimeThreshold)
{
MOZ_ASSERT(HasVideo());
media::TimeUnit nextKeyframe;
@ -1300,9 +1308,10 @@ MediaFormatReader::ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThr
return aSkipToNextKeyframe;
}
return (nextKeyframe < aTimeThreshold ||
(mVideo.mTimeThreshold &&
mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold)) &&
nextKeyframe.ToMicroseconds() >= 0 && !nextKeyframe.IsInfinite();
(mVideo.mTimeThreshold
&& mVideo.mTimeThreshold.ref().EndTime() < aTimeThreshold))
&& nextKeyframe.ToMicroseconds() >= 0
&& !nextKeyframe.IsInfinite();
}
RefPtr<MediaDecoderReader::MediaDataPromise>
@ -1310,33 +1319,38 @@ MediaFormatReader::RequestVideoData(bool aSkipToNextKeyframe,
int64_t aTimeThreshold)
{
MOZ_ASSERT(OnTaskQueue());
MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(), "No sample requests allowed while seeking");
MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
"No sample requests allowed while seeking");
MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests");
MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() ||
mVideo.mTimeThreshold.isSome());
MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists()
|| mVideo.mTimeThreshold.isSome());
MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek");
LOGV("RequestVideoData(%d, %lld)", aSkipToNextKeyframe, aTimeThreshold);
if (!HasVideo()) {
LOG("called with no video track");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
if (IsSeeking()) {
LOG("called mid-seek. Rejecting.");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
if (mShutdown) {
NS_WARNING("RequestVideoData on shutdown MediaFormatReader!");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
media::TimeUnit timeThreshold{media::TimeUnit::FromMicroseconds(aTimeThreshold)};
media::TimeUnit timeThreshold{ media::TimeUnit::FromMicroseconds(
aTimeThreshold) };
// Ensure there is no pending seek, as ShouldSkip could return out-of-date
// information.
if (!mVideo.HasInternalSeekPending() &&
ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
if (!mVideo.HasInternalSeekPending()
&& ShouldSkip(aSkipToNextKeyframe, timeThreshold)) {
RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__);
SkipVideoDemuxToNextKeyFrame(timeThreshold);
return p;
@ -1403,11 +1417,14 @@ MediaFormatReader::DoDemuxVideo()
}
void
MediaFormatReader::OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
MediaFormatReader::OnVideoDemuxCompleted(
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
{
LOGV("%d video samples demuxed (sid:%d)",
aSamples->mSamples.Length(),
aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
aSamples->mSamples[0]->mTrackInfo
? aSamples->mSamples[0]->mTrackInfo->GetID()
: 0);
mVideo.mDemuxRequest.Complete();
mVideo.mQueuedSamples.AppendElements(aSamples->mSamples);
ScheduleUpdate(TrackInfo::kVideoTrack);
@ -1420,25 +1437,28 @@ MediaFormatReader::RequestAudioData()
MOZ_DIAGNOSTIC_ASSERT(!mAudio.HasPromise(), "No duplicate sample requests");
MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || mSeekPromise.IsEmpty(),
"No sample requests allowed while seeking");
MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() ||
!mAudio.mSeekRequest.Exists() ||
mAudio.mTimeThreshold.isSome());
MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking()
|| !mAudio.mSeekRequest.Exists()
|| mAudio.mTimeThreshold.isSome());
MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !IsSeeking(), "called mid-seek");
LOGV("");
if (!HasAudio()) {
LOG("called with no audio track");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
__func__);
}
if (IsSeeking()) {
LOG("called mid-seek. Rejecting.");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
if (mShutdown) {
NS_WARNING("RequestAudioData on shutdown MediaFormatReader!");
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
__func__);
}
RefPtr<MediaDataPromise> p = mAudio.EnsurePromise(__func__);
@ -1470,19 +1490,22 @@ MediaFormatReader::DoDemuxAudio()
}
void
MediaFormatReader::OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
MediaFormatReader::OnAudioDemuxCompleted(
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
{
LOGV("%d audio samples demuxed (sid:%d)",
aSamples->mSamples.Length(),
aSamples->mSamples[0]->mTrackInfo ? aSamples->mSamples[0]->mTrackInfo->GetID() : 0);
aSamples->mSamples[0]->mTrackInfo
? aSamples->mSamples[0]->mTrackInfo->GetID()
: 0);
mAudio.mDemuxRequest.Complete();
mAudio.mQueuedSamples.AppendElements(aSamples->mSamples);
ScheduleUpdate(TrackInfo::kAudioTrack);
}
void
MediaFormatReader::NotifyNewOutput(TrackType aTrack,
const MediaDataDecoder::DecodedData& aResults)
MediaFormatReader::NotifyNewOutput(
TrackType aTrack, const MediaDataDecoder::DecodedData& aResults)
{
MOZ_ASSERT(OnTaskQueue());
auto& decoder = GetDecoderData(aTrack);
@ -1560,13 +1583,13 @@ MediaFormatReader::NeedInput(DecoderData& aDecoder)
// The decoder will not be fed a new raw sample until the current decoding
// request has completed.
return
(aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome()) &&
!aDecoder.HasPendingDrain() &&
!aDecoder.HasFatalError() &&
!aDecoder.mDemuxRequest.Exists() &&
!aDecoder.mOutput.Length() &&
!aDecoder.HasInternalSeekPending() &&
!aDecoder.mDecodeRequest.Exists();
(aDecoder.HasPromise() || aDecoder.mTimeThreshold.isSome())
&& !aDecoder.HasPendingDrain()
&& !aDecoder.HasFatalError()
&& !aDecoder.mDemuxRequest.Exists()
&& !aDecoder.mOutput.Length()
&& !aDecoder.HasInternalSeekPending()
&& !aDecoder.mDecodeRequest.Exists();
}
void
@ -1634,17 +1657,18 @@ MediaFormatReader::UpdateReceivedNewData(TrackType aTrack)
return false;
}
if (!mSeekPromise.IsEmpty() &&
(!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
if (!mSeekPromise.IsEmpty()
&& (!IsVideoSeeking() || aTrack == TrackInfo::kVideoTrack)) {
MOZ_ASSERT(!decoder.HasPromise());
MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.mTimeThreshold) &&
!mVideo.mTimeThreshold,
"InternalSeek must have been aborted when Seek was first called");
MOZ_DIAGNOSTIC_ASSERT((IsVideoSeeking() || !mAudio.HasWaitingPromise()) &&
!mVideo.HasWaitingPromise(),
"Waiting promises must have been rejected when Seek was first called");
if (mVideo.mSeekRequest.Exists() ||
(!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
MOZ_DIAGNOSTIC_ASSERT(
(IsVideoSeeking() || !mAudio.mTimeThreshold) && !mVideo.mTimeThreshold,
"InternalSeek must have been aborted when Seek was first called");
MOZ_DIAGNOSTIC_ASSERT(
(IsVideoSeeking() || !mAudio.HasWaitingPromise())
&& !mVideo.HasWaitingPromise(),
"Waiting promises must have been rejected when Seek was first called");
if (mVideo.mSeekRequest.Exists()
|| (!IsVideoSeeking() && mAudio.mSeekRequest.Exists())) {
// Already waiting for a seek to complete. Nothing more to do.
return true;
}
@ -1717,8 +1741,8 @@ MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
}
void
MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
AbstractMediaDecoder::AutoNotifyDecoded& aA)
MediaFormatReader::HandleDemuxedSamples(
TrackType aTrack, AbstractMediaDecoder::AutoNotifyDecoded& aA)
{
MOZ_ASSERT(OnTaskQueue());
@ -1746,8 +1770,8 @@ MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
RefPtr<SharedTrackInfo> info = sample->mTrackInfo;
if (info && decoder.mLastStreamSourceID != info->GetID()) {
bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling() &&
decoder.mDecoder->SupportDecoderRecycling();
bool supportRecycling = MediaPrefs::MediaDecoderCheckRecycling()
&& decoder.mDecoder->SupportDecoderRecycling();
if (decoder.mNextStreamSourceID.isNothing() ||
decoder.mNextStreamSourceID.ref() != info->GetID()) {
if (!supportRecycling) {
@ -1810,7 +1834,8 @@ MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
}
void
MediaFormatReader::InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget)
MediaFormatReader::InternalSeek(TrackType aTrack,
const InternalSeekTarget& aTarget)
{
MOZ_ASSERT(OnTaskQueue());
LOG("%s internal seek to %f",
@ -1826,8 +1851,9 @@ MediaFormatReader::InternalSeek(TrackType aTrack, const InternalSeekTarget& aTar
[self, aTrack] (media::TimeUnit aTime) {
auto& decoder = self->GetDecoderData(aTrack);
decoder.mSeekRequest.Complete();
MOZ_ASSERT(decoder.mTimeThreshold,
"Seek promise must be disconnected when timethreshold is reset");
MOZ_ASSERT(
decoder.mTimeThreshold,
"Seek promise must be disconnected when timethreshold is reset");
decoder.mTimeThreshold.ref().mHasSeeked = true;
self->SetVideoDecodeThreshold();
self->ScheduleUpdate(aTrack);
@ -1924,10 +1950,10 @@ MediaFormatReader::Update(TrackType aTrack)
return;
}
MOZ_DIAGNOSTIC_ASSERT(!decoder.HasInternalSeekPending() ||
(!decoder.mOutput.Length() &&
!decoder.mQueuedSamples.Length()),
"No frames can be demuxed or decoded while an internal seek is pending");
MOZ_DIAGNOSTIC_ASSERT(
!decoder.HasInternalSeekPending()
|| (!decoder.mOutput.Length() && !decoder.mQueuedSamples.Length()),
"No frames can be demuxed or decoded while an internal seek is pending");
// Record number of frames decoded and parsed. Automatically update the
// stats counters using the AutoNotifyDecoded stack-based class.
@ -1955,7 +1981,8 @@ MediaFormatReader::Update(TrackType aTrack)
}
}
while (decoder.mOutput.Length() && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
while (decoder.mOutput.Length()
&& decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
LOGV("Dropping null data. Time: %lld", decoder.mOutput[0]->mTime);
decoder.mOutput.RemoveElementAt(0);
decoder.mSizeOfQueue -= 1;
@ -1981,7 +2008,8 @@ MediaFormatReader::Update(TrackType aTrack)
if (output->mKeyframe) {
if (mPreviousDecodedKeyframeTime_us < output->mTime) {
// There is a previous keyframe -> Record inter-keyframe stats.
uint64_t segment_us = output->mTime - mPreviousDecodedKeyframeTime_us;
uint64_t segment_us =
output->mTime - mPreviousDecodedKeyframeTime_us;
a.mStats.mInterKeyframeSum_us += segment_us;
a.mStats.mInterKeyframeCount += 1;
if (a.mStats.mInterKeyFrameMax_us < segment_us) {
@ -2013,7 +2041,8 @@ MediaFormatReader::Update(TrackType aTrack)
// last sample decoded.
LOG("Seeking to last sample time: %lld",
decoder.mLastSampleTime.ref().mStart.ToMicroseconds());
InternalSeek(aTrack, InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
InternalSeek(aTrack,
InternalSeekTarget(decoder.mLastSampleTime.ref(), true));
}
if (!decoder.mReceivedNewData) {
LOG("Rejecting %s promise: WAITING_FOR_DATA", TrackTypeToStr(aTrack));
@ -2048,8 +2077,10 @@ MediaFormatReader::Update(TrackType aTrack)
}
if (decoder.mError && !decoder.HasFatalError()) {
bool needsNewDecoder = decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
if (!needsNewDecoder && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
bool needsNewDecoder =
decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER;
if (!needsNewDecoder
&& ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) {
NotifyError(aTrack, decoder.mError.ref());
return;
}
@ -2057,12 +2088,14 @@ MediaFormatReader::Update(TrackType aTrack)
LOG("%s decoded error count %d", TrackTypeToStr(aTrack),
decoder.mNumOfConsecutiveError);
media::TimeUnit nextKeyframe;
if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() &&
NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending()
&& NS_SUCCEEDED(
decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) {
if (needsNewDecoder) {
ShutdownDecoder(aTrack);
}
SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length());
SkipVideoDemuxToNextKeyFrame(
decoder.mLastSampleTime.refOr(TimeInterval()).Length());
} else if (aTrack == TrackType::kAudioTrack) {
decoder.Flush();
}
@ -2312,7 +2345,8 @@ MediaFormatReader::OnVideoSkipCompleted(uint32_t aSkipped)
}
void
MediaFormatReader::OnVideoSkipFailed(MediaTrackDemuxer::SkipFailureHolder aFailure)
MediaFormatReader::OnVideoSkipFailed(
MediaTrackDemuxer::SkipFailureHolder aFailure)
{
MOZ_ASSERT(OnTaskQueue());
LOG("Skipping failed, skipped %u frames", aFailure.mSkipped);
@ -2351,7 +2385,8 @@ MediaFormatReader::Seek(const SeekTarget& aTarget)
MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || !mAudio.HasPromise());
MOZ_DIAGNOSTIC_ASSERT(mPendingSeekTime.isNothing());
MOZ_DIAGNOSTIC_ASSERT(mVideo.mTimeThreshold.isNothing());
MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly() || mAudio.mTimeThreshold.isNothing());
MOZ_DIAGNOSTIC_ASSERT(aTarget.IsVideoOnly()
|| mAudio.mTimeThreshold.isNothing());
if (!mInfo.mMediaSeekable && !mInfo.mMediaSeekableOnlyInBufferedRanges) {
LOG("Seek() END (Unseekable)");
@ -2387,7 +2422,8 @@ MediaFormatReader::ScheduleSeek()
return;
}
mSeekScheduled = true;
OwnerThread()->Dispatch(NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
OwnerThread()->Dispatch(
NewRunnableMethod(this, &MediaFormatReader::AttemptSeek));
}
void
@ -2435,9 +2471,10 @@ MediaFormatReader::OnSeekFailed(TrackType aTrack, const MediaResult& aError)
}
if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
if (HasVideo() && aTrack == TrackType::kAudioTrack &&
mFallbackSeekTime.isSome() &&
mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
if (HasVideo()
&& aTrack == TrackType::kAudioTrack
&& mFallbackSeekTime.isSome()
&& mPendingSeekTime.ref() != mFallbackSeekTime.ref()) {
// We have failed to seek audio where video seeked to earlier.
// Attempt to seek instead to the closest point that we know we have in
// order to limit A/V sync discrepancy.
@ -2468,8 +2505,8 @@ MediaFormatReader::OnSeekFailed(TrackType aTrack, const MediaResult& aError)
MOZ_ASSERT(!mVideo.mSeekRequest.Exists() && !mAudio.mSeekRequest.Exists());
mPendingSeekTime.reset();
auto type = aTrack == TrackType::kAudioTrack
? MediaData::AUDIO_DATA : MediaData::VIDEO_DATA;
auto type = aTrack == TrackType::kAudioTrack ? MediaData::AUDIO_DATA
: MediaData::VIDEO_DATA;
mSeekPromise.Reject(SeekRejectValue(type, aError), __func__);
}
@ -2627,8 +2664,9 @@ MediaFormatReader::NotifyDataArrived()
{
MOZ_ASSERT(OnTaskQueue());
if (mShutdown || !mDemuxer ||
(!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
if (mShutdown
|| !mDemuxer
|| (!mDemuxerInitDone && !mDemuxerInitRequest.Exists())) {
return;
}
@ -2717,8 +2755,8 @@ MediaFormatReader::UpdateBuffered()
layers::ImageContainer*
MediaFormatReader::GetImageContainer()
{
return mVideoFrameContainer
? mVideoFrameContainer->GetImageContainer() : nullptr;
return mVideoFrameContainer ? mVideoFrameContainer->GetImageContainer()
: nullptr;
}
void
@ -2741,44 +2779,37 @@ MediaFormatReader::GetMozDebugReaderData(nsACString& aString)
result += nsPrintfCString("audio frames decoded: %lld\n",
mAudio.mNumSamplesOutputTotal);
if (HasAudio()) {
result += nsPrintfCString("audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mAudio), mAudio.HasPromise(),
mAudio.mDemuxRequest.Exists(),
int(mAudio.mQueuedSamples.Length()),
mAudio.mTimeThreshold
? mAudio.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mAudio.mTimeThreshold
? mAudio.mTimeThreshold.ref().mHasSeeked
: -1,
mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
unsigned(size_t(mAudio.mSizeOfQueue)),
unsigned(mAudio.mOutput.Length()),
mAudio.mWaitingForData,
mAudio.mLastStreamSourceID);
result += nsPrintfCString(
"audio state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
"out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mAudio), mAudio.HasPromise(), mAudio.mDemuxRequest.Exists(),
int(mAudio.mQueuedSamples.Length()),
mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().mHasSeeked : -1,
mAudio.mNumSamplesInput, mAudio.mNumSamplesOutput,
unsigned(size_t(mAudio.mSizeOfQueue)), unsigned(mAudio.mOutput.Length()),
mAudio.mWaitingForData, mAudio.mLastStreamSourceID);
}
result += nsPrintfCString("video decoder: %s\n", videoName);
result += nsPrintfCString("hardware video decoding: %s\n",
VideoIsHardwareAccelerated() ? "enabled" : "disabled");
result +=
nsPrintfCString("hardware video decoding: %s\n",
VideoIsHardwareAccelerated() ? "enabled" : "disabled");
result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n",
mVideo.mNumSamplesOutputTotal,
mVideo.mNumSamplesSkippedTotal);
if (HasVideo()) {
result += nsPrintfCString("video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mVideo), mVideo.HasPromise(),
mVideo.mDemuxRequest.Exists(),
int(mVideo.mQueuedSamples.Length()),
mVideo.mTimeThreshold
? mVideo.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mVideo.mTimeThreshold
? mVideo.mTimeThreshold.ref().mHasSeeked
: -1,
mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
unsigned(size_t(mVideo.mSizeOfQueue)),
unsigned(mVideo.mOutput.Length()),
mVideo.mWaitingForData,
mVideo.mLastStreamSourceID);
result += nsPrintfCString(
"video state: ni=%d no=%d demuxr:%d demuxq:%d tt:%f tths:%d in:%llu "
"out:%llu qs=%u pending:%u waiting:%d sid:%u\n",
NeedInput(mVideo), mVideo.HasPromise(), mVideo.mDemuxRequest.Exists(),
int(mVideo.mQueuedSamples.Length()),
mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
: -1.0,
mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().mHasSeeked : -1,
mVideo.mNumSamplesInput, mVideo.mNumSamplesOutput,
unsigned(size_t(mVideo.mSizeOfQueue)), unsigned(mVideo.mOutput.Length()),
mVideo.mWaitingForData, mVideo.mLastStreamSourceID);
}
aString += result;
}
@ -2808,8 +2839,8 @@ MediaFormatReader::SetBlankDecode(TrackType aTrack, bool aIsBlankDecode)
}
void
MediaFormatReader::OnFirstDemuxCompleted(TrackInfo::TrackType aType,
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
MediaFormatReader::OnFirstDemuxCompleted(
TrackInfo::TrackType aType, RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples)
{
MOZ_ASSERT(OnTaskQueue());

View File

@ -109,13 +109,15 @@ private:
void DecodeDemuxedSamples(TrackType aTrack,
MediaRawData* aSample);
struct InternalSeekTarget {
struct InternalSeekTarget
{
InternalSeekTarget(const media::TimeInterval& aTime, bool aDropTarget)
: mTime(aTime)
, mDropTarget(aDropTarget)
, mWaiting(false)
, mHasSeeked(false)
{}
{
}
media::TimeUnit Time() const { return mTime.mStart; }
media::TimeUnit EndTime() const { return mTime.mEnd; }
@ -160,7 +162,8 @@ private:
RefPtr<PDMFactory> mPlatform;
struct DecoderData {
struct DecoderData
{
DecoderData(MediaFormatReader* aOwner,
MediaData::Type aType,
uint32_t aNumOfMaxError)
@ -187,7 +190,8 @@ private:
, mIsHardwareAccelerated(false)
, mLastStreamSourceID(UINT32_MAX)
, mIsBlankDecode(false)
{}
{
}
MediaFormatReader* mOwner;
// Disambiguate Audio vs Video.
@ -208,8 +212,8 @@ private:
if (mDecoder) {
RefPtr<MediaFormatReader> owner = mOwner;
TrackType type = mType == MediaData::AUDIO_DATA
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
mDecoder->Shutdown()
->Then(mOwner->OwnerThread(), __func__,
[owner, this, type]() {
@ -343,8 +347,8 @@ private:
if (mDecoder && !mFlushed) {
RefPtr<MediaFormatReader> owner = mOwner;
TrackType type = mType == MediaData::AUDIO_DATA
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
? TrackType::kAudioTrack
: TrackType::kVideoTrack;
mDecoder->Flush()
->Then(mOwner->OwnerThread(), __func__,
[owner, type, this]() {
@ -419,15 +423,16 @@ private:
};
class DecoderDataWithPromise : public DecoderData {
class DecoderDataWithPromise : public DecoderData
{
public:
DecoderDataWithPromise(MediaFormatReader* aOwner,
MediaData::Type aType,
uint32_t aNumOfMaxError)
: DecoderData(aOwner, aType, aNumOfMaxError)
, mHasPromise(false)
{}
{
}
bool HasPromise() const override
{

View File

@ -23,13 +23,15 @@ class AudioInfo;
class VideoInfo;
class TextInfo;
class MetadataTag {
class MetadataTag
{
public:
MetadataTag(const nsACString& aKey,
const nsACString& aValue)
: mKey(aKey)
, mValue(aValue)
{}
{
}
nsCString mKey;
nsCString mValue;
};
@ -37,9 +39,11 @@ public:
// Maximum channel number we can currently handle (7.1)
#define MAX_AUDIO_CHANNELS 8
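// (For reference, 7.1 is eight discrete channels: front left/right, center,
// LFE, surround left/right and back left/right.)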
class TrackInfo {
class TrackInfo
{
public:
enum TrackType {
enum TrackType
{
kUndefinedTrack,
kAudioTrack,
kVideoTrack,
@ -175,9 +179,11 @@ private:
};
// Stores info relevant to presenting media frames.
class VideoInfo : public TrackInfo {
class VideoInfo : public TrackInfo
{
public:
enum Rotation {
enum Rotation
{
kDegree_0 = 0,
kDegree_90 = 90,
kDegree_180 = 180,
@ -272,8 +278,9 @@ public:
// container.
nsIntRect ScaledImageRect(int64_t aWidth, int64_t aHeight) const
{
if ((aWidth == mImage.width && aHeight == mImage.height) ||
!mImage.width || !mImage.height) {
if ((aWidth == mImage.width && aHeight == mImage.height)
|| !mImage.width
|| !mImage.height) {
return ImageRect();
}
nsIntRect imageRect = ImageRect();
@ -325,7 +332,8 @@ private:
bool mAlphaPresent = false;
};
class AudioInfo : public TrackInfo {
class AudioInfo : public TrackInfo
{
public:
AudioInfo()
: TrackInfo(kAudioTrack, NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
@ -392,17 +400,18 @@ public:
RefPtr<MediaByteBuffer> mCodecSpecificConfig;
RefPtr<MediaByteBuffer> mExtraData;
};
class EncryptionInfo {
class EncryptionInfo
{
public:
EncryptionInfo()
: mEncrypted(false)
{
}
struct InitData {
struct InitData
{
template<typename AInitDatas>
InitData(const nsAString& aType, AInitDatas&& aInitData)
: mType(aType)
@ -449,7 +458,8 @@ private:
bool mEncrypted;
};
class MediaInfo {
class MediaInfo
{
public:
bool HasVideo() const
{
@ -484,8 +494,8 @@ public:
bool IsEncrypted() const
{
return (HasAudio() && mAudio.mCrypto.mValid) ||
(HasVideo() && mVideo.mCrypto.mValid);
return (HasAudio() && mAudio.mCrypto.mValid)
|| (HasVideo() && mVideo.mCrypto.mValid);
}
bool HasValidMedia() const
@ -499,8 +509,9 @@ public:
"Audio track ID must be valid");
NS_ASSERTION(!HasVideo() || mVideo.mTrackId != TRACK_INVALID,
"Audio track ID must be valid");
NS_ASSERTION(!HasAudio() || !HasVideo() ||
mAudio.mTrackId != mVideo.mTrackId,
NS_ASSERTION(!HasAudio()
|| !HasVideo()
|| mAudio.mTrackId != mVideo.mTrackId,
"Duplicate track IDs");
}
@ -529,7 +540,8 @@ public:
media::TimeUnit mStartTime;
};
class SharedTrackInfo {
class SharedTrackInfo
{
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedTrackInfo)
public:
SharedTrackInfo(const TrackInfo& aOriginal, uint32_t aStreamID)
@ -571,7 +583,7 @@ public:
}
private:
~SharedTrackInfo() {};
~SharedTrackInfo() { }
UniquePtr<TrackInfo> mInfo;
// A unique ID, guaranteed to change when changing streams.
uint32_t mStreamSourceID;
@ -580,9 +592,11 @@ public:
const nsCString& mMimeType;
};
class AudioConfig {
class AudioConfig
{
public:
enum Channel {
enum Channel
{
CHANNEL_INVALID = -1,
CHANNEL_MONO = 0,
CHANNEL_LEFT,
@ -596,15 +610,14 @@ public:
CHANNEL_LFE,
};
class ChannelLayout {
class ChannelLayout
{
public:
ChannelLayout()
: mChannelMap(0)
, mValid(false)
{}
ChannelLayout() : mChannelMap(0), mValid(false) { }
explicit ChannelLayout(uint32_t aChannels)
: ChannelLayout(aChannels, SMPTEDefault(aChannels))
{}
{
}
ChannelLayout(uint32_t aChannels, const Channel* aConfig)
: ChannelLayout()
{
@ -645,9 +658,7 @@ public:
// the current layout can be easily reordered to aOther.
// aMap must be an array of size MAX_AUDIO_CHANNELS.
bool MappingTable(const ChannelLayout& aOther, uint8_t* aMap = nullptr) const;
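// Illustrative use, as a sketch (the exact direction of the mapping is an
// assumption, not documented here):
//
//   uint8_t map[MAX_AUDIO_CHANNELS];
//   if (srcLayout.MappingTable(dstLayout, map)) {
//     // map[i] would give the position of source channel i in dstLayout.
//   }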
bool IsValid() const {
return mValid;
}
bool IsValid() const { return mValid; }
bool HasChannel(Channel aChannel) const
{
return mChannelMap & (1 << aChannel);
@ -660,7 +671,8 @@ public:
bool mValid;
};
enum SampleFormat {
enum SampleFormat
{
FORMAT_NONE = 0,
FORMAT_U8,
FORMAT_S16,
@ -710,9 +722,10 @@ public:
}
bool operator==(const AudioConfig& aOther) const
{
return mChannelLayout == aOther.mChannelLayout &&
mRate == aOther.mRate && mFormat == aOther.mFormat &&
mInterleaved == aOther.mInterleaved;
return mChannelLayout == aOther.mChannelLayout
&& mRate == aOther.mRate
&& mFormat == aOther.mFormat
&& mInterleaved == aOther.mInterleaved;
}
bool operator!=(const AudioConfig& aOther) const
{

View File

@ -705,8 +705,8 @@ private:
{
MOZ_ASSERT(mRecorder->mAudioNode != nullptr);
nsIDocument* doc = mRecorder->mAudioNode->GetOwner()
? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
: nullptr;
? mRecorder->mAudioNode->GetOwner()->GetExtantDoc()
: nullptr;
nsCOMPtr<nsIPrincipal> principal = doc ? doc->NodePrincipal() : nullptr;
return PrincipalSubsumes(principal);
}

View File

@ -207,8 +207,10 @@ MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
// This logic differs from the code that manipulates aStream->mTracks, so
// the two are deliberately kept separate.
StreamTime offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
StreamTime offset =
(data->mCommands & SourceMediaStream::TRACK_CREATE)
? data->mStart
: aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
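// (That is: a track being created starts at its declared mStart, while an
// existing track appends at the current end of its stored segment.)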
// Audio case.
if (data->mData->GetType() == MediaSegment::AUDIO) {
@ -395,13 +397,14 @@ MediaStreamGraphImpl::ProcessChunkMetadataForInterval(MediaStream* aStream,
PrincipalHandle principalHandle = chunk->GetPrincipalHandle();
if (principalHandle != aSegment.GetLastPrincipalHandle()) {
aSegment.SetLastPrincipalHandle(principalHandle);
STREAM_LOG(LogLevel::Debug, ("MediaStream %p track %d, principalHandle "
"changed in %sChunk with duration %lld",
aStream, aTrackID,
aSegment.GetType() == MediaSegment::AUDIO
? "Audio" : "Video",
(long long) chunk->GetDuration()));
for (const TrackBound<MediaStreamTrackListener>& listener : aStream->mTrackListeners) {
STREAM_LOG(LogLevel::Debug,
("MediaStream %p track %d, principalHandle "
"changed in %sChunk with duration %lld",
aStream, aTrackID,
aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
(long long)chunk->GetDuration()));
for (const TrackBound<MediaStreamTrackListener>& listener :
aStream->mTrackListeners) {
if (listener.mTrackID == aTrackID) {
listener.mListener->NotifyPrincipalHandleChanged(this, principalHandle);
}

View File

@ -55,8 +55,8 @@ DirectMediaStreamTrackListener::NotifyRealtimeTrackDataAndApplyTrackDisabling(Me
}
DisabledTrackMode mode = mDisabledBlackCount > 0
? DisabledTrackMode::SILENCE_BLACK
: DisabledTrackMode::SILENCE_FREEZE;
? DisabledTrackMode::SILENCE_BLACK
: DisabledTrackMode::SILENCE_FREEZE;
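// (Roughly: SILENCE_BLACK renders silence and black frames, while
// SILENCE_FREEZE renders silence but keeps repeating the last video frame.)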
if (!mMedia) {
mMedia = aMedia.CreateEmptyClone();
}

View File

@ -124,9 +124,8 @@ public:
// 1- coded sample number if blocksize is variable or
// 2- coded frame number if blocksize is known.
// A frame is made of Blocksize samples.
mIndex = mVariableBlockSize
? frame_or_sample_num
: frame_or_sample_num * mBlocksize;
mIndex = mVariableBlockSize ? frame_or_sample_num
: frame_or_sample_num * mBlocksize;
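// (Worked example: with a fixed blocksize of 4096, coded frame number 3
// yields mIndex = 3 * 4096 = 12288, the stream position of that frame's
// first sample.)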
// Sample rate.
if (sr_code < 12) {

View File

@ -71,8 +71,10 @@ DecodedAudioDataSink::DecodedAudioDataSink(AbstractThread* aThread,
bool monoAudioEnabled = MediaPrefs::MonoAudio();
mOutputChannels = monoAudioEnabled
? 1 : (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
mOutputChannels =
monoAudioEnabled
? 1
: (MediaPrefs::AudioSinkForceStereo() ? 2 : mInfo.mChannels);
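// (The prefs are applied in that order: a 6-channel 5.1 input is output as
// 1 channel when MonoAudio is set, else as 2 when AudioSinkForceStereo is
// set, else as all 6 channels.)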
}
DecodedAudioDataSink::~DecodedAudioDataSink()

View File

@ -310,7 +310,8 @@ public:
return NS_ERROR_NOT_AVAILABLE;
}
uint64_t frameDuration = (completeIdx + 1u < mapping.Length())
uint64_t frameDuration =
(completeIdx + 1u < mapping.Length())
? mapping[completeIdx + 1].mTimecode - mapping[completeIdx].mTimecode
: mapping[completeIdx].mTimecode - previousMapping.ref().mTimecode;
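// (I.e. the frame's duration is estimated from the gap to the next mapped
// timecode when one exists, otherwise from the gap back to the previous
// mapping.)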
aStart = mapping[0].mTimecode / NS_PER_USEC;

View File

@ -284,8 +284,8 @@ MediaSourceDecoder::NextFrameBufferedStatus()
currentPosition
+ media::TimeUnit::FromMicroseconds(DEFAULT_NEXT_FRAME_AVAILABLE_BUFFERED));
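// The next frame is reported as available only when this whole interval,
// from the current position to the buffered-ahead threshold, lies strictly
// within a buffered range.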
return buffered.ContainsStrict(ClampIntervalToEnd(interval))
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
: MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
}
bool

View File

@ -291,10 +291,9 @@ MediaSourceTrackDemuxer::MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
, mType(aType)
, mMonitor("MediaSourceTrackDemuxer")
, mReset(true)
, mPreRoll(
TimeUnit::FromMicroseconds(
OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType)
? 80000 : 0))
, mPreRoll(TimeUnit::FromMicroseconds(
OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType) ? 80000
: 0))
{
}
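// (The 80 ms pre-roll matches the Ogg Opus recommendation: RFC 7845 advises
// decoding at least 3840 samples, i.e. 80 ms at 48 kHz, after a seek before
// trusting the decoder's output.)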

View File

@ -1319,13 +1319,14 @@ TrackBuffersManager::CompleteCodedFrameProcessing()
// 6. Remove the media segment bytes from the beginning of the input buffer.
// Clear our demuxer of any already-processed data.
int64_t safeToEvict = std::min(
HasVideo()
? mVideoTracks.mDemuxer->GetEvictionOffset(mVideoTracks.mLastParsedEndTime)
: INT64_MAX,
HasAudio()
? mAudioTracks.mDemuxer->GetEvictionOffset(mAudioTracks.mLastParsedEndTime)
: INT64_MAX);
int64_t safeToEvict = std::min(HasVideo()
? mVideoTracks.mDemuxer->GetEvictionOffset(
mVideoTracks.mLastParsedEndTime)
: INT64_MAX,
HasAudio()
? mAudioTracks.mDemuxer->GetEvictionOffset(
mAudioTracks.mLastParsedEndTime)
: INT64_MAX);
ErrorResult rv;
mCurrentInputBuffer->EvictBefore(safeToEvict, rv);
if (rv.Failed()) {
@ -1398,8 +1399,10 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// Let presentation timestamp equal 0.
// Otherwise
// Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit() : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
TimeUnit presentationTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit()
: TimeUnit::FromMicroseconds(aSamples[0]->mTime);
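// (Example: with mGenerateTimestamps set, the first processed frame is
// stamped at 0 regardless of its coded timestamp; otherwise its own mTime
// is used.)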
// 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
CheckSequenceDiscontinuity(presentationTimestamp);
@ -1412,12 +1415,13 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// of +- mLongestFrameDuration on the append window start.
// We only apply the leeway when the append window start is the default 0;
// otherwise we proceed exactly as per the spec.
TimeInterval targetWindow = mAppendWindow.mStart != TimeUnit::FromSeconds(0)
TimeInterval targetWindow =
mAppendWindow.mStart != TimeUnit::FromSeconds(0)
? mAppendWindow
: TimeInterval(mAppendWindow.mStart, mAppendWindow.mEnd,
trackBuffer.mLastFrameDuration.isSome()
? trackBuffer.mLongestFrameDuration
: TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
? trackBuffer.mLongestFrameDuration
: TimeUnit::FromMicroseconds(aSamples[0]->mDuration));
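// (Sketch of the leeway, assuming the third TimeInterval argument acts as a
// fuzz: with the default window start of 0 and a 33 ms first frame, a frame
// starting within 33 ms of the boundary is still admitted.)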
TimeIntervals samplesRange;
uint32_t sizeNewSamples = 0;
@ -1484,13 +1488,12 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
TimeInterval sampleInterval =
mSourceBufferAttributes->mGenerateTimestamps
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
TimeUnit decodeTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
TimeUnit decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
// 6. If last decode timestamp for track buffer is set and decode timestamp is less than last decode timestamp:
// OR
@ -1525,8 +1528,8 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
// Rather than restarting the process for this frame, we run the first
// steps again instead.
// 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
TimeUnit presentationTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? TimeUnit() : sampleTime;
TimeUnit presentationTimestamp =
mSourceBufferAttributes->mGenerateTimestamps ? TimeUnit() : sampleTime;
CheckSequenceDiscontinuity(presentationTimestamp);
if (!sample->mKeyframe) {
@ -1538,13 +1541,12 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
sampleInterval =
mSourceBufferAttributes->mGenerateTimestamps
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
decodeTimestamp =
mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
? TimeInterval(timestampOffset, timestampOffset + sampleDuration)
: TimeInterval(timestampOffset + sampleTime,
timestampOffset + sampleTime + sampleDuration);
decodeTimestamp = mSourceBufferAttributes->mGenerateTimestamps
? timestampOffset
: timestampOffset + sampleTimecode;
}
trackBuffer.mNeedRandomAccessPoint = false;
needDiscontinuityCheck = false;

View File

@ -363,8 +363,8 @@ TheoraState::Init()
int64_t n = mTheoraInfo.aspect_numerator;
int64_t d = mTheoraInfo.aspect_denominator;
float aspectRatio = (n == 0 || d == 0)
? 1.0f : static_cast<float>(n) / static_cast<float>(d);
float aspectRatio =
(n == 0 || d == 0) ? 1.0f : static_cast<float>(n) / static_cast<float>(d);
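// (E.g. a pixel aspect of 16:9 gives aspectRatio ~1.78; a zero numerator or
// denominator falls back to square pixels, 1.0.)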
// Ensure the frame and picture regions are neither larger than our
// prescribed maximum nor zero-sized.

View File

@ -395,10 +395,10 @@ AudioBuffer::StealJSArrayDataIntoSharedChannels(JSContext* aJSContext)
// The channel data arrays should all have originated in
// RestoreJSChannelData, where they are created unshared.
MOZ_ASSERT(!isSharedMemory);
auto stolenData = arrayBuffer
? static_cast<float*>(JS_StealArrayBufferContents(aJSContext,
arrayBuffer))
: nullptr;
auto stolenData =
arrayBuffer ? static_cast<float*>(
JS_StealArrayBufferContents(aJSContext, arrayBuffer))
: nullptr;
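// (On success JS_StealArrayBufferContents detaches the ArrayBuffer, so
// script can no longer observe or mutate the data we now own.)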
if (stolenData) {
result->SetData(i, stolenData, js_free, stolenData);
} else {