Bug 979104 - MediaDecoderReader/StateMachine asynchronous decoding. r=kinetik

This commit is contained in:
Chris Pearce 2014-06-18 17:07:02 +12:00
parent df7cba4e45
commit 05136cdace
22 changed files with 1655 additions and 622 deletions

View File

@ -37,6 +37,7 @@ public:
, mOffset(aOffset)
, mTime(aTimestamp)
, mDuration(aDuration)
, mDiscontinuity(false)
{}
virtual ~MediaData() {}
@ -53,6 +54,10 @@ public:
// Duration of sample, in microseconds.
const int64_t mDuration;
// True if this is the first sample after a gap or discontinuity in
// the stream. This is true for the first sample in a stream after a seek.
bool mDiscontinuity;
int64_t GetEndTime() const { return mTime + mDuration; }
};
@ -207,7 +212,7 @@ public:
// Initialize PlanarYCbCrImage. Only When aCopyData is true,
// video data is copied to PlanarYCbCrImage.
static void SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
VideoInfo& aInfo,
VideoInfo& aInfo,
const YCbCrBuffer &aBuffer,
const IntRect& aPicture,
bool aCopyData);

View File

@ -0,0 +1,148 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MediaDataDecodedListener_h_
#define MediaDataDecodedListener_h_
#include "mozilla/Monitor.h"
#include "MediaDecoderReader.h"
namespace mozilla {
class MediaDecoderStateMachine;
class MediaData;
// A RequestSampleCallback implementation that forwards samples onto the
// MediaDecoderStateMachine via tasks that run on the supplied task queue.
template<class Target>
class MediaDataDecodedListener : public RequestSampleCallback {
public:
  // aTarget receives the forwarded On*Decoded()/On*EOS()/OnDecodeError()
  // notifications; aTaskQueue is the queue the forwarding tasks are
  // dispatched to. Both must be non-null.
  MediaDataDecodedListener(Target* aTarget,
                           MediaTaskQueue* aTaskQueue)
    : mMonitor("MediaDataDecodedListener")
    , mTaskQueue(aTaskQueue)
    , mTarget(aTarget)
  {
    MOZ_ASSERT(aTarget);
    MOZ_ASSERT(aTaskQueue);
  }

  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE {
    MonitorAutoLock lock(mMonitor);
    // Hold the sample in an owning pointer so it's deleted if we've been
    // shutdown and can't deliver it.
    nsAutoPtr<AudioData> sample(aSample);
    if (!mTarget || !mTaskQueue) {
      // We've been shutdown, abort.
      return;
    }
    RefPtr<nsIRunnable> task(new DeliverAudioTask(sample.forget(), mTarget));
    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
      // On failure the task still owns the sample and releases it.
      NS_WARNING("Failed to dispatch OnAudioDecoded task");
    }
  }

  virtual void OnAudioEOS() MOZ_OVERRIDE {
    MonitorAutoLock lock(mMonitor);
    if (!mTarget || !mTaskQueue) {
      // We've been shutdown, abort.
      return;
    }
    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnAudioEOS));
    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
      NS_WARNING("Failed to dispatch OnAudioEOS task");
    }
  }

  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {
    MonitorAutoLock lock(mMonitor);
    // Hold the sample in an owning pointer so it's deleted if we've been
    // shutdown and can't deliver it.
    nsAutoPtr<VideoData> sample(aSample);
    if (!mTarget || !mTaskQueue) {
      // We've been shutdown, abort.
      return;
    }
    RefPtr<nsIRunnable> task(new DeliverVideoTask(sample.forget(), mTarget));
    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
      // On failure the task still owns the sample and releases it.
      NS_WARNING("Failed to dispatch OnVideoDecoded task");
    }
  }

  virtual void OnVideoEOS() MOZ_OVERRIDE {
    MonitorAutoLock lock(mMonitor);
    if (!mTarget || !mTaskQueue) {
      // We've been shutdown, abort.
      return;
    }
    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnVideoEOS));
    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
      NS_WARNING("Failed to dispatch OnVideoEOS task");
    }
  }

  virtual void OnDecodeError() MOZ_OVERRIDE {
    MonitorAutoLock lock(mMonitor);
    if (!mTarget || !mTaskQueue) {
      // We've been shutdown, abort.
      return;
    }
    RefPtr<nsIRunnable> task(NS_NewRunnableMethod(mTarget, &Target::OnDecodeError));
    if (NS_FAILED(mTaskQueue->Dispatch(task))) {
      NS_WARNING("Failed to dispatch OnDecodeError task");
    }
  }

  // Drops the references to the target and task queue to break reference
  // cycles during shutdown. Notifications received after this are silently
  // dropped.
  void BreakCycles() {
    MonitorAutoLock lock(mMonitor);
    mTarget = nullptr;
    mTaskQueue = nullptr;
  }

private:

  // Runnable which delivers one decoded audio sample to the target on the
  // target's task queue. Owns the sample until Run() hands it over.
  class DeliverAudioTask : public nsRunnable {
  public:
    DeliverAudioTask(AudioData* aSample, Target* aTarget)
      : mSample(aSample)
      , mTarget(aTarget)
    {
      MOZ_COUNT_CTOR(DeliverAudioTask);
    }
    ~DeliverAudioTask()
    {
      MOZ_COUNT_DTOR(DeliverAudioTask);
    }
    NS_METHOD Run() {
      mTarget->OnAudioDecoded(mSample.forget());
      return NS_OK;
    }
  private:
    nsAutoPtr<AudioData> mSample;
    RefPtr<Target> mTarget;
  };

  // Runnable which delivers one decoded video sample to the target on the
  // target's task queue. Owns the sample until Run() hands it over.
  class DeliverVideoTask : public nsRunnable {
  public:
    DeliverVideoTask(VideoData* aSample, Target* aTarget)
      : mSample(aSample)
      , mTarget(aTarget)
    {
      MOZ_COUNT_CTOR(DeliverVideoTask);
    }
    ~DeliverVideoTask()
    {
      MOZ_COUNT_DTOR(DeliverVideoTask);
    }
    NS_METHOD Run() {
      mTarget->OnVideoDecoded(mSample.forget());
      return NS_OK;
    }
  private:
    nsAutoPtr<VideoData> mSample;
    RefPtr<Target> mTarget;
  };

  Monitor mMonitor;                  // Guards mTarget and mTaskQueue.
  RefPtr<MediaTaskQueue> mTaskQueue; // Queue the delivery tasks run on.
  RefPtr<Target> mTarget;            // Receiver of the notifications.
};
}
#endif // MediaDataDecodedListener_h_

View File

@ -1528,7 +1528,7 @@ int64_t MediaDecoder::GetEndMediaTime() const {
}
// Drop reference to state machine. Only called during shutdown dance.
void MediaDecoder::ReleaseStateMachine() {
void MediaDecoder::BreakCycles() {
mDecoderStateMachine = nullptr;
}

View File

@ -6,9 +6,9 @@
/*
Each video element based on MediaDecoder has a state machine to manage
its play state and keep the current frame up to date. All state machines
share time in a single shared thread. Each decoder also has one thread
dedicated to decoding audio and video data. This thread is shutdown when
playback is paused. Each decoder also has a thread to push decoded audio
share time in a single shared thread. Each decoder also has a MediaTaskQueue
running in a SharedThreadPool to decode audio and video data.
Each decoder also has a thread to push decoded audio
to the hardware. This thread is not created until playback starts, but
currently is not destroyed when paused, only when playback ends.
@ -234,6 +234,11 @@ struct SeekTarget {
, mType(aType)
{
}
SeekTarget(const SeekTarget& aOther)
: mTime(aOther.mTime)
, mType(aOther.mType)
{
}
bool IsValid() const {
return mType != SeekTarget::Invalid;
}
@ -824,7 +829,7 @@ public:
MediaDecoderStateMachine* GetStateMachine() const;
// Drop reference to state machine. Only called during shutdown dance.
virtual void ReleaseStateMachine();
virtual void BreakCycles();
// Notifies the element that decoding has failed.
virtual void DecodeError();

View File

@ -63,9 +63,11 @@ public:
};
MediaDecoderReader::MediaDecoderReader(AbstractMediaDecoder* aDecoder)
: mAudioCompactor(mAudioQueue),
mDecoder(aDecoder),
mIgnoreAudioOutputFormat(false)
: mAudioCompactor(mAudioQueue)
, mDecoder(aDecoder)
, mIgnoreAudioOutputFormat(false)
, mAudioDiscontinuity(false)
, mVideoDiscontinuity(false)
{
MOZ_COUNT_CTOR(MediaDecoderReader);
}
@ -97,6 +99,9 @@ nsresult MediaDecoderReader::ResetDecode()
VideoQueue().Reset();
AudioQueue().Reset();
mAudioDiscontinuity = true;
mVideoDiscontinuity = true;
return res;
}
@ -173,169 +178,6 @@ VideoData* MediaDecoderReader::FindStartTime(int64_t& aOutStartTime)
return videoData;
}
nsresult MediaDecoderReader::DecodeToTarget(int64_t aTarget)
{
DECODER_LOG(PR_LOG_DEBUG, ("MediaDecoderReader::DecodeToTarget(%lld) Begin", aTarget));
// Decode forward to the target frame. Start with video, if we have it.
if (HasVideo()) {
// Note: when decoding hits the end of stream we must keep the last frame
// in the video queue so that we'll have something to display after the
// seek completes. This makes our logic a bit messy.
bool eof = false;
nsAutoPtr<VideoData> video;
while (HasVideo() && !eof) {
while (VideoQueue().GetSize() == 0 && !eof) {
bool skip = false;
eof = !DecodeVideoFrame(skip, 0);
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
}
if (eof) {
// Hit end of file, we want to display the last frame of the video.
if (video) {
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) repushing video frame [%lld, %lld] at EOF",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PushFront(video.forget());
}
VideoQueue().Finish();
break;
}
video = VideoQueue().PeekFront();
// If the frame end time is less than the seek target, we won't want
// to display this frame after the seek, so discard it.
if (video && video->GetEndTime() <= aTarget) {
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) pop video frame [%lld, %lld]",
aTarget, video->mTime, video->GetEndTime()));
VideoQueue().PopFront();
} else {
// Found a frame after or encompassing the seek target.
if (aTarget >= video->mTime && video->GetEndTime() >= aTarget) {
// The seek target lies inside this frame's time slice. Adjust the frame's
// start time to match the seek target. We do this by replacing the
// first frame with a shallow copy which has the new timestamp.
VideoQueue().PopFront();
VideoData* temp = VideoData::ShallowCopyUpdateTimestamp(video, aTarget);
video = temp;
VideoQueue().PushFront(video);
}
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) found target video frame [%lld,%lld]",
aTarget, video->mTime, video->GetEndTime()));
video.forget();
break;
}
}
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
#ifdef PR_LOGGING
const VideoData* front = VideoQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG, ("First video frame after decode is %lld",
front ? front->mTime : -1));
#endif
}
if (HasAudio()) {
// Decode audio forward to the seek target.
bool eof = false;
while (HasAudio() && !eof) {
while (!eof && AudioQueue().GetSize() == 0) {
eof = !DecodeAudioData();
{
ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
}
const AudioData* audio = AudioQueue().PeekFront();
if (!audio || eof) {
AudioQueue().Finish();
break;
}
CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudio.mRate);
CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudio.mRate);
if (!startFrame.isValid() || !targetFrame.isValid()) {
return NS_ERROR_FAILURE;
}
if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
// Our seek target lies after the frames in this AudioData. Pop it
// off the queue, and keep decoding forwards.
delete AudioQueue().PopFront();
audio = nullptr;
continue;
}
if (startFrame.value() > targetFrame.value()) {
// The seek target doesn't lie in the audio block just after the last
// audio frames we've seen which were before the seek target. This
// could have been the first audio data we've seen after seek, i.e. the
// seek terminated after the seek target in the audio stream. Just
// abort the audio decode-to-target, the state machine will play
// silence to cover the gap. Typically this happens in poorly muxed
// files.
NS_WARNING("Audio not synced after seek, maybe a poorly muxed file?");
break;
}
// The seek target lies somewhere in this AudioData's frames, strip off
// any frames which lie before the seek target, so we'll begin playback
// exactly at the seek target.
NS_ASSERTION(targetFrame.value() >= startFrame.value(),
"Target must at or be after data start.");
NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
"Data must end after target.");
int64_t framesToPrune = targetFrame.value() - startFrame.value();
if (framesToPrune > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames, the |frames|
// variable below will overflow.
NS_WARNING("Can't prune more frames that we have!");
break;
}
uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune);
uint32_t channels = audio->mChannels;
nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[frames * channels]);
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune * channels),
frames * channels * sizeof(AudioDataValue));
CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudio.mRate);
if (!duration.isValid()) {
return NS_ERROR_FAILURE;
}
nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
aTarget,
duration.value(),
frames,
audioData.forget(),
channels));
delete AudioQueue().PopFront();
AudioQueue().PushFront(data.forget());
break;
}
}
#ifdef PR_LOGGING
const VideoData* v = VideoQueue().PeekFront();
const AudioData* a = AudioQueue().PeekFront();
DECODER_LOG(PR_LOG_DEBUG,
("MediaDecoderReader::DecodeToTarget(%lld) finished v=%lld a=%lld",
aTarget, v ? v->mTime : -1, a ? a->mTime : -1));
#endif
return NS_OK;
}
nsresult
MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
int64_t aStartTime)
@ -350,4 +192,174 @@ MediaDecoderReader::GetBuffered(mozilla::dom::TimeRanges* aBuffered,
return NS_OK;
}
// Runnable that re-enters MediaDecoderReader::RequestVideoData() with
// skip-to-next-keyframe enabled, so that skipping continues via the decode
// task queue rather than monopolizing it in a loop.
class RequestVideoWithSkipTask : public nsRunnable {
public:
  RequestVideoWithSkipTask(MediaDecoderReader* aReader,
                           int64_t aTimeThreshold)
    : mReader(aReader)
    , mTimeThreshold(aTimeThreshold)
  {
  }
  NS_METHOD Run() {
    bool skipToNextKeyframe = true;
    mReader->RequestVideoData(skipToNextKeyframe, mTimeThreshold);
    return NS_OK;
  }
private:
  nsRefPtr<MediaDecoderReader> mReader; // Keeps the reader alive until we run.
  int64_t mTimeThreshold;
};
// Base-class implementation of the asynchronous sample-request API on top
// of the synchronous DecodeVideoFrame(): decode until a frame is queued or
// the queue is finished, then deliver the result through the callback.
void
MediaDecoderReader::RequestVideoData(bool aSkipToNextKeyframe,
                                     int64_t aTimeThreshold)
{
  // Note: DecodeVideoFrame() takes |skip| by reference and may update it.
  bool skip = aSkipToNextKeyframe;
  while (VideoQueue().GetSize() == 0 &&
         !VideoQueue().IsFinished()) {
    if (!DecodeVideoFrame(skip, aTimeThreshold)) {
      VideoQueue().Finish();
    } else if (skip) {
      // We still need to decode more data in order to skip to the next
      // keyframe. Post another task to the decode task queue to decode
      // again. We don't just decode straight in a loop here, as that
      // would hog the decode task queue.
      RefPtr<nsIRunnable> task(new RequestVideoWithSkipTask(this, aTimeThreshold));
      mTaskQueue->Dispatch(task);
      return;
    }
  }
  if (VideoQueue().GetSize() > 0) {
    VideoData* v = VideoQueue().PopFront();
    if (v && mVideoDiscontinuity) {
      // First frame since ResetDecode(); mark it so consumers know a gap
      // (e.g. a seek) precedes it.
      v->mDiscontinuity = true;
      mVideoDiscontinuity = false;
    }
    // Ownership of the frame passes to the callback.
    GetCallback()->OnVideoDecoded(v);
  } else if (VideoQueue().IsFinished()) {
    GetCallback()->OnVideoEOS();
  }
}
void
MediaDecoderReader::RequestAudioData()
{
while (AudioQueue().GetSize() == 0 &&
!AudioQueue().IsFinished()) {
if (!DecodeAudioData()) {
AudioQueue().Finish();
}
}
if (AudioQueue().GetSize() > 0) {
AudioData* a = AudioQueue().PopFront();
if (mAudioDiscontinuity) {
a->mDiscontinuity = true;
mAudioDiscontinuity = false;
}
GetCallback()->OnAudioDecoded(a);
return;
} else if (AudioQueue().IsFinished()) {
GetCallback()->OnAudioEOS();
return;
}
}
// Sets the callback that receives decoded samples and EOS/error
// notifications from the Request*Data() calls.
void
MediaDecoderReader::SetCallback(RequestSampleCallback* aCallback)
{
  mSampleDecodedCallback = aCallback;
}
// Sets the task queue used to re-dispatch decode work (see
// RequestVideoData()'s keyframe-skip path).
void
MediaDecoderReader::SetTaskQueue(MediaTaskQueue* aTaskQueue)
{
  mTaskQueue = aTaskQueue;
}
// Breaks the reference cycles between the reader, its sample callback and
// the task queue. Called during shutdown.
void
MediaDecoderReader::BreakCycles()
{
  if (mSampleDecodedCallback) {
    // Let the callback drop its own references before we drop ours.
    mSampleDecodedCallback->BreakCycles();
    mSampleDecodedCallback = nullptr;
  }
  mTaskQueue = nullptr;
}
// Destroys the decoding state; unlike a bare ReleaseMediaResources() call
// made for dormancy, the reader is not expected to be reused after this.
void
MediaDecoderReader::Shutdown()
{
  ReleaseMediaResources();
}
AudioDecodeRendezvous::AudioDecodeRendezvous()
  : mMonitor("AudioDecodeRendezvous")
  , mStatus(NS_OK) // Initialize explicitly; previously read uninitialized if
                   // Await() preceded any result/Reset() call.
  , mHaveResult(false)
{
}

AudioDecodeRendezvous::~AudioDecodeRendezvous()
{
}

// Called by the reader when an audio sample has been decoded. Takes
// ownership of aSample and wakes any thread blocked in Await().
void
AudioDecodeRendezvous::OnAudioDecoded(AudioData* aSample)
{
  MonitorAutoLock mon(mMonitor);
  mSample = aSample;
  mStatus = NS_OK;
  mHaveResult = true;
  mon.NotifyAll();
}

// Called at end of stream: publishes a null sample with NS_OK status.
void
AudioDecodeRendezvous::OnAudioEOS()
{
  MonitorAutoLock mon(mMonitor);
  mSample = nullptr;
  mStatus = NS_OK;
  mHaveResult = true;
  mon.NotifyAll();
}

// Called on decode error: publishes a null sample with a failure status.
void
AudioDecodeRendezvous::OnDecodeError()
{
  MonitorAutoLock mon(mMonitor);
  mSample = nullptr;
  mStatus = NS_ERROR_FAILURE;
  mHaveResult = true;
  mon.NotifyAll();
}

// Clears any pending result so the rendezvous can be reused for another
// decode.
void
AudioDecodeRendezvous::Reset()
{
  MonitorAutoLock mon(mMonitor);
  mHaveResult = false;
  mStatus = NS_OK;
  mSample = nullptr;
}

// Blocks until a sample, EOS, error, or Cancel() arrives, then transfers
// ownership of the sample (null at EOS/error) to aSample and returns the
// status. Consumes the result, so a subsequent Await() blocks again.
nsresult
AudioDecodeRendezvous::Await(nsAutoPtr<AudioData>& aSample)
{
  MonitorAutoLock mon(mMonitor);
  while (!mHaveResult) {
    mon.Wait();
  }
  mHaveResult = false;
  aSample = mSample;
  return mStatus;
}

// Interrupts a blocked Await(), which will return NS_ERROR_ABORT.
void
AudioDecodeRendezvous::Cancel()
{
  MonitorAutoLock mon(mMonitor);
  mStatus = NS_ERROR_ABORT;
  mHaveResult = true;
  mon.NotifyAll();
}
} // namespace mozilla

View File

@ -18,12 +18,19 @@ namespace dom {
class TimeRanges;
}
// Encapsulates the decoding and reading of media data. Reading can only be
// done on the decode thread. Never hold the decoder monitor when
// calling into this class. Unless otherwise specified, methods and fields of
// this class can only be accessed on the decode thread.
class RequestSampleCallback;
// Encapsulates the decoding and reading of media data. Reading can either
// synchronous and done on the calling "decode" thread, or asynchronous and
// performed on a background thread, with the result being returned by
// callback. Never hold the decoder monitor when calling into this class.
// Unless otherwise specified, methods and fields of this class can only
// be accessed on the decode task queue.
class MediaDecoderReader {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderReader)
MediaDecoderReader(AbstractMediaDecoder* aDecoder);
virtual ~MediaDecoderReader();
@ -36,24 +43,48 @@ public:
// True when this reader need to become dormant state
virtual bool IsDormantNeeded() { return false; }
// Release media resources; they should be released in dormant state.
// The reader can be made usable again by calling ReadMetadata().
virtual void ReleaseMediaResources() {};
// Release the decoder during shutdown
virtual void ReleaseDecoder() {};
// Breaks reference-counted cycles. Called during shutdown.
// WARNING: If you override this, you must call the base implementation
// in your override.
virtual void BreakCycles();
// Destroys the decoding state. The reader cannot be made usable again.
// This is different from ReleaseMediaResources() as it is irreversible,
// whereas ReleaseMediaResources() is reversible.
virtual void Shutdown();
virtual void SetCallback(RequestSampleCallback* aDecodedSampleCallback);
virtual void SetTaskQueue(MediaTaskQueue* aTaskQueue);
// Resets all state related to decoding, emptying all buffers etc.
// Cancels all pending Request*Data() request callbacks, and flushes the
// decode pipeline. The decoder must not call any of the callbacks for
// outstanding Request*Data() calls after this is called. Calls to
// Request*Data() made after this should be processed as usual.
// Normally this call precedes a Seek() call, or shutdown.
// The first samples of every stream produced after a ResetDecode() call
// *must* be marked as "discontinuities". If they're not, seeking won't
// work properly!
virtual nsresult ResetDecode();
// Decodes an unspecified amount of audio data, enqueuing the audio data
// in mAudioQueue. Returns true when there's more audio to decode,
// false if the audio is finished, end of file has been reached,
// or an unrecoverable read error has occurred.
virtual bool DecodeAudioData() = 0;
// Requests the Reader to call OnAudioDecoded() on aCallback with one
// audio sample. The decode should be performed asynchronously, and
// the callback can be performed on any thread. Don't hold the decoder
// monitor while calling this, as the implementation may try to wait
// on something that needs the monitor and deadlock.
virtual void RequestAudioData();
// Reads and decodes one video frame. Packets with a timestamp less
// than aTimeThreshold will be decoded (unless they're not keyframes
// and aKeyframeSkip is true), but will not be added to the queue.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
int64_t aTimeThreshold) = 0;
// Requests the Reader to call OnVideoDecoded() on aCallback with one
// video sample. The decode should be performed asynchronously, and
// the callback can be performed on any thread. Don't hold the decoder
// monitor while calling this, as the implementation may try to wait
// on something that needs the monitor and deadlock.
// If aSkipToKeyframe is true, the decode should skip ahead to the
// the next keyframe at or after aTimeThreshold microseconds.
virtual void RequestVideoData(bool aSkipToNextKeyframe,
int64_t aTimeThreshold);
virtual bool HasAudio() = 0;
virtual bool HasVideo() = 0;
@ -65,6 +96,7 @@ public:
virtual nsresult ReadMetadata(MediaInfo* aInfo,
MetadataTags** aTags) = 0;
// TODO: DEPRECATED. This uses synchronous decoding.
// Stores the presentation time of the first frame we'd be able to play if
// we started playback at the current position. Returns the first video
// frame, if we have video.
@ -98,22 +130,6 @@ public:
mIgnoreAudioOutputFormat = true;
}
protected:
// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;
// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;
// An adapter to the audio queue which first copies data to buffers with
// minimal allocation slop and then pushes them to the queue. This is
// useful for decoders working with formats that give awkward numbers of
// frames such as mp3.
AudioCompactor mAudioCompactor;
public:
// Populates aBuffered with the time ranges which are buffered. aStartTime
// must be the presentation time of the first frame in the media, e.g.
// the media time corresponding to playback time/position 0. This function
@ -156,15 +172,51 @@ public:
AudioData* DecodeToFirstAudioData();
VideoData* DecodeToFirstVideoData();
// Decodes samples until we reach frames required to play at time aTarget
// (usecs). This also trims the samples to start exactly at aTarget,
// by discarding audio samples and adjusting start times of video frames.
nsresult DecodeToTarget(int64_t aTarget);
MediaInfo GetMediaInfo() { return mInfo; }
protected:
// Overrides of this function should decode an unspecified amount of
// audio data, enqueuing the audio data in mAudioQueue. Returns true
// when there's more audio to decode, false if the audio is finished,
// end of file has been reached, or an unrecoverable read error has
// occurred. This function blocks until the decode is complete.
virtual bool DecodeAudioData() {
return false;
}
// Overrides of this function should read and decode one video frame.
// Packets with a timestamp less than aTimeThreshold will be decoded
// (unless they're not keyframes and aKeyframeSkip is true), but will
// not be added to the queue. This function blocks until the decode
// is complete.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold) {
return false;
}
RequestSampleCallback* GetCallback() {
MOZ_ASSERT(mSampleDecodedCallback);
return mSampleDecodedCallback;
}
virtual MediaTaskQueue* GetTaskQueue() {
return mTaskQueue;
}
// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;
// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;
// An adapter to the audio queue which first copies data to buffers with
// minimal allocation slop and then pushes them to the queue. This is
// useful for decoders working with formats that give awkward numbers of
// frames such as mp3.
AudioCompactor mAudioCompactor;
// Reference to the owning decoder object.
AbstractMediaDecoder* mDecoder;
@ -175,6 +227,82 @@ protected:
// directly, because they have a number of channel higher than
// what we support.
bool mIgnoreAudioOutputFormat;
private:
nsRefPtr<RequestSampleCallback> mSampleDecodedCallback;
nsRefPtr<MediaTaskQueue> mTaskQueue;
// Flags whether the next audio/video sample comes after a "gap" or
// "discontinuity" in the stream. For example after a seek.
bool mAudioDiscontinuity;
bool mVideoDiscontinuity;
};
// Interface that callers to MediaDecoderReader::Request{Audio,Video}Data()
// must implement to receive the requested samples asynchronously.
// This object is refcounted, and cycles must be broken by calling
// BreakCycles() during shutdown.
class RequestSampleCallback {
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RequestSampleCallback)

  // Receives the result of a RequestAudioData() call. The callback takes
  // ownership of aSample.
  virtual void OnAudioDecoded(AudioData* aSample) = 0;

  // Called when a RequestAudioData() call can't be fulfilled as we've
  // reached the end of stream.
  virtual void OnAudioEOS() = 0;

  // Receives the result of a RequestVideoData() call. The callback takes
  // ownership of aSample.
  virtual void OnVideoDecoded(VideoData* aSample) = 0;

  // Called when a RequestVideoData() call can't be fulfilled as we've
  // reached the end of stream.
  virtual void OnVideoEOS() = 0;

  // Called when there's a decode error. No more sample requests
  // will succeed.
  virtual void OnDecodeError() = 0;

  // Called during shutdown to break any reference cycles.
  virtual void BreakCycles() = 0;

  virtual ~RequestSampleCallback() {}
};
// A RequestSampleCallback implementation that can be passed to the
// MediaDecoderReader to block the thread requesting an audio sample until
// the audio decode is complete. This is used to adapt the asynchronous
// model of the MediaDecoderReader to a synchronous model.
class AudioDecodeRendezvous : public RequestSampleCallback {
public:
  AudioDecodeRendezvous();
  ~AudioDecodeRendezvous();

  // RequestSampleCallback implementation. Called when decode is complete.
  // Note: aSample is null at end of stream.
  virtual void OnAudioDecoded(AudioData* aSample) MOZ_OVERRIDE;
  virtual void OnAudioEOS() MOZ_OVERRIDE;
  virtual void OnVideoDecoded(VideoData* aSample) MOZ_OVERRIDE {}
  virtual void OnVideoEOS() MOZ_OVERRIDE {}
  virtual void OnDecodeError() MOZ_OVERRIDE;
  virtual void BreakCycles() MOZ_OVERRIDE {};

  // Clears any pending result so the rendezvous can be reused.
  void Reset();

  // Blocks until a result arrives (sample, EOS, error, or Cancel()).
  // Returns failure on error, or NS_OK.
  // If *aSample is null, EOS has been reached.
  nsresult Await(nsAutoPtr<AudioData>& aSample);

  // Interrupts a call to Await().
  void Cancel();

private:
  Monitor mMonitor;             // Guards all fields below.
  nsresult mStatus;             // Status of the most recent result.
  nsAutoPtr<AudioData> mSample; // Decoded sample; null at EOS/error.
  bool mHaveResult;             // True once a result is ready for Await().
};
} // namespace mozilla

File diff suppressed because it is too large Load Diff

View File

@ -4,29 +4,36 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
Each video element for a media file has two threads:
1) The Audio thread writes the decoded audio data to the audio
hardware. This is done in a separate thread to ensure that the
audio hardware gets a constant stream of data without
interruption due to decoding or display. At some point
AudioStream will be refactored to have a callback interface
where it asks for data and an extra thread will no longer be
needed.
Each media element for a media file has one thread called the "audio thread".
2) The decode thread. This thread reads from the media stream and
decodes the Theora and Vorbis data. It places the decoded data into
queues for the other threads to pull from.
The audio thread writes the decoded audio data to the audio
hardware. This is done in a separate thread to ensure that the
audio hardware gets a constant stream of data without
interruption due to decoding or display. At some point
AudioStream will be refactored to have a callback interface
where it asks for data and this thread will no longer be
needed.
The element/state machine also has a MediaTaskQueue which runs in a
SharedThreadPool that is shared with all other elements/decoders. The state
machine dispatches tasks to this to call into the MediaDecoderReader to
request decoded audio or video data. The Reader will callback with decoded
sampled when it has them available, and the state machine places the decoded
samples into its queues for the consuming threads to pull from.
The MediaDecoderReader can choose to decode asynchronously, or synchronously
and return requested samples synchronously inside its Request*Data()
functions via callback. Asynchronous decoding is preferred, and should be
used for any new readers.
All file reads, seeks, and all decoding must occur on the decode thread.
Synchronisation of state between the thread is done via a monitor owned
by MediaDecoder.
The lifetime of the decode and audio threads is controlled by the state
machine when it runs on the shared state machine thread. When playback
needs to occur they are created and events dispatched to them to run
them. These events exit when decoding/audio playback is completed or
no longer required.
The lifetime of the audio thread is controlled by the state machine when
it runs on the shared state machine thread. When playback needs to occur
the audio thread is created and an event dispatched to run it. The audio
thread exits when audio playback is completed or no longer required.
A/V synchronisation is handled by the state machine. It examines the audio
playback time and compares this to the next frame in the queue of video
@ -39,7 +46,7 @@ Frame skipping is done in the following ways:
display time is less than the current audio time. This ensures
the correct frame for the current time is always displayed.
2) The decode thread will stop decoding interframes and read to the
2) The decode tasks will stop decoding interframes and read to the
next keyframe if it determines that decoding the remaining
interframes will cause playback issues. It detects this by:
a) If the amount of audio data in the audio queue drops
@ -47,11 +54,13 @@ Frame skipping is done in the following ways:
b) If the video queue drops below a threshold where it
will be decoding video data that won't be displayed due
to the decode thread dropping the frame immediately.
TODO: In future we should only do this when the Reader is decoding
synchronously.
When hardware accelerated graphics is not available, YCbCr conversion
is done on the decode thread when video frames are decoded.
is done on the decode task queue when video frames are decoded.
The decode thread pushes decoded audio and videos frames into two
The decode task queue pushes decoded audio and videos frames into two
separate queues - one for audio and one for video. These are kept
separate to make it easy to constantly feed audio data to the audio
hardware while allowing frame skipping of video data. These queues are
@ -59,13 +68,10 @@ threadsafe, and neither the decode, audio, or state machine should
be able to monopolize them, and cause starvation of the other threads.
Both queues are bounded by a maximum size. When this size is reached
the decode thread will no longer decode video or audio depending on the
queue that has reached the threshold. If both queues are full, the decode
thread will wait on the decoder monitor.
When the decode queues are full (they've reached their maximum size) and
the decoder is not in PLAYING play state, the state machine may opt
to shut down the decode thread in order to conserve resources.
the decode tasks will no longer request video or audio depending on the
queue that has reached the threshold. If both queues are full, no more
decode tasks will be dispatched to the decode task queue, so other
decoders will have an opportunity to run.
During playback the audio thread will be idle (via a Wait() on the
monitor) if the audio queue is empty. Otherwise it constantly pops
@ -83,6 +89,7 @@ hardware (via AudioStream).
#include "MediaDecoderReader.h"
#include "MediaDecoderOwner.h"
#include "MediaMetadataManager.h"
#include "MediaDataDecodedListener.h"
class nsITimer;
@ -102,7 +109,7 @@ class SharedThreadPool;
/*
The state machine class. This manages the decoding and seeking in the
MediaDecoderReader on the decode thread, and A/V sync on the shared
MediaDecoderReader on the decode task queue, and A/V sync on the shared
state machine thread, and controls the audio "push" thread.
All internal state is synchronised via the decoder monitor. State changes
@ -312,10 +319,9 @@ public:
void SetFragmentEndTime(int64_t aEndTime);
// Drop reference to decoder. Only called during shutdown dance.
void ReleaseDecoder() {
MOZ_ASSERT(mReader);
void BreakCycles() {
if (mReader) {
mReader->ReleaseDecoder();
mReader->BreakCycles();
}
mDecoder = nullptr;
}
@ -357,11 +363,22 @@ public:
// samples in advance of when they're needed for playback.
void SetMinimizePrerollUntilPlaybackStarts();
void OnAudioDecoded(AudioData* aSample);
void OnAudioEOS();
void OnVideoDecoded(VideoData* aSample);
void OnVideoEOS();
void OnDecodeError();
protected:
virtual ~MediaDecoderStateMachine();
void AssertCurrentThreadInMonitor() const { mDecoder->GetReentrantMonitor().AssertCurrentThreadIn(); }
// Inserts MediaData* samples into their respective MediaQueues.
// aSample must not be null.
void Push(AudioData* aSample);
void Push(VideoData* aSample);
class WakeDecoderRunnable : public nsRunnable {
public:
WakeDecoderRunnable(MediaDecoderStateMachine* aSM)
@ -397,8 +414,14 @@ protected:
};
WakeDecoderRunnable* GetWakeDecoderRunnable();
MediaQueue<AudioData>& AudioQueue() { return mReader->AudioQueue(); }
MediaQueue<VideoData>& VideoQueue() { return mReader->VideoQueue(); }
MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
nsresult FinishDecodeMetadata();
RefPtr<MediaDataDecodedListener<MediaDecoderStateMachine>> mMediaDecodedListener;
nsAutoPtr<MetadataTags> mMetadataTags;
// True if our buffers of decoded audio are not full, and we should
// decode more.
@ -468,11 +491,10 @@ protected:
// Called on the state machine thread.
int64_t GetClock();
// Returns the presentation time of the first audio or video frame in the
// media. If the media has video, it returns the first video frame. The
// decoder monitor must be held with exactly one lock count. Called on the
// state machine thread.
VideoData* FindStartTime();
nsresult DropAudioUpToSeekTarget(AudioData* aSample);
nsresult DropVideoUpToSeekTarget(VideoData* aSample);
void SetStartTime(int64_t aStartTimeUsecs);
// Update only the state machine's current playback position (and duration,
// if unknown). Does not update the playback position on the decoder or
@ -544,6 +566,10 @@ protected:
// The decoder monitor must be held.
nsresult EnqueueDecodeMetadataTask();
// Dispatches a task to the decode task queue to seek the decoder.
// The decoder monitor must be held.
nsresult EnqueueDecodeSeekTask();
nsresult DispatchAudioDecodeTaskIfNeeded();
// Ensures a task to decode audio has been dispatched to the decode task queue.
@ -561,10 +587,6 @@ protected:
// The decoder monitor must be held.
nsresult EnsureVideoDecodeTaskQueued();
// Dispatches a task to the decode task queue to seek the decoder.
// The decoder monitor must be held.
nsresult EnqueueDecodeSeekTask();
// Calls the reader's SetIdle(). This is only called in a task dispatched to
// the decode task queue, don't call it directly.
void SetReaderIdle();
@ -575,12 +597,6 @@ protected:
// The decoder monitor must be held.
void DispatchDecodeTasksIfNeeded();
// Queries our state to see whether the decode has finished for all streams.
// If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
// to run.
// The decoder monitor must be held.
void CheckIfDecodeComplete();
// Returns the "media time". This is the absolute time which the media
// playback has reached. i.e. this returns values in the range
// [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
@ -604,15 +620,29 @@ protected:
// must be held with exactly one lock count.
nsresult DecodeMetadata();
// Wraps the call to DecodeMetadata(), signals a DecodeError() on failure.
void CallDecodeMetadata();
// Checks whether we're finished decoding metadata, and switches to DECODING
// state if so.
void MaybeFinishDecodeMetadata();
// Seeks to mSeekTarget. Called on the decode thread. The decoder monitor
// must be held with exactly one lock count.
void DecodeSeek();
// Decode loop, decodes data until EOF or shutdown.
// Called on the decode thread.
void DecodeLoop();
void CheckIfSeekComplete();
bool IsAudioSeekComplete();
bool IsVideoSeekComplete();
void CallDecodeMetadata();
// Completes the seek operation, moves onto the next appropriate state.
void SeekCompleted();
// Queries our state to see whether the decode has finished for all streams.
// If so, we move into DECODER_STATE_COMPLETED and schedule the state machine
// to run.
// The decoder monitor must be held.
void CheckIfDecodeComplete();
// Copy audio from an AudioData packet to aOutput. This may require
// inserting silence depending on the timing of the audio packet.
@ -637,6 +667,11 @@ protected:
// case as it may not be needed again.
bool IsPausedAndDecoderWaiting();
// These return true if the respective stream's decode has not yet reached
// the end of stream.
bool IsAudioDecoding();
bool IsVideoDecoding();
// The decoder object that created this state machine. The state machine
// holds a strong reference to the decoder to ensure that the decoder stays
// alive once media element has started the decoder shutdown process, and has
@ -648,6 +683,19 @@ protected:
// state machine, audio and main threads.
nsRefPtr<MediaDecoder> mDecoder;
// Time at which the last video sample was requested. If it takes too long
// before the sample arrives, we will increase the amount of audio we buffer.
// This is necessary for legacy synchronous decoders to prevent underruns.
TimeStamp mVideoDecodeStartTime;
// Queue of audio frames. This queue is threadsafe, and is accessed from
// the audio, decoder, state machine, and main threads.
MediaQueue<AudioData> mAudioQueue;
// Queue of video frames. This queue is threadsafe, and is accessed from
// the decoder, state machine, and main threads.
MediaQueue<VideoData> mVideoQueue;
// The decoder monitor must be obtained before modifying this state.
// NotifyAll on the monitor must be called when the state is changed so
// that interested threads can wake up and alter behaviour if appropriate
@ -719,6 +767,14 @@ protected:
// this value. Accessed on main and decode thread.
SeekTarget mSeekTarget;
// The position that we're currently seeking to. This differs from
// mSeekTarget, as mSeekTarget is the target we'll seek to next, whereas
// mCurrentSeekTarget is the position that the decode is in the process
// of seeking to.
// The decoder monitor lock must be obtained before reading or writing
// this value.
SeekTarget mCurrentSeekTarget;
// Media Fragment end time in microseconds. Access controlled by decoder monitor.
int64_t mFragmentEndTime;
@ -729,9 +785,8 @@ protected:
RefPtr<AudioStream> mAudioStream;
// The reader, don't call its methods with the decoder monitor held.
// This is created in the play state machine's constructor, and destroyed
// in the play state machine's destructor.
nsAutoPtr<MediaDecoderReader> mReader;
// This is created in the state machine's constructor.
nsRefPtr<MediaDecoderReader> mReader;
// Accessed only on the state machine thread.
// Not an nsRevocableEventPtr since we must Revoke() it well before
@ -817,6 +872,12 @@ protected:
uint32_t mAudioPrerollUsecs;
uint32_t mVideoPrerollFrames;
// This temporarily stores the first frame we decode after we seek.
// This is so that if we hit end of stream while we're decoding to reach
// the seek target, we will still have a frame that we can display as the
// last frame in the media.
nsAutoPtr<VideoData> mFirstVideoFrameAfterSeek;
// When we start decoding (either for the first time, or after a pause)
// we may be low on decoded data. We don't want our "low data" logic to
// kick in and decide that we're low on decoded data because the download
@ -836,19 +897,11 @@ protected:
// yet decoded to end of stream.
bool mIsVideoDecoding;
// True when we have dispatched a task to the decode task queue to run
// the audio decode.
bool mDispatchedAudioDecodeTask;
// True when we have dispatched a task to the decode task queue to run
// the video decode.
bool mDispatchedVideoDecodeTask;
// If the video decode is falling behind the audio, we'll start dropping the
// inter-frames up until the next keyframe which is at or before the current
// playback position. skipToNextKeyframe is true if we're currently
// skipping up to the next keyframe.
bool mSkipToNextKeyFrame;
// True when we have dispatched a task to the decode task queue to request
// decoded audio/video, and/or we are waiting for the requested sample to be
// returned by callback from the Reader.
bool mAudioRequestPending;
bool mVideoRequestPending;
// True if we shouldn't play our audio (but still write it to any capturing
// streams). When this is true, mStopAudioThread is always true and
@ -924,10 +977,21 @@ protected:
// dispatch multiple tasks to re-do the metadata loading.
bool mDispatchedDecodeMetadataTask;
// True if we've dispatched a task to the decode task queue to call
// Seek on the reader. We maintain a flag to ensure that we don't
// dispatch multiple tasks to re-do the seek.
bool mDispatchedDecodeSeekTask;
// These two flags are true when we need to drop decoded samples that
// we receive up to the next discontinuity. We do this when we seek;
// the first sample in each stream after the seek is marked as being
// a "discontinuity".
bool mDropAudioUntilNextDiscontinuity;
bool mDropVideoUntilNextDiscontinuity;
// True if we need to decode forwards to the seek target inside
// mCurrentSeekTarget.
bool mDecodeToSeekTarget;
// We record the playback position before we seek in order to
// determine where the seek terminated relative to the playback position
// we were at before the seek.
int64_t mCurrentTimeBeforeSeek;
// Stores presentation info required for playback. The decoder monitor
// must be held when accessing this.

View File

@ -43,11 +43,13 @@ template <class T> class MediaQueue : private nsDeque {
inline void Push(T* aItem) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
MOZ_ASSERT(aItem);
nsDeque::Push(aItem);
}
inline void PushFront(T* aItem) {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
MOZ_ASSERT(aItem);
nsDeque::PushFront(aItem);
}
@ -75,11 +77,6 @@ template <class T> class MediaQueue : private nsDeque {
nsDeque::Empty();
}
inline void Erase() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
nsDeque::Erase();
}
void Reset() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
while (GetSize() > 0) {

View File

@ -9,6 +9,8 @@
#include "nsSize.h"
#include "VorbisUtils.h"
#include "ImageContainer.h"
#include "SharedThreadPool.h"
#include "mozilla/Preferences.h"
#include <stdint.h>
@ -190,4 +192,10 @@ IsValidVideoRegion(const nsIntSize& aFrame, const nsIntRect& aPicture,
aDisplay.width * aDisplay.height != 0;
}
TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool()
{
return SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
Preferences::GetUint("media.num-decode-threads", 25));
}
} // end namespace mozilla

View File

@ -20,6 +20,7 @@
#include "nsThreadUtils.h"
#include "prtime.h"
#include "AudioSampleFormat.h"
#include "mozilla/RefPtr.h"
using mozilla::CheckedInt64;
using mozilla::CheckedUint64;
@ -208,6 +209,12 @@ private:
const T mValue;
};
class SharedThreadPool;
// Returns the thread pool that is shared amongst all decoder state machines
// for decoding streams.
TemporaryRef<SharedThreadPool> GetMediaDecodeThreadPool();
} // end namespace mozilla
#endif

View File

@ -43,6 +43,8 @@ class MediaSourceReader : public MediaDecoderReader
public:
MediaSourceReader(MediaSourceDecoder* aDecoder, dom::MediaSource* aSource)
: MediaDecoderReader(aDecoder)
, mTimeThreshold(-1)
, mDropVideoBeforeThreshold(false)
, mActiveVideoDecoder(-1)
, mActiveAudioDecoder(-1)
, mMediaSource(aSource)
@ -62,53 +64,72 @@ public:
return mDecoders.IsEmpty() && mPendingDecoders.IsEmpty();
}
bool DecodeAudioData() MOZ_OVERRIDE
void RequestAudioData() MOZ_OVERRIDE
{
if (!GetAudioReader()) {
MSE_DEBUG("%p DecodeAudioFrame called with no audio reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
return false;
GetCallback()->OnDecodeError();
return;
}
bool rv = GetAudioReader()->DecodeAudioData();
nsAutoTArray<AudioData*, 10> audio;
GetAudioReader()->AudioQueue().GetElementsAfter(-1, &audio);
for (uint32_t i = 0; i < audio.Length(); ++i) {
AudioQueue().Push(audio[i]);
}
GetAudioReader()->AudioQueue().Empty();
return rv;
GetAudioReader()->RequestAudioData();
}
bool DecodeVideoFrame(bool& aKeyFrameSkip, int64_t aTimeThreshold) MOZ_OVERRIDE
void OnAudioDecoded(AudioData* aSample)
{
GetCallback()->OnAudioDecoded(aSample);
}
void OnAudioEOS()
{
GetCallback()->OnAudioEOS();
}
void RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) MOZ_OVERRIDE
{
if (!GetVideoReader()) {
MSE_DEBUG("%p DecodeVideoFrame called with no video reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
return false;
GetCallback()->OnDecodeError();
return;
}
mTimeThreshold = aTimeThreshold;
GetVideoReader()->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
}
if (MaybeSwitchVideoReaders(aTimeThreshold)) {
GetVideoReader()->DecodeToTarget(aTimeThreshold);
}
bool rv = GetVideoReader()->DecodeVideoFrame(aKeyFrameSkip, aTimeThreshold);
nsAutoTArray<VideoData*, 10> video;
GetVideoReader()->VideoQueue().GetElementsAfter(-1, &video);
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoQueue().Push(video[i]);
}
GetVideoReader()->VideoQueue().Empty();
if (rv) {
return true;
void OnVideoDecoded(VideoData* aSample)
{
if (mDropVideoBeforeThreshold) {
if (aSample->mTime < mTimeThreshold) {
delete aSample;
GetVideoReader()->RequestVideoData(false, mTimeThreshold);
} else {
mDropVideoBeforeThreshold = false;
GetCallback()->OnVideoDecoded(aSample);
}
} else {
GetCallback()->OnVideoDecoded(aSample);
}
}
void OnVideoEOS()
{
// End of stream. See if we can switch to another video decoder.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) returned false (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
return rv;
if (MaybeSwitchVideoReaders()) {
// Success! Resume decoding with next video decoder.
RequestVideoData(false, mTimeThreshold);
} else {
// End of stream.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) EOS (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
GetCallback()->OnVideoEOS();
}
}
void OnDecodeError() {
GetCallback()->OnDecodeError();
}
bool HasVideo() MOZ_OVERRIDE
@ -126,7 +147,22 @@ public:
int64_t aCurrentTime) MOZ_OVERRIDE;
nsresult GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime) MOZ_OVERRIDE;
already_AddRefed<SubBufferDecoder> CreateSubDecoder(const nsACString& aType,
MediaSourceDecoder* aParentDecoder);
MediaSourceDecoder* aParentDecoder,
MediaTaskQueue* aTaskQueue);
void Shutdown() MOZ_OVERRIDE {
MediaDecoderReader::Shutdown();
for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
mDecoders[i]->GetReader()->Shutdown();
}
}
virtual void BreakCycles() MOZ_OVERRIDE {
MediaDecoderReader::BreakCycles();
for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
mDecoders[i]->GetReader()->BreakCycles();
}
}
void InitializePendingDecoders();
@ -136,7 +172,12 @@ public:
}
private:
bool MaybeSwitchVideoReaders(int64_t aTimeThreshold) {
// These are read and written on the decode task queue threads.
int64_t mTimeThreshold;
bool mDropVideoBeforeThreshold;
bool MaybeSwitchVideoReaders() {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
MOZ_ASSERT(mActiveVideoDecoder != -1);
@ -146,7 +187,7 @@ private:
if (!mDecoders[i]->GetReader()->GetMediaInfo().HasVideo()) {
continue;
}
if (aTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
if (mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
GetVideoReader()->SetIdle();
mActiveVideoDecoder = i;
@ -196,7 +237,7 @@ public:
if (!mReader) {
return nullptr;
}
return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder);
return static_cast<MediaSourceReader*>(mReader.get())->CreateSubDecoder(aType, aParentDecoder, mDecodeTaskQueue);
}
nsresult EnqueueDecoderInitialization() {
@ -366,7 +407,9 @@ MediaSourceReader::InitializePendingDecoders()
}
already_AddRefed<SubBufferDecoder>
MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder* aParentDecoder)
MediaSourceReader::CreateSubDecoder(const nsACString& aType,
MediaSourceDecoder* aParentDecoder,
MediaTaskQueue* aTaskQueue)
{
// XXX: Why/when is mDecoder null here, since it should be equal to aParentDecoder?!
nsRefPtr<SubBufferDecoder> decoder =
@ -375,6 +418,13 @@ MediaSourceReader::CreateSubDecoder(const nsACString& aType, MediaSourceDecoder*
if (!reader) {
return nullptr;
}
// Set a callback on the subreader that forwards calls to this reader.
// This reader will then forward them onto the state machine via this
// reader's callback.
RefPtr<MediaDataDecodedListener<MediaSourceReader>> callback =
new MediaDataDecodedListener<MediaSourceReader>(this, aTaskQueue);
reader->SetCallback(callback);
reader->SetTaskQueue(aTaskQueue);
reader->Init(nullptr);
ReentrantMonitorAutoEnter mon(aParentDecoder->GetReentrantMonitor());
MSE_DEBUG("Registered subdecoder %p subreader %p", decoder.get(), reader.get());
@ -424,7 +474,7 @@ MediaSourceReader::Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
while (!mMediaSource->ActiveSourceBuffers()->AllContainsTime (aTime / USECS_PER_S)
&& !IsShutdown()) {
mMediaSource->WaitForData();
MaybeSwitchVideoReaders(aTime);
MaybeSwitchVideoReaders();
}
if (IsShutdown()) {

View File

@ -78,6 +78,7 @@ EXPORTS += [
'Latency.h',
'MediaCache.h',
'MediaData.h',
'MediaDataDecodedListener.h',
'MediaDecoder.h',
'MediaDecoderOwner.h',
'MediaDecoderReader.h',

View File

@ -59,9 +59,6 @@ MediaOmxReader::MediaOmxReader(AbstractMediaDecoder *aDecoder)
MediaOmxReader::~MediaOmxReader()
{
ReleaseMediaResources();
ReleaseDecoder();
mOmxDecoder.clear();
}
nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
@ -69,6 +66,15 @@ nsresult MediaOmxReader::Init(MediaDecoderReader* aCloneDonor)
return NS_OK;
}
void MediaOmxReader::Shutdown()
{
ReleaseMediaResources();
if (mOmxDecoder.get()) {
mOmxDecoder->ReleaseDecoder();
}
mOmxDecoder.clear();
}
bool MediaOmxReader::IsWaitingMediaResources()
{
if (!mOmxDecoder.get()) {
@ -99,13 +105,6 @@ void MediaOmxReader::ReleaseMediaResources()
}
}
void MediaOmxReader::ReleaseDecoder()
{
if (mOmxDecoder.get()) {
mOmxDecoder->ReleaseDecoder();
}
}
nsresult MediaOmxReader::InitOmxDecoder()
{
if (!mOmxDecoder.get()) {
@ -375,7 +374,6 @@ nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndT
NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
EnsureActive();
ResetDecode();
VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
if (container && container->GetImageContainer()) {
container->GetImageContainer()->ClearAllImagesExceptFront();

View File

@ -80,14 +80,14 @@ public:
virtual bool IsDormantNeeded();
virtual void ReleaseMediaResources();
virtual void ReleaseDecoder() MOZ_OVERRIDE;
virtual nsresult ReadMetadata(MediaInfo* aInfo,
MetadataTags** aTags);
virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
virtual void SetIdle() MOZ_OVERRIDE;
virtual void Shutdown() MOZ_OVERRIDE;
void SetAudioChannel(dom::AudioChannel aAudioChannel) {
mAudioChannel = aAudioChannel;
}

View File

@ -35,11 +35,6 @@ MediaPluginReader::MediaPluginReader(AbstractMediaDecoder *aDecoder,
{
}
MediaPluginReader::~MediaPluginReader()
{
ResetDecode();
}
nsresult MediaPluginReader::Init(MediaDecoderReader* aCloneDonor)
{
return NS_OK;
@ -104,18 +99,22 @@ nsresult MediaPluginReader::ReadMetadata(MediaInfo* aInfo,
return NS_OK;
}
void MediaPluginReader::Shutdown()
{
ResetDecode();
if (mPlugin) {
GetMediaPluginHost()->DestroyDecoder(mPlugin);
mPlugin = nullptr;
}
}
// Resets all state related to decoding, emptying all buffers etc.
nsresult MediaPluginReader::ResetDecode()
{
if (mLastVideoFrame) {
mLastVideoFrame = nullptr;
}
if (mPlugin) {
GetMediaPluginHost()->DestroyDecoder(mPlugin);
mPlugin = nullptr;
}
return NS_OK;
return MediaDecoderReader::ResetDecode();
}
bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip,
@ -321,9 +320,6 @@ nsresult MediaPluginReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aE
{
NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
mVideoQueue.Reset();
mAudioQueue.Reset();
if (mHasAudio && mHasVideo) {
// The decoder seeks/demuxes audio and video streams separately. So if
// we seek both audio and video to aTarget, the audio stream can typically

View File

@ -43,7 +43,6 @@ class MediaPluginReader : public MediaDecoderReader
public:
MediaPluginReader(AbstractMediaDecoder* aDecoder,
const nsACString& aContentType);
~MediaPluginReader();
virtual nsresult Init(MediaDecoderReader* aCloneDonor);
virtual nsresult ResetDecode();
@ -66,6 +65,8 @@ public:
MetadataTags** aTags);
virtual nsresult Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime);
virtual void Shutdown() MOZ_OVERRIDE;
class ImageBufferCallback : public MPAPI::BufferCallback {
typedef mozilla::layers::Image Image;

View File

@ -389,7 +389,7 @@ var gUnseekableTests = [
{ name:"bogus.duh", type:"bogus/duh"}
];
// Unfortunately big-buck-bunny-unseekable.mp4 doesn't play on Windows 7, so
// only include it in the unseekable tests if we're on later versions of Windows.
// only include it in the unseekable tests if we're on later versions of Windows.
// This test actually only passes on win8 at the moment.
if (navigator.userAgent.indexOf("Windows") != -1 && IsWindows8OrLater()) {
gUnseekableTests = gUnseekableTests.concat([
@ -677,6 +677,14 @@ function MediaTestManager() {
is(this.numTestsRunning, this.tokens.length, "[started " + token + "] Length of array should match number of running tests");
}
this.watchdog = null;
this.watchdogFn = function() {
if (this.tokens.length > 0) {
info("Watchdog remaining tests= " + this.tokens);
}
}
// Registers that the test corresponding to 'token' has finished. Call when
// you've finished your test. If all tests are complete this will finish the
// run, otherwise it may start up the next run. It's ok to call multiple times
@ -687,10 +695,18 @@ function MediaTestManager() {
// Remove the element from the list of running tests.
this.tokens.splice(i, 1);
}
if (this.watchdog) {
clearTimeout(this.watchdog);
this.watchdog = null;
}
info("[finished " + token + "] remaining= " + this.tokens);
this.numTestsRunning--;
is(this.numTestsRunning, this.tokens.length, "[finished " + token + "] Length of array should match number of running tests");
if (this.tokens.length < PARALLEL_TESTS) {
this.nextTest();
this.watchdog = setTimeout(this.watchdogFn.bind(this), 10000);
}
}

View File

@ -14,11 +14,14 @@
var manager = new MediaTestManager;
function startTest(e) {
var v = e.target;
info(v._name + " loadedmetadata");
e.target.play();
}
function playbackEnded(e) {
var v = e.target;
info(v._name + " ended");
if (v._finished)
return;
ok(v.currentTime >= v.duration - 0.1 && v.currentTime <= v.duration + 0.1,
@ -32,6 +35,7 @@ function playbackEnded(e) {
function seekEnded(e) {
var v = e.target;
info(v._name + " seeked");
if (v._finished)
return;
ok(v.currentTime == 0, "Checking currentTime after seek: " +
@ -42,6 +46,11 @@ function seekEnded(e) {
manager.finished(v.token);
}
function seeking(e) {
var v = e.target;
info(v._name + " seeking");
}
function initTest(test, token) {
var type = getMajorMimeType(test.type);
var v = document.createElement(type);
@ -62,6 +71,7 @@ function initTest(test, token) {
v.addEventListener("loadedmetadata", startTest, false);
v.addEventListener("ended", playbackEnded, false);
v.addEventListener("seeked", seekEnded, false);
v.addEventListener("seeking", seeking, false);
document.body.appendChild(v);
}

View File

@ -20,17 +20,22 @@ SimpleTest.expectAssertions(0, 2);
var manager = new MediaTestManager;
function start(e) {
var v = e.target;
info("[" + v._name + "] start");
e.target.currentTime = e.target.duration / 4;
}
function startSeeking(e) {
var v = e.target;
info("[" + v._name + "] seeking");
e.target._seeked = true;
}
function canPlayThrough(e) {
var v = e.target;
info("[" + v._name + "] canPlayThrough");
if (v._seeked && !v._finished) {
ok(true, "Got canplaythrough after seek for " + v._name);
ok(true, "[" + v._name + "] got canplaythrough after seek");
v._finished = true;
v.parentNode.removeChild(v);
v.src = "";
@ -38,6 +43,16 @@ function canPlayThrough(e) {
}
}
function seeked(e) {
var v = e.target;
info("[" + v._name + "] seeked");
}
function error(e) {
var v = e.target;
info("[" + v._name + "] error");
}
function startTest(test, token) {
// TODO: Bug 568402, there's a bug in the WAV backend where we sometimes
// don't send canplaythrough events after seeking. Once that is fixed,
@ -58,6 +73,8 @@ function startTest(test, token) {
v.addEventListener("loadedmetadata", start, false);
v.addEventListener("canplaythrough", canPlayThrough, false);
v.addEventListener("seeking", startSeeking, false);
v.addEventListener("seeked", seeked, false);
v.addEventListener("error", error, false);
document.body.appendChild(v);
}

View File

@ -61,10 +61,10 @@ function createTestArray() {
function startTest(test, token) {
var v = document.createElement('video');
manager.started(token);
v.token = token += "-seek" + test.number + ".js";
manager.started(v.token);
v.src = test.name;
v.preload = "metadata";
v.token = token;
document.body.appendChild(v);
var name = test.name + " seek test " + test.number;
var localIs = function(name) { return function(a, b, msg) {
@ -76,7 +76,7 @@ function startTest(test, token) {
var localFinish = function(v, manager) { return function() {
v.onerror = null;
removeNodeAndSource(v);
dump("SEEK-TEST: Finished " + name + "\n");
dump("SEEK-TEST: Finished " + name + " token: " + v.token + "\n");
manager.finished(v.token);
}}(v, manager);
dump("SEEK-TEST: Started " + name + "\n");

View File

@ -252,12 +252,25 @@ MediaDecodeTask::Decode()
return;
}
while (mDecoderReader->DecodeAudioData()) {
// consume all of the buffer
continue;
MediaQueue<AudioData> audioQueue;
nsRefPtr<AudioDecodeRendezvous> barrier(new AudioDecodeRendezvous());
mDecoderReader->SetCallback(barrier);
while (1) {
mDecoderReader->RequestAudioData();
nsAutoPtr<AudioData> audio;
if (NS_FAILED(barrier->Await(audio))) {
ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent);
return;
}
if (!audio) {
// End of stream.
break;
}
audioQueue.Push(audio.forget());
}
mDecoderReader->Shutdown();
mDecoderReader->BreakCycles();
MediaQueue<AudioData>& audioQueue = mDecoderReader->AudioQueue();
uint32_t frameCount = audioQueue.FrameCount();
uint32_t channelCount = mediaInfo.mAudio.mChannels;
uint32_t sampleRate = mediaInfo.mAudio.mRate;