Bug 1179665. Part 3 - move code about sending stream data into DecodedStream. r=roc.

JW Wang 2015-07-06 11:36:15 +08:00
parent 6f682ecb0d
commit 96f57cac6e
4 changed files with 362 additions and 254 deletions

dom/media/DecodedStream.cpp

@@ -6,7 +6,13 @@
#include "DecodedStream.h"
#include "MediaStreamGraph.h"
#include "mozilla/ReentrantMonitor.h"
#include "AudioSegment.h"
#include "VideoSegment.h"
#include "MediaQueue.h"
#include "MediaData.h"
#include "MediaInfo.h"
#include "SharedBuffer.h"
#include "VideoUtils.h"
namespace mozilla {
@@ -324,4 +330,317 @@ DecodedStream::SetPlaying(bool aPlaying)
mData->SetPlaying(aPlaying);
}
bool
DecodedStream::HaveEnoughAudio(const MediaInfo& aInfo) const
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
if (mData->mStreamInitialized && !mData->mHaveSentFinishAudio) {
MOZ_ASSERT(aInfo.HasAudio());
TrackID audioTrackId = aInfo.mAudio.mTrackId;
if (!mData->mStream->HaveEnoughBuffered(audioTrackId)) {
return false;
}
}
return true;
}
bool
DecodedStream::HaveEnoughVideo(const MediaInfo& aInfo) const
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
if (mData->mStreamInitialized && !mData->mHaveSentFinishVideo) {
MOZ_ASSERT(aInfo.HasVideo());
TrackID videoTrackId = aInfo.mVideo.mTrackId;
if (!mData->mStream->HaveEnoughBuffered(videoTrackId)) {
return false;
}
}
return true;
}
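
The two predicates above are the throttling gate used when output is captured into a stream: buffering is only consulted in the window after InitTracks() has run and before EndTrack() has been sent. A minimal standalone model of that gate (illustrative names and threshold, not Mozilla code):

#include <cstdint>
#include <cstdio>

struct TrackModel {
  bool initialized = false;    // set once InitTracks() has run
  bool sentFinish = false;     // set once EndTrack() has been sent
  int64_t bufferedUsecs = 0;
  // Stand-in for SourceMediaStream::HaveEnoughBuffered(); the 250 ms
  // threshold is hypothetical.
  bool HaveEnoughBuffered() const { return bufferedUsecs >= 250000; }
};

static bool HaveEnough(const TrackModel& t) {
  // Outside the initialized-and-not-finished window there is nothing to
  // throttle on, so report "enough" and let other limits decide.
  if (t.initialized && !t.sentFinish) {
    return t.HaveEnoughBuffered();
  }
  return true;
}

int main() {
  TrackModel t;
  printf("%d\n", HaveEnough(t)); // 1: tracks not created yet
  t.initialized = true;
  printf("%d\n", HaveEnough(t)); // 0: live track, empty buffer: keep decoding
  t.bufferedUsecs = 300000;
  printf("%d\n", HaveEnough(t)); // 1: enough buffered downstream
}
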
void
DecodedStream::InitTracks(int64_t aStartTime, const MediaInfo& aInfo)
{
GetReentrantMonitor().AssertCurrentThreadIn();
if (mData->mStreamInitialized) {
return;
}
SourceMediaStream* sourceStream = mData->mStream;
if (aInfo.HasAudio()) {
TrackID audioTrackId = aInfo.mAudio.mTrackId;
AudioSegment* audio = new AudioSegment();
sourceStream->AddAudioTrack(audioTrackId, aInfo.mAudio.mRate, 0, audio,
SourceMediaStream::ADDTRACK_QUEUED);
mData->mNextAudioTime = aStartTime;
}
if (aInfo.HasVideo()) {
TrackID videoTrackId = aInfo.mVideo.mTrackId;
VideoSegment* video = new VideoSegment();
sourceStream->AddTrack(videoTrackId, 0, video,
SourceMediaStream::ADDTRACK_QUEUED);
mData->mNextVideoTime = aStartTime;
}
sourceStream->FinishAddTracks();
mData->mStreamInitialized = true;
}
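
InitTracks() adds every track with ADDTRACK_QUEUED and only then calls FinishAddTracks(). The point, as I read the SourceMediaStream API, is that queued tracks become visible to consumers together rather than one at a time; a toy model of that two-phase pattern (the atomicity assumption is mine, not stated in this patch):

#include <cstdio>
#include <vector>

struct StreamModel {
  std::vector<int> mPending;  // tracks added with ADDTRACK_QUEUED
  std::vector<int> mVisible;  // tracks consumers can see

  void AddTrackQueued(int aId) { mPending.push_back(aId); }
  void FinishAddTracks() {
    // All queued tracks appear at once.
    mVisible.insert(mVisible.end(), mPending.begin(), mPending.end());
    mPending.clear();
  }
};

int main() {
  StreamModel s;
  s.AddTrackQueued(1); // audio
  s.AddTrackQueued(2); // video
  printf("visible before FinishAddTracks: %zu\n", s.mVisible.size()); // 0
  s.FinishAddTracks();
  printf("visible after FinishAddTracks: %zu\n", s.mVisible.size());  // 2
}
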
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
AudioData* aAudio, AudioSegment* aOutput,
uint32_t aRate, double aVolume)
{
// This logic has to mimic AudioSink closely to make sure we write
// the exact same silences
CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
UsecsToFrames(aStartTime, aRate);
CheckedInt64 frameOffset = UsecsToFrames(aAudio->mTime, aRate);
if (!audioWrittenOffset.isValid() ||
!frameOffset.isValid() ||
// ignore packets that we've already processed
frameOffset.value() + aAudio->mFrames <= audioWrittenOffset.value()) {
return;
}
if (audioWrittenOffset.value() < frameOffset.value()) {
int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
// Write silence to catch up
AudioSegment silence;
silence.InsertNullDataAtStart(silentFrames);
aStream->mAudioFramesWritten += silentFrames;
audioWrittenOffset += silentFrames;
aOutput->AppendFrom(&silence);
}
MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());
int64_t offset = audioWrittenOffset.value() - frameOffset.value();
size_t framesToWrite = aAudio->mFrames - offset;
aAudio->EnsureAudioBuffer();
nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
nsAutoTArray<const AudioDataValue*, 2> channels;
for (uint32_t i = 0; i < aAudio->mChannels; ++i) {
channels.AppendElement(bufferData + i * aAudio->mFrames + offset);
}
aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
aStream->mAudioFramesWritten += framesToWrite;
aOutput->ApplyVolume(aVolume);
aStream->mNextAudioTime = aAudio->GetEndTime();
}
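
The offset arithmetic above decides, per packet, how many silent frames to prepend and how much of the packet has already been written. A runnable sketch of just that decision, with plain integers and the CheckedInt64 overflow checks omitted (UsecsToFramesPlain mirrors what VideoUtils' UsecsToFrames computes):

#include <cstdint>
#include <cstdio>

static int64_t UsecsToFramesPlain(int64_t aUsecs, uint32_t aRate) {
  return aUsecs * aRate / 1000000;
}

static void PlanWrite(int64_t aFramesWritten, int64_t aStreamStartUsecs,
                      int64_t aPacketTimeUsecs, int64_t aPacketFrames,
                      uint32_t aRate) {
  int64_t writtenOffset =
    aFramesWritten + UsecsToFramesPlain(aStreamStartUsecs, aRate);
  int64_t frameOffset = UsecsToFramesPlain(aPacketTimeUsecs, aRate);
  if (frameOffset + aPacketFrames <= writtenOffset) {
    printf("packet already covered, skip\n");
    return;
  }
  int64_t silence =
    frameOffset > writtenOffset ? frameOffset - writtenOffset : 0;
  // Frames of the packet that an earlier write already covered.
  int64_t skip = writtenOffset + silence - frameOffset;
  printf("prepend %lld silent frames, write frames [%lld, %lld)\n",
         (long long)silence, (long long)skip, (long long)aPacketFrames);
}

int main() {
  // 48 kHz, stream start at t=0, 4800 frames (100 ms) already written.
  PlanWrite(4800, 0, 150000, 4800, 48000); // gap: prepend 2400 silent frames
  PlanWrite(4800, 0, 50000, 4800, 48000);  // overlap: skip first 2400 frames
  PlanWrite(4800, 0, 0, 4800, 48000);      // fully covered: skipped
}
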
void
DecodedStream::SendAudio(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<AudioData>& aQueue,
double aVolume, bool aIsSameOrigin)
{
GetReentrantMonitor().AssertCurrentThreadIn();
if (!aInfo.HasAudio()) {
return;
}
AudioSegment output;
uint32_t rate = aInfo.mAudio.mRate;
nsAutoTArray<nsRefPtr<AudioData>,10> audio;
TrackID audioTrackId = aInfo.mAudio.mTrackId;
SourceMediaStream* sourceStream = mData->mStream;
// It's OK to hold references to the AudioData because AudioData
// is ref-counted.
aQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
for (uint32_t i = 0; i < audio.Length(); ++i) {
SendStreamAudio(mData.get(), aStartTime, audio[i], &output, rate, aVolume);
}
if (!aIsSameOrigin) {
output.ReplaceWithDisabled();
}
// |mNextAudioTime| is updated as we process each audio sample in
// SendStreamAudio(). This is consistent with how |mNextVideoTime|
// is updated for video samples.
if (output.GetDuration() > 0) {
sourceStream->AppendToTrack(audioTrackId, &output);
}
if (aQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
sourceStream->EndTrack(audioTrackId);
mData->mHaveSentFinishAudio = true;
}
}
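
SendAudio() is a batching loop: every queued sample past the write cursor is folded into a single AudioSegment, and one AppendToTrack() call pushes the whole batch per cycle, with the cursor advanced sample by sample inside SendStreamAudio(). A minimal model of that cursor-and-batch pattern (types and numbers are illustrative):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Sample {
  int64_t time;      // start time in us
  int64_t duration;  // duration in us
};

int main() {
  std::vector<Sample> queue = {{0, 10}, {10, 10}, {20, 10}};
  int64_t nextTime = 10;  // everything up to t=10 was sent already
  int64_t appended = 0;   // stands in for the output AudioSegment
  for (const Sample& s : queue) {
    if (s.time + s.duration <= nextTime) {
      continue;  // ignore packets that were already processed
    }
    appended += s.duration;          // accumulate into one output batch
    nextTime = s.time + s.duration;  // cursor advances per sample
  }
  printf("appended %lld us, cursor now %lld us\n",
         (long long)appended, (long long)nextTime); // appended 20, cursor 30
}
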
static void
WriteVideoToMediaStream(MediaStream* aStream,
layers::Image* aImage,
int64_t aEndMicroseconds,
int64_t aStartMicroseconds,
const mozilla::gfx::IntSize& aIntrinsicSize,
VideoSegment* aOutput)
{
nsRefPtr<layers::Image> image = aImage;
StreamTime duration =
aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize);
}
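
Note the design choice in WriteVideoToMediaStream(): the duration is the difference of two round-down conversions of absolute times, not a rounded per-frame duration. Rounding each duration separately accumulates drift, while differencing endpoints keeps the total error under one tick. A quick demonstration (the 48000 ticks/s graph rate and ~30 fps timing are assumptions for illustration):

#include <cstdint>
#include <cstdio>

static int64_t RoundDown(int64_t aUsecs, int64_t aRate) {
  return aUsecs * aRate / 1000000;
}

int main() {
  const int64_t rate = 48000;     // assumed graph rate
  const int64_t frameUs = 33333;  // ~30 fps
  int64_t perFrame = 0;           // naive: round each duration
  int64_t endpoint = 0;           // difference of rounded endpoints
  for (int k = 0; k < 1000; ++k) {
    perFrame += RoundDown(frameUs, rate);
    endpoint = RoundDown(int64_t(k + 1) * frameUs, rate);
  }
  printf("naive: %lld ticks, endpoint: %lld ticks\n",
         (long long)perFrame, (long long)endpoint);
  // naive: 1599000, endpoint: 1599984; the naive sum drifts 984 ticks
  // (about 20 ms) behind after only 1000 frames.
}
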
static bool
ZeroDurationAtLastChunk(VideoSegment& aInput)
{
// Get the last video frame's start time in VideoSegment aInput.
// If the start time is equal to the duration of aInput, it means the last
// video frame's duration is zero.
StreamTime lastVideoStartTime;
aInput.GetLastFrame(&lastVideoStartTime);
return lastVideoStartTime == aInput.GetDuration();
}
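
Why comparing the last start time against the total duration detects a zero-duration trailing frame: a chunk's start time is the sum of all durations before it, so it can only equal the segment's total duration when the final chunk contributes nothing. A toy segment model (not the real VideoSegment API):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Chunk { int64_t duration; };

struct SegmentModel {
  std::vector<Chunk> chunks;
  int64_t GetDuration() const {
    int64_t d = 0;
    for (const Chunk& c : chunks) d += c.duration;
    return d;
  }
  // Start time of the last chunk: total duration minus its own duration.
  int64_t LastStart() const { return GetDuration() - chunks.back().duration; }
};

int main() {
  SegmentModel s;
  s.chunks = {{30}, {30}, {0}};  // last frame has zero duration
  printf("%d\n", s.LastStart() == s.GetDuration());  // 1: needs compensation
}
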
void
DecodedStream::SendVideo(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<VideoData>& aQueue,
bool aIsSameOrigin)
{
GetReentrantMonitor().AssertCurrentThreadIn();
if (!aInfo.HasVideo()) {
return;
}
VideoSegment output;
TrackID videoTrackId = aInfo.mVideo.mTrackId;
nsAutoTArray<nsRefPtr<VideoData>, 10> video;
SourceMediaStream* sourceStream = mData->mStream;
// It's OK to hold references to the VideoData because VideoData
// is ref-counted.
aQueue.GetElementsAfter(mData->mNextVideoTime, &video);
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoData* v = video[i];
if (mData->mNextVideoTime < v->mTime) {
// Write the last video frame to catch up. mLastVideoImage can be null
// here, which is fine; it just means there's no video.
// TODO: |mLastVideoImage| should come from the last image rendered
// by the state machine. This will avoid the black frame when capture
// happens in the middle of playback (especially in the middle of a
// video frame). E.g. if we have a video frame that is 30 sec long
// and capture happens at 15 sec, we'll have to append a black frame
// that is 15 sec long.
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
mData->mNextVideoTime, mData->mLastVideoImageDisplaySize, &output);
mData->mNextVideoTime = v->mTime;
}
if (mData->mNextVideoTime < v->GetEndTime()) {
WriteVideoToMediaStream(sourceStream, v->mImage,
v->GetEndTime(), mData->mNextVideoTime, v->mDisplay, &output);
mData->mNextVideoTime = v->GetEndTime();
mData->mLastVideoImage = v->mImage;
mData->mLastVideoImageDisplaySize = v->mDisplay;
}
}
// Check that the output is not empty.
if (output.GetLastFrame()) {
mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
}
if (!aIsSameOrigin) {
output.ReplaceWithDisabled();
}
if (output.GetDuration() > 0) {
sourceStream->AppendToTrack(videoTrackId, &output);
}
if (aQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
if (mData->mEOSVideoCompensation) {
VideoSegment endSegment;
// Calculate the deviation clock time (one stream-time tick) from DecodedStream.
int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
mData->mNextVideoTime + deviation_usec, mData->mNextVideoTime,
mData->mLastVideoImageDisplaySize, &endSegment);
mData->mNextVideoTime += deviation_usec;
MOZ_ASSERT(endSegment.GetDuration() > 0);
if (!aIsSameOrigin) {
endSegment.ReplaceWithDisabled();
}
sourceStream->AppendToTrack(videoTrackId, &endSegment);
}
sourceStream->EndTrack(videoTrackId);
mData->mHaveSentFinishVideo = true;
}
}
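
The compensation path above gives the dangling last image the smallest duration the graph can represent, one stream-time tick, so the otherwise zero-duration final frame is actually rendered. Back-of-envelope for the padding (the 48000 ticks/s graph rate is an assumption for illustration):

#include <cstdint>
#include <cstdio>

// Mirrors what StreamTimeToMicroseconds(1) computes for a given graph rate.
static int64_t TickToUsecs(int64_t aGraphRate) {
  return 1 * 1000000 / aGraphRate;
}

int main() {
  printf("pad last frame by %lld us\n", (long long)TickToUsecs(48000)); // 20 us
}
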
void
DecodedStream::AdvanceTracks(int64_t aStartTime, const MediaInfo& aInfo)
{
GetReentrantMonitor().AssertCurrentThreadIn();
StreamTime endPosition = 0;
if (aInfo.HasAudio()) {
StreamTime audioEnd = mData->mStream->TicksToTimeRoundDown(
aInfo.mAudio.mRate, mData->mAudioFramesWritten);
endPosition = std::max(endPosition, audioEnd);
}
if (aInfo.HasVideo()) {
StreamTime videoEnd = mData->mStream->MicrosecondsToStreamTimeRoundDown(
mData->mNextVideoTime - aStartTime);
endPosition = std::max(endPosition, videoEnd);
}
if (!mData->mHaveSentFinish) {
mData->mStream->AdvanceKnownTracksTime(endPosition);
}
}
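
AdvanceTracks() converts each track's progress into stream time and advances the known-tracks time to the later of the two. The same math with concrete numbers (assumed 48000 ticks/s graph rate and 44100 Hz audio; the conversions mirror TicksToTimeRoundDown and MicrosecondsToStreamTimeRoundDown):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t graphRate = 48000;
  // Audio: 44100 frames written at 44100 Hz, i.e. one second of audio.
  int64_t audioFrames = 44100;
  int64_t audioEnd = audioFrames * graphRate / 44100;  // 48000 ticks
  // Video: written up to 0.9 s past the stream start time.
  int64_t videoUs = 900000;
  int64_t videoEnd = videoUs * graphRate / 1000000;    // 43200 ticks
  printf("known tracks time: %lld ticks\n",
         (long long)std::max(audioEnd, videoEnd));     // 48000
}
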
bool
DecodedStream::SendData(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<AudioData>& aAudioQueue,
MediaQueue<VideoData>& aVideoQueue,
double aVolume, bool aIsSameOrigin)
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
InitTracks(aStartTime, aInfo);
SendAudio(aStartTime, aInfo, aAudioQueue, aVolume, aIsSameOrigin);
SendVideo(aStartTime, aInfo, aVideoQueue, aIsSameOrigin);
AdvanceTracks(aStartTime, aInfo);
bool finished = (!aInfo.HasAudio() || aAudioQueue.IsFinished()) &&
(!aInfo.HasVideo() || aVideoQueue.IsFinished());
if (finished && !mData->mHaveSentFinish) {
mData->mHaveSentFinish = true;
mData->mStream->Finish();
}
return finished;
}
CheckedInt64
DecodedStream::AudioEndTime(int64_t aStartTime, uint32_t aRate) const
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
return aStartTime + FramesToUsecs(mData->mAudioFramesWritten, aRate);
}
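
AudioEndTime() is the stream start plus the written frame count converted back to microseconds, wrapped in CheckedInt64 so the caller can detect overflow (see the isValid() check where the state machine consumes it later in this diff). The plain arithmetic:

#include <cstdint>
#include <cstdio>

static int64_t FramesToUsecsPlain(int64_t aFrames, uint32_t aRate) {
  return aFrames * 1000000 / aRate;
}

int main() {
  int64_t startTime = 500000;     // stream started at 0.5 s
  int64_t framesWritten = 48000;  // one second of 48 kHz audio written
  printf("audio end time: %lld us\n",
         (long long)(startTime + FramesToUsecsPlain(framesWritten, 48000)));
  // 1500000: the playback clock should read 1.5 s
}
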
} // namespace mozilla

dom/media/DecodedStream.h

@@ -9,11 +9,19 @@
#include "nsRefPtr.h"
#include "nsTArray.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/ReentrantMonitor.h"
namespace mozilla {
class AudioData;
class VideoData;
class MediaInfo;
class AudioSegment;
class MediaStream;
class MediaInputPort;
class SourceMediaStream;
class ProcessedMediaStream;
@@ -23,6 +31,8 @@ class OutputStreamListener;
class ReentrantMonitor;
class MediaStreamGraph;
template <class T> class MediaQueue;
namespace layers {
class Image;
}
@@ -92,11 +102,33 @@ public:
void Connect(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
void Remove(MediaStream* aStream);
void SetPlaying(bool aPlaying);
bool HaveEnoughAudio(const MediaInfo& aInfo) const;
bool HaveEnoughVideo(const MediaInfo& aInfo) const;
CheckedInt64 AudioEndTime(int64_t aStartTime, uint32_t aRate) const;
// Return true if the stream is finished.
bool SendData(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<AudioData>& aAudioQueue,
MediaQueue<VideoData>& aVideoQueue,
double aVolume, bool aIsSameOrigin);
private:
ReentrantMonitor& GetReentrantMonitor() const;
void Connect(OutputStreamData* aStream);
nsTArray<OutputStreamData>& OutputStreams();
void InitTracks(int64_t aStartTime, const MediaInfo& aInfo);
void AdvanceTracks(int64_t aStartTime, const MediaInfo& aInfo);
void SendAudio(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<AudioData>& aQueue,
double aVolume, bool aIsSameOrigin);
void SendVideo(int64_t aStartTime,
const MediaInfo& aInfo,
MediaQueue<VideoData>& aQueue,
bool aIsSameOrigin);
UniquePtr<DecodedStreamData> mData;
// Data about MediaStreams that are being fed by the decoder.

dom/media/MediaDecoderStateMachine.cpp

@@ -45,7 +45,6 @@
namespace mozilla {
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::layers;
using namespace mozilla::media;
@@ -358,235 +357,21 @@ int64_t MediaDecoderStateMachine::GetDecodedAudioDuration()
return audioDecoded;
}
void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
DecodedStreamData* aStream,
AudioSegment* aOutput)
{
MOZ_ASSERT(OnTaskQueue());
AssertCurrentThreadInMonitor();
// This logic has to mimic AudioSink closely to make sure we write
// the exact same silences
CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
UsecsToFrames(mStreamStartTime, mInfo.mAudio.mRate);
CheckedInt64 frameOffset = UsecsToFrames(aAudio->mTime, mInfo.mAudio.mRate);
if (!audioWrittenOffset.isValid() ||
!frameOffset.isValid() ||
// ignore packets that we've already processed
frameOffset.value() + aAudio->mFrames <= audioWrittenOffset.value()) {
return;
}
if (audioWrittenOffset.value() < frameOffset.value()) {
int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
// Write silence to catch up
VERBOSE_LOG("writing %lld frames of silence to MediaStream", silentFrames);
AudioSegment silence;
silence.InsertNullDataAtStart(silentFrames);
aStream->mAudioFramesWritten += silentFrames;
audioWrittenOffset += silentFrames;
aOutput->AppendFrom(&silence);
}
MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());
int64_t offset = audioWrittenOffset.value() - frameOffset.value();
size_t framesToWrite = aAudio->mFrames - offset;
aAudio->EnsureAudioBuffer();
nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
nsAutoTArray<const AudioDataValue*,2> channels;
for (uint32_t i = 0; i < aAudio->mChannels; ++i) {
channels.AppendElement(bufferData + i*aAudio->mFrames + offset);
}
aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
VERBOSE_LOG("writing %u frames of data to MediaStream for AudioData at %lld",
static_cast<unsigned>(framesToWrite),
aAudio->mTime);
aStream->mAudioFramesWritten += framesToWrite;
aOutput->ApplyVolume(mVolume);
aStream->mNextAudioTime = aAudio->GetEndTime();
}
static void WriteVideoToMediaStream(MediaStream* aStream,
layers::Image* aImage,
int64_t aEndMicroseconds,
int64_t aStartMicroseconds,
const IntSize& aIntrinsicSize,
VideoSegment* aOutput)
{
nsRefPtr<layers::Image> image = aImage;
StreamTime duration =
aStream->MicrosecondsToStreamTimeRoundDown(aEndMicroseconds) -
aStream->MicrosecondsToStreamTimeRoundDown(aStartMicroseconds);
aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize);
}
static bool ZeroDurationAtLastChunk(VideoSegment& aInput)
{
// Get the last video frame's start time in VideoSegment aInput.
// If the start time is equal to the duration of aInput, it means the last
// video frame's duration is zero.
StreamTime lastVideoStartTime;
aInput.GetLastFrame(&lastVideoStartTime);
return lastVideoStartTime == aInput.GetDuration();
}
void MediaDecoderStateMachine::SendStreamData()
{
MOZ_ASSERT(OnTaskQueue());
AssertCurrentThreadInMonitor();
MOZ_ASSERT(!mAudioSink, "Should've been stopped in RunStateMachine()");
DecodedStreamData* stream = GetDecodedStream();
bool finished = mDecodedStream.SendData(
mStreamStartTime, mInfo, AudioQueue(), VideoQueue(),
mVolume, mDecoder->IsSameOriginMedia());
bool finished =
(!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
(!mInfo.HasVideo() || VideoQueue().IsFinished());
{
SourceMediaStream* mediaStream = stream->mStream;
StreamTime endPosition = 0;
const bool isSameOrigin = mDecoder->IsSameOriginMedia();
if (!stream->mStreamInitialized) {
if (mInfo.HasAudio()) {
TrackID audioTrackId = mInfo.mAudio.mTrackId;
AudioSegment* audio = new AudioSegment();
mediaStream->AddAudioTrack(audioTrackId, mInfo.mAudio.mRate, 0, audio,
SourceMediaStream::ADDTRACK_QUEUED);
stream->mNextAudioTime = mStreamStartTime;
}
if (mInfo.HasVideo()) {
TrackID videoTrackId = mInfo.mVideo.mTrackId;
VideoSegment* video = new VideoSegment();
mediaStream->AddTrack(videoTrackId, 0, video,
SourceMediaStream::ADDTRACK_QUEUED);
stream->mNextVideoTime = mStreamStartTime;
}
mediaStream->FinishAddTracks();
stream->mStreamInitialized = true;
}
if (mInfo.HasAudio()) {
MOZ_ASSERT(stream->mNextAudioTime != -1, "Should've been initialized");
TrackID audioTrackId = mInfo.mAudio.mTrackId;
nsAutoTArray<nsRefPtr<AudioData>,10> audio;
// It's OK to hold references to the AudioData because AudioData
// is ref-counted.
AudioQueue().GetElementsAfter(stream->mNextAudioTime, &audio);
AudioSegment output;
for (uint32_t i = 0; i < audio.Length(); ++i) {
SendStreamAudio(audio[i], stream, &output);
}
if (!isSameOrigin) {
output.ReplaceWithDisabled();
}
// |mNextAudioTime| is updated as we process each audio sample in
// SendStreamAudio(). This is consistent with how |mNextVideoTime|
// is updated for video samples.
if (output.GetDuration() > 0) {
mediaStream->AppendToTrack(audioTrackId, &output);
}
if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
mediaStream->EndTrack(audioTrackId);
stream->mHaveSentFinishAudio = true;
}
endPosition = std::max(endPosition,
mediaStream->TicksToTimeRoundDown(mInfo.mAudio.mRate,
stream->mAudioFramesWritten));
CheckedInt64 playedUsecs = mStreamStartTime +
FramesToUsecs(stream->mAudioFramesWritten, mInfo.mAudio.mRate);
if (playedUsecs.isValid()) {
OnAudioEndTimeUpdate(playedUsecs.value());
}
}
if (mInfo.HasVideo()) {
MOZ_ASSERT(stream->mNextVideoTime != -1, "Should've been initialized");
TrackID videoTrackId = mInfo.mVideo.mTrackId;
nsAutoTArray<nsRefPtr<VideoData>,10> video;
// It's OK to hold references to the VideoData because VideoData
// is ref-counted.
VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
VideoSegment output;
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoData* v = video[i];
if (stream->mNextVideoTime < v->mTime) {
VERBOSE_LOG("writing last video to MediaStream %p for %lldus",
mediaStream, v->mTime - stream->mNextVideoTime);
// Write the last video frame to catch up. mLastVideoImage can be null
// here, which is fine; it just means there's no video.
// TODO: |mLastVideoImage| should come from the last image rendered
// by the state machine. This will avoid the black frame when capture
// happens in the middle of playback (especially in the middle of a
// video frame). E.g. if we have a video frame that is 30 sec long
// and capture happens at 15 sec, we'll have to append a black frame
// that is 15 sec long.
WriteVideoToMediaStream(mediaStream, stream->mLastVideoImage,
v->mTime, stream->mNextVideoTime, stream->mLastVideoImageDisplaySize,
&output);
stream->mNextVideoTime = v->mTime;
}
if (stream->mNextVideoTime < v->GetEndTime()) {
VERBOSE_LOG("writing video frame %lldus to MediaStream %p for %lldus",
v->mTime, mediaStream, v->GetEndTime() - stream->mNextVideoTime);
WriteVideoToMediaStream(mediaStream, v->mImage,
v->GetEndTime(), stream->mNextVideoTime, v->mDisplay,
&output);
stream->mNextVideoTime = v->GetEndTime();
stream->mLastVideoImage = v->mImage;
stream->mLastVideoImageDisplaySize = v->mDisplay;
} else {
VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream",
v->mTime, v->GetEndTime());
}
}
// Check that the output is not empty.
if (output.GetLastFrame()) {
stream->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
}
if (!isSameOrigin) {
output.ReplaceWithDisabled();
}
if (output.GetDuration() > 0) {
mediaStream->AppendToTrack(videoTrackId, &output);
}
if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
if (stream->mEOSVideoCompensation) {
VideoSegment endSegment;
// Calculate the deviation clock time (one stream-time tick) from DecodedStream.
int64_t deviation_usec = mediaStream->StreamTimeToMicroseconds(1);
WriteVideoToMediaStream(mediaStream, stream->mLastVideoImage,
stream->mNextVideoTime + deviation_usec, stream->mNextVideoTime,
stream->mLastVideoImageDisplaySize, &endSegment);
stream->mNextVideoTime += deviation_usec;
MOZ_ASSERT(endSegment.GetDuration() > 0);
if (!isSameOrigin) {
endSegment.ReplaceWithDisabled();
}
mediaStream->AppendToTrack(videoTrackId, &endSegment);
}
mediaStream->EndTrack(videoTrackId);
stream->mHaveSentFinishVideo = true;
}
endPosition = std::max(endPosition,
mediaStream->MicrosecondsToStreamTimeRoundDown(
stream->mNextVideoTime - mStreamStartTime));
}
if (!stream->mHaveSentFinish) {
stream->mStream->AdvanceKnownTracksTime(endPosition);
}
if (finished && !stream->mHaveSentFinish) {
stream->mHaveSentFinish = true;
stream->mStream->Finish();
if (mInfo.HasAudio()) {
CheckedInt64 playedUsecs = mDecodedStream.AudioEndTime(
mStreamStartTime, mInfo.mAudio.mRate);
if (playedUsecs.isValid()) {
OnAudioEndTimeUpdate(playedUsecs.value());
}
}
@@ -620,21 +405,8 @@ bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
GetDecodedAudioDuration() < aAmpleAudioUSecs) {
return false;
}
if (!mAudioCaptured) {
return true;
}
DecodedStreamData* stream = GetDecodedStream();
if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
MOZ_ASSERT(mInfo.HasAudio());
TrackID audioTrackId = mInfo.mAudio.mTrackId;
if (!stream->mStream->HaveEnoughBuffered(audioTrackId)) {
return false;
}
}
return true;
return !mAudioCaptured || mDecodedStream.HaveEnoughAudio(mInfo);
}
bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
@@ -646,17 +418,7 @@ bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
return false;
}
DecodedStreamData* stream = GetDecodedStream();
if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
MOZ_ASSERT(mInfo.HasVideo());
TrackID videoTrackId = mInfo.mVideo.mTrackId;
if (!stream->mStream->HaveEnoughBuffered(videoTrackId)) {
return false;
}
}
return true;
return !mAudioCaptured || mDecodedStream.HaveEnoughVideo(mInfo);
}
bool

dom/media/MediaDecoderStateMachine.h

@@ -635,11 +635,6 @@ protected:
// The decoder monitor must be held.
void CheckIfDecodeComplete();
// Copy audio from an AudioData packet to aOutput. This may require
// inserting silence depending on the timing of the audio packet.
void SendStreamAudio(AudioData* aAudio, DecodedStreamData* aStream,
AudioSegment* aOutput);
// Performs one "cycle" of the state machine. Polls the state, and may send
// a video frame to be displayed, and generally manages the decode. Called
// periodically via timer to ensure the video stays in sync.