diff --git a/dom/media/VideoFrameConverter.h b/dom/media/VideoFrameConverter.h index 9f58cccfe35c..8490d3727509 100644 --- a/dom/media/VideoFrameConverter.h +++ b/dom/media/VideoFrameConverter.h @@ -59,7 +59,8 @@ class VideoFrameConverter { new TaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER), "VideoFrameConverter")), mPacingTimer(new MediaTimer()), - mLastImage(-1), // -1 is not a guaranteed invalid serial (Bug 1262134). + mLastImage( + -2), // -2 or -1 are not guaranteed invalid serials (Bug 1262134). mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE), mLastFrameQueuedForProcessing(TimeStamp::Now()), mEnabled(true) { @@ -203,6 +204,10 @@ class VideoFrameConverter { // Set the last-img check to indicate black. // -1 is not a guaranteed invalid serial. See bug 1262134. serial = -1; + } else if (!aImage) { + // Set the last-img check to indicate reset. + // -2 is not a guaranteed invalid serial. See bug 1262134. + serial = -2; } else { serial = aImage->GetSerial(); } @@ -271,7 +276,11 @@ class VideoFrameConverter { return; } - MOZ_RELEASE_ASSERT(aImage, "Must have image if not forcing black"); + if (!aImage) { + // Don't send anything for null images. + return; + } + MOZ_ASSERT(aImage->GetSize() == aSize); if (layers::PlanarYCbCrImage* image = aImage->AsPlanarYCbCrImage()) { diff --git a/dom/media/VideoStreamTrack.cpp b/dom/media/VideoStreamTrack.cpp index 1557ba58618a..00bdf809d2a4 100644 --- a/dom/media/VideoStreamTrack.cpp +++ b/dom/media/VideoStreamTrack.cpp @@ -98,8 +98,10 @@ class VideoOutput : public DirectMediaStreamTrackListener { lastPrincipalHandle = chunk.GetPrincipalHandle(); } - // Don't update if there are no changes. if (images.IsEmpty()) { + // This could happen if the only images in mFrames are null. We leave the + // container at the current frame in this case. 
+ mVideoFrameContainer->ClearFutureFrames(); return; } @@ -117,8 +119,6 @@ class VideoOutput : public DirectMediaStreamTrackListener { mMainThread->Dispatch(NewRunnableMethod("VideoFrameContainer::Invalidate", mVideoFrameContainer, &VideoFrameContainer::Invalidate)); - - images.ClearAndRetainStorage(); } public: diff --git a/dom/media/gtest/TestVideoFrameConverter.cpp b/dom/media/gtest/TestVideoFrameConverter.cpp index 294e4c85d93a..932475200e31 100644 --- a/dom/media/gtest/TestVideoFrameConverter.cpp +++ b/dom/media/gtest/TestVideoFrameConverter.cpp @@ -173,3 +173,39 @@ TEST_F(VideoFrameConverterTest, BlackOnDisable) { EXPECT_EQ(frames[1].first().height(), 480); EXPECT_GT(frames[1].second(), now + TimeDuration::FromMilliseconds(1100)); } + +TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) { + TimeStamp now = TimeStamp::Now(); + TimeStamp future1 = now + TimeDuration::FromMilliseconds(100); + TimeStamp future2 = now + TimeDuration::FromMilliseconds(200); + TimeStamp future3 = now + TimeDuration::FromMilliseconds(150); + + mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false); + WaitForNConverted(1); + + // We are now at t=100ms+. Queue a future frame and jump back in time to + // signal a reset. + + mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false); + VideoChunk nullChunk; + nullChunk.mFrame = VideoFrame(nullptr, gfx::IntSize(800, 600)); + nullChunk.mTimeStamp = TimeStamp::Now(); + ASSERT_GT(nullChunk.mTimeStamp, future1); + mConverter->QueueVideoChunk(nullChunk, false); + + // We queue one more chunk after the reset so we don't have to wait a full + // second for the same-frame timer. It has a different time and resolution + // so we can differentiate them. 
TimeUnit currentPosition = GetPosition(&currentTime);
+ resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize, + aPrincipalHandle, false, currentTime); + mData->mStream->AppendToTrack(mInfo.mVideo.mTrackId, &resetter); + + // Consumer buffers have been reset. We now set mNextVideoTime to the start + // time of the current frame, so that it can be displayed again on resuming. + if (RefPtr v = mVideoQueue.PeekFront()) { + mData->mNextVideoTime = v->mTime; + } else { + // There was no current frame in the queue. We set the next time to push to + // the current time, so we at least don't resume starting in the future. + mData->mNextVideoTime = currentPosition; + } +} + void DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle) { AssertOwnerThread(); @@ -727,6 +765,10 @@ void DecodedStream::SendData() { return; } + if (!mPlaying) { + return; + } + SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle); SendVideo(mSameOrigin, mPrincipalHandle); } @@ -772,6 +814,11 @@ void DecodedStream::NotifyOutput(int64_t aTime) { void DecodedStream::PlayingChanged() { AssertOwnerThread(); + if (!mPlaying) { + // On seek or pause we discard future frames. + ResetVideo(mPrincipalHandle); + } + mAbstractMainThread->Dispatch(NewRunnableMethod( "OutputStreamManager::SetPlaying", mOutputStreamManager, &OutputStreamManager::SetPlaying, mPlaying)); diff --git a/dom/media/mediasink/DecodedStream.h b/dom/media/mediasink/DecodedStream.h index ca76dec42fdd..d36cbaabaf88 100644 --- a/dom/media/mediasink/DecodedStream.h +++ b/dom/media/mediasink/DecodedStream.h @@ -80,6 +80,7 @@ class DecodedStream : public MediaSink { void SendAudio(double aVolume, bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle); void SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle); + void ResetVideo(const PrincipalHandle& aPrincipalHandle); StreamTime SentDuration(); void SendData(); void NotifyOutput(int64_t aTime);