Bug 1570673 - Add an active state to VideoFrameConverter and propagate it from MediaPipeline. r=bwc

Differential Revision: https://phabricator.services.mozilla.com/D40598

--HG--
extra : moz-landing-system : lando
Andreas Pehrson 2019-08-05 18:00:48 +00:00
parent 34e0a5f4fe
commit 50628ddd24
3 changed files with 145 additions and 60 deletions
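
At a high level, the patch splits the converter's old single mEnabled flag into two independent states: mActive (is anything downstream, i.e. the transport via MediaPipeline, consuming converted frames?) and mTrackEnabled (should real frames be sent, or black?). While inactive, the converter only records the frame most recently queued for processing; SetActive(true) then re-processes that frame so a receiver gets an image immediately. Below is a standalone sketch of that record-and-replay pattern in plain C++. It is not Gecko code: Frame, ConverterModel, and the mutex are stand-ins for FrameToProcess, VideoFrameConverter, and its TaskQueue.

#include <cstdio>
#include <mutex>

// Stand-in for FrameToProcess. serial == -2 mirrors the patch's
// "no image queued yet" sentinel.
struct Frame {
  int serial = -2;
};

class ConverterModel {
 public:
  void SetActive(bool aActive) {
    std::lock_guard<std::mutex> lock(mMutex);
    if (mActive == aActive) {
      return;
    }
    mActive = aActive;
    // On activation, replay the frame recorded while inactive, if any.
    if (mActive && mLastQueued.serial != -2) {
      Process(mLastQueued);
    }
  }

  void QueueFrame(Frame aFrame) {
    std::lock_guard<std::mutex> lock(mMutex);
    if (aFrame.serial == mLastQueued.serial) {
      return;  // duplicate frame, like the Serial() check in the patch
    }
    mLastQueued = aFrame;
    if (!mActive) {
      return;  // inactive: record only, convert nothing
    }
    Process(aFrame);
  }

 private:
  void Process(const Frame& aFrame) {
    std::printf("converted frame with serial %d\n", aFrame.serial);
  }

  std::mutex mMutex;  // the real code serializes on a TaskQueue instead
  bool mActive = false;
  Frame mLastQueued;
};

int main() {
  ConverterModel c;
  c.QueueFrame({1});  // inactive: recorded, not converted
  c.QueueFrame({2});  // inactive: replaces the recorded frame
  c.SetActive(true);  // prints: converted frame with serial 2
  c.QueueFrame({3});  // active: converted immediately
}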

VideoFrameConverter.h

@@ -59,11 +59,9 @@ class VideoFrameConverter {
             new TaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
                           "VideoFrameConverter")),
         mPacingTimer(new MediaTimer()),
-        mLastImage(
-            -2),  // -2 or -1 are not guaranteed invalid serials (Bug 1262134).
         mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE),
-        mLastFrameQueuedForProcessing(TimeStamp::Now()),
-        mEnabled(true) {
+        mActive(false),
+        mTrackEnabled(true) {
     MOZ_COUNT_CTOR(VideoFrameConverter);
   }
@@ -103,20 +101,52 @@ class VideoFrameConverter {
         [] {});
   }
 
-  void SetTrackEnabled(bool aEnabled) {
+  /**
+   * An active VideoFrameConverter actively converts queued video frames.
+   * While inactive, we keep track of the frame most recently queued for
+   * processing, so it can be immediately sent out once activated.
+   */
+  void SetActive(bool aActive) {
     nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
-        __func__, [self = RefPtr<VideoFrameConverter>(this), this, aEnabled] {
+        __func__, [self = RefPtr<VideoFrameConverter>(this), this, aActive] {
+          if (mActive == aActive) {
+            return;
+          }
+          MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
+                  ("VideoFrameConverter is now %s",
+                   aActive ? "active" : "inactive"));
+          mActive = aActive;
+          if (aActive && mLastFrameQueuedForProcessing.Serial() != -2) {
+            // After activating, we re-process the last image that was queued
+            // for processing so it can be immediately sent.
+            FrameToProcess f = mLastFrameQueuedForProcessing;
+            f.mTime = TimeStamp::Now();
+            ProcessVideoFrame(std::move(f));
+          }
+        }));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+    Unused << rv;
+  }
+
+  void SetTrackEnabled(bool aTrackEnabled) {
+    nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
+        __func__,
+        [self = RefPtr<VideoFrameConverter>(this), this, aTrackEnabled] {
+          if (mTrackEnabled == aTrackEnabled) {
+            return;
+          }
           MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
                   ("VideoFrameConverter Track is now %s",
-                   aEnabled ? "enabled" : "disabled"));
-          mEnabled = aEnabled;
-          if (!aEnabled && mLastFrameConverted) {
+                   aTrackEnabled ? "enabled" : "disabled"));
+          mTrackEnabled = aTrackEnabled;
+          if (!aTrackEnabled && mLastFrameConverted) {
            // After disabling, we re-send the last frame as black in case the
            // source had already stopped and no frame is coming soon.
-            ProcessVideoFrame(nullptr, TimeStamp::Now(),
-                              gfx::IntSize(mLastFrameConverted->width(),
-                                           mLastFrameConverted->height()),
-                              true);
+            ProcessVideoFrame(
+                FrameToProcess{nullptr, TimeStamp::Now(),
+                               gfx::IntSize(mLastFrameConverted->width(),
+                                            mLastFrameConverted->height()),
+                               true});
           }
         }));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
@@ -154,12 +184,36 @@ class VideoFrameConverter {
             mSameFrameTimer->Cancel();
           }
           mListeners.Clear();
           mBufferPool.Release();
+          mLastFrameQueuedForProcessing = FrameToProcess();
           mLastFrameConverted = nullptr;
         }));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
   }
 
  protected:
+  struct FrameToProcess {
+    RefPtr<layers::Image> mImage;
+    TimeStamp mTime = TimeStamp::Now();
+    gfx::IntSize mSize;
+    bool mForceBlack = false;
+
+    int32_t Serial() {
+      if (mForceBlack) {
+        // Set the last-img check to indicate black.
+        // -1 is not a guaranteed invalid serial. See bug 1262134.
+        return -1;
+      }
+      if (!mImage) {
+        // Set the last-img check to indicate reset.
+        // -2 is not a guaranteed invalid serial. See bug 1262134.
+        return -2;
+      }
+      return mImage->GetSerial();
+    }
+  };
+
   virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
 
   static void SameFrameTick(nsITimer* aTimer, void* aClosure) {
@@ -200,53 +254,48 @@ class VideoFrameConverter {
                           gfx::IntSize aSize, bool aForceBlack) {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
-    int32_t serial;
-    if (aForceBlack || !mEnabled) {
-      // Set the last-img check to indicate black.
-      // -1 is not a guaranteed invalid serial. See bug 1262134.
-      serial = -1;
-    } else if (!aImage) {
-      // Set the last-img check to indicate reset.
-      // -2 is not a guaranteed invalid serial. See bug 1262134.
-      serial = -2;
-    } else {
-      serial = aImage->GetSerial();
-    }
+    FrameToProcess frame{std::move(aImage), aTime, aSize,
+                         aForceBlack || !mTrackEnabled};
 
-    if (serial == mLastImage) {
+    if (frame.Serial() == mLastFrameQueuedForProcessing.Serial()) {
       // With a non-direct listener we get passed duplicate frames every ~10ms
       // even with no frame change.
       return;
     }
 
-    if (aTime <= mLastFrameQueuedForProcessing) {
-      MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
-              ("Dropping a frame because time did not progress (%.3f)",
-               (mLastFrameQueuedForProcessing - aTime).ToSeconds()));
+    if (frame.mTime <= mLastFrameQueuedForProcessing.mTime) {
+      MOZ_LOG(
+          gVideoFrameConverterLog, LogLevel::Debug,
+          ("Dropping a frame because time did not progress (%.3f)",
+           (mLastFrameQueuedForProcessing.mTime - frame.mTime).ToSeconds()));
       return;
     }
 
-    mLastImage = serial;
-    mLastFrameQueuedForProcessing = aTime;
+    mLastFrameQueuedForProcessing = std::move(frame);
+
+    if (!mActive) {
+      MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
+              ("Ignoring a frame because we're inactive"));
+      return;
+    }
 
     nsresult rv = mTaskQueue->Dispatch(
-        NewRunnableMethod<StoreCopyPassByLRef<RefPtr<layers::Image>>, TimeStamp,
-                          gfx::IntSize, bool>(
+        NewRunnableMethod<StoreCopyPassByLRef<FrameToProcess>>(
             "VideoFrameConverter::ProcessVideoFrame", this,
-            &VideoFrameConverter::ProcessVideoFrame, std::move(aImage), aTime,
-            aSize, aForceBlack || !mEnabled));
+            &VideoFrameConverter::ProcessVideoFrame,
+            mLastFrameQueuedForProcessing));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
   }
 
-  void ProcessVideoFrame(const RefPtr<layers::Image>& aImage, TimeStamp aTime,
-                         gfx::IntSize aSize, bool aForceBlack) {
+  void ProcessVideoFrame(const FrameToProcess& aFrame) {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
-    if (aTime < mLastFrameQueuedForProcessing) {
-      MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
-              ("Dropping a frame that is %.3f seconds behind latest",
-               (mLastFrameQueuedForProcessing - aTime).ToSeconds()));
+    if (aFrame.mTime < mLastFrameQueuedForProcessing.mTime) {
+      MOZ_LOG(
+          gVideoFrameConverterLog, LogLevel::Debug,
+          ("Dropping a frame that is %.3f seconds behind latest",
+           (mLastFrameQueuedForProcessing.mTime - aFrame.mTime).ToSeconds()));
       return;
     }
@@ -254,10 +303,10 @@ class VideoFrameConverter {
     // passed into QueueVideoChunk rather than the webrtc.org clock here.
     int64_t now = webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
 
-    if (aForceBlack) {
+    if (aFrame.mForceBlack) {
       // Send a black image.
       rtc::scoped_refptr<webrtc::I420Buffer> buffer =
-          mBufferPool.CreateBuffer(aSize.width, aSize.height);
+          mBufferPool.CreateBuffer(aFrame.mSize.width, aFrame.mSize.height);
       if (!buffer) {
         MOZ_DIAGNOSTIC_ASSERT(false,
                               "Buffers not leaving scope except for "
@@ -277,21 +326,21 @@ class VideoFrameConverter {
       return;
     }
 
-    if (!aImage) {
+    if (!aFrame.mImage) {
       // Don't send anything for null images.
       return;
     }
 
-    MOZ_ASSERT(aImage->GetSize() == aSize);
+    MOZ_ASSERT(aFrame.mImage->GetSize() == aFrame.mSize);
 
-    if (layers::PlanarYCbCrImage* image = aImage->AsPlanarYCbCrImage()) {
+    if (layers::PlanarYCbCrImage* image = aFrame.mImage->AsPlanarYCbCrImage()) {
       dom::ImageUtils utils(image);
       if (utils.GetFormat() == dom::ImageBitmapFormat::YUV420P &&
           image->GetData()) {
         const layers::PlanarYCbCrData* data = image->GetData();
         rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
             new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
-                aImage->GetSize().width, aImage->GetSize().height,
+                aFrame.mImage->GetSize().width, aFrame.mImage->GetSize().height,
                 data->mYChannel, data->mYStride, data->mCbChannel,
                 data->mCbCrStride, data->mCrChannel, data->mCbCrStride,
                 rtc::KeepRefUntilDone(image)));
@@ -307,7 +356,7 @@ class VideoFrameConverter {
     }
 
     rtc::scoped_refptr<webrtc::I420Buffer> buffer =
-        mBufferPool.CreateBuffer(aSize.width, aSize.height);
+        mBufferPool.CreateBuffer(aFrame.mSize.width, aFrame.mSize.height);
     if (!buffer) {
       MOZ_DIAGNOSTIC_ASSERT(++mFramesDropped <= 100, "Buffers must be leaking");
       MOZ_LOG(gVideoFrameConverterLog, LogLevel::Warning,
@@ -320,7 +369,7 @@ class VideoFrameConverter {
 #endif
 
     nsresult rv =
-        ConvertToI420(aImage, buffer->MutableDataY(), buffer->StrideY(),
+        ConvertToI420(aFrame.mImage, buffer->MutableDataY(), buffer->StrideY(),
                       buffer->MutableDataU(), buffer->StrideU(),
                       buffer->MutableDataV(), buffer->StrideV());
@@ -345,12 +394,12 @@ class VideoFrameConverter {
   TimeStamp mLastFrameQueuedForPacing;
 
   // Accessed only from mTaskQueue.
-  int32_t mLastImage;  // Serial number of last processed Image
   webrtc::I420BufferPool mBufferPool;
   nsCOMPtr<nsITimer> mSameFrameTimer;
-  TimeStamp mLastFrameQueuedForProcessing;
+  FrameToProcess mLastFrameQueuedForProcessing;
   UniquePtr<webrtc::VideoFrame> mLastFrameConverted;
-  bool mEnabled;
+  bool mActive;
+  bool mTrackEnabled;
 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
   size_t mFramesDropped = 0;
 #endif
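
One detail worth calling out in the header above: FrameToProcess::Serial() folds three frame states into a single integer key, so the duplicate-frame check in QueueForProcessing becomes one comparison. All forced-black frames share the key -1 and all imageless frames share -2, which is why the comments reference bug 1262134: those sentinels are not guaranteed to be invalid image serials. A standalone restatement of the consequence follows (plain C++, not Gecko code; FakeImage and FrameKey are hypothetical stand-ins).

#include <cassert>

struct FakeImage {
  int serial;  // real layers::Image serials come from a global counter
};

struct FrameKey {
  const FakeImage* image = nullptr;
  bool forceBlack = false;

  int Serial() const {
    if (forceBlack) return -1;  // every black frame compares equal
    if (!image) return -2;      // every imageless frame compares equal
    return image->serial;       // real frames compare by image identity
  }
};

int main() {
  FakeImage img{42};
  assert((FrameKey{&img, false}).Serial() == 42);
  assert((FrameKey{&img, true}).Serial() == -1);  // black masks the image
  assert((FrameKey{nullptr, false}).Serial() == -2);
  // Consequence: consecutive black frames are deduplicated even when they
  // wrap different images, which is exactly what the converter wants.
  assert((FrameKey{&img, true}).Serial() ==
         (FrameKey{nullptr, true}).Serial());
}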

VideoFrameConverter gtests

@@ -80,6 +80,7 @@ VideoChunk GenerateChunk(int32_t aWidth, int32_t aHeight, TimeStamp aTime) {
 TEST_F(VideoFrameConverterTest, BasicConversion) {
   TimeStamp now = TimeStamp::Now();
   VideoChunk chunk = GenerateChunk(640, 480, now);
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(chunk, false);
   auto frames = WaitForNConverted(1);
   ASSERT_EQ(frames.size(), 1U);
@@ -92,6 +93,7 @@ TEST_F(VideoFrameConverterTest, BasicPacing) {
   TimeStamp now = TimeStamp::Now();
   TimeStamp future = now + TimeDuration::FromMilliseconds(100);
   VideoChunk chunk = GenerateChunk(640, 480, future);
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(chunk, false);
   auto frames = WaitForNConverted(1);
   EXPECT_GT(TimeStamp::Now(), future);
@@ -106,6 +108,7 @@ TEST_F(VideoFrameConverterTest, MultiPacing) {
   TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
   TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
   VideoChunk chunk = GenerateChunk(640, 480, future1);
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(chunk, false);
   chunk = GenerateChunk(640, 480, future2);
   mConverter->QueueVideoChunk(chunk, false);
@@ -125,6 +128,7 @@ TEST_F(VideoFrameConverterTest, Duplication) {
   TimeStamp now = TimeStamp::Now();
   TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
   VideoChunk chunk = GenerateChunk(640, 480, future1);
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(chunk, false);
   auto frames = WaitForNConverted(2);
   EXPECT_GT(TimeStamp::Now(), now + TimeDuration::FromMilliseconds(1100));
@@ -141,6 +145,7 @@ TEST_F(VideoFrameConverterTest, DropsOld) {
   TimeStamp now = TimeStamp::Now();
   TimeStamp future1 = now + TimeDuration::FromMilliseconds(1000);
   TimeStamp future2 = now + TimeDuration::FromMilliseconds(100);
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
   mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
   auto frames = WaitForNConverted(1);
@@ -159,6 +164,7 @@ TEST_F(VideoFrameConverterTest, BlackOnDisable) {
   TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
   TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
   TimeStamp future3 = now + TimeDuration::FromMilliseconds(400);
+  mConverter->SetActive(true);
   mConverter->SetTrackEnabled(false);
   mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
   mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
@@ -178,6 +184,7 @@ TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
   TimeStamp start = TimeStamp::Now();
   TimeStamp future1 = start + TimeDuration::FromMilliseconds(100);
 
+  mConverter->SetActive(true);
   mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
   WaitForNConverted(1);
@@ -211,3 +218,27 @@ TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
   EXPECT_EQ(frames[1].first().height(), 240);
   EXPECT_GT(frames[1].second(), future3);
 }
+
+// We check that no frame is converted while inactive, and that upon
+// activating, the most recently queued frame gets converted.
+TEST_F(VideoFrameConverterTest, NoConversionsWhileInactive) {
+  TimeStamp now = TimeStamp::Now();
+  TimeStamp future1 = now - TimeDuration::FromMilliseconds(1);
+  TimeStamp future2 = now;
+  mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
+  mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
+
+  // SetActive needs to follow the same async path as the frames to be in sync.
+  auto q = MakeRefPtr<TaskQueue>(
+      GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
+      "VideoFrameConverterTest");
+  auto timer = MakeRefPtr<MediaTimer>(false);
+  timer->WaitFor(TimeDuration::FromMilliseconds(100), __func__)
+      ->Then(q, __func__,
+             [converter = mConverter] { converter->SetActive(true); });
+
+  auto frames = WaitForNConverted(1);
+  ASSERT_EQ(frames.size(), 1U);
+  EXPECT_EQ(frames[0].first().width(), 800);
+  EXPECT_EQ(frames[0].first().height(), 600);
+}
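
The timer-and-task-queue detour in NoConversionsWhileInactive exists because SetActive() is itself dispatched to the converter's task queue; for the test to observe "latest queued frame wins", the activation task must run after both frame tasks on the same serial queue. A loose standalone model of that ordering argument follows (plain C++, not Gecko code; SerialQueue is a hypothetical stand-in for the TaskQueue).

#include <cstdio>
#include <deque>
#include <functional>

// A serial executor: tasks run one at a time, in dispatch order. This is
// the property of the converter's TaskQueue that the test relies on.
struct SerialQueue {
  std::deque<std::function<void()>> tasks;
  void Dispatch(std::function<void()> aTask) {
    tasks.push_back(std::move(aTask));
  }
  void DrainAll() {
    while (!tasks.empty()) {
      auto task = std::move(tasks.front());
      tasks.pop_front();
      task();
    }
  }
};

int main() {
  SerialQueue q;
  int lastQueuedFrame = 0;
  q.Dispatch([&] { lastQueuedFrame = 1; });  // the 640x480 frame
  q.Dispatch([&] { lastQueuedFrame = 2; });  // the 800x600 frame
  // Dispatched last, so activation observes the latest frame; had it run
  // synchronously on another thread, it could have seen no frame at all.
  q.Dispatch([&] {
    std::printf("activated; replaying frame %d\n", lastQueuedFrame);
  });
  q.DrainAll();  // prints: activated; replaying frame 2
}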

MediaPipeline.cpp

@@ -676,7 +676,12 @@ class MediaPipelineTransmit::PipelineListener
     }
   }
 
-  void SetActive(bool aActive) { mActive = aActive; }
+  void SetActive(bool aActive) {
+    mActive = aActive;
+    if (mConverter) {
+      mConverter->SetActive(aActive);
+    }
+  }
   void SetEnabled(bool aEnabled) { mEnabled = aEnabled; }
 
   // These are needed since nested classes don't have access to any particular
@@ -1112,12 +1117,6 @@ void MediaPipelineTransmit::PipelineListener::
 void MediaPipelineTransmit::PipelineListener::NewData(
     const MediaSegment& aMedia, TrackRate aRate /* = 0 */) {
-  if (!mActive) {
-    MOZ_LOG(gMediaPipelineLog, LogLevel::Debug,
-            ("Discarding packets because transport not ready"));
-    return;
-  }
-
   if (mConduit->type() != (aMedia.GetType() == MediaSegment::AUDIO
                                ? MediaSessionConduit::AUDIO
                                : MediaSessionConduit::VIDEO)) {
@@ -1133,6 +1132,12 @@ void MediaPipelineTransmit::PipelineListener::NewData(
   if (aMedia.GetType() == MediaSegment::AUDIO) {
     MOZ_RELEASE_ASSERT(aRate > 0);
 
+    if (!mActive) {
+      MOZ_LOG(gMediaPipelineLog, LogLevel::Debug,
+              ("Discarding audio packets because transport not ready"));
+      return;
+    }
+
     const AudioSegment* audio = static_cast<const AudioSegment*>(&aMedia);
     for (AudioSegment::ConstChunkIterator iter(*audio); !iter.IsEnded();
          iter.Next()) {
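
The MediaPipeline.cpp change above is what motivates the converter's inactive mode: NewData() previously discarded every segment until the transport was ready, whereas now only audio is gated there. Video always reaches the converter, which, while inactive, caches the latest frame so it can be sent the moment the pipeline activates. A condensed restatement of the new control flow follows (plain C++, not Gecko code; PipelineModel and MediaType are illustrative stand-ins).

#include <cstdio>

enum class MediaType { Audio, Video };

struct PipelineModel {
  bool active = false;  // mirrors PipelineListener::mActive

  void NewData(MediaType aType) {
    if (aType == MediaType::Audio) {
      // Audio is still dropped until the transport is ready.
      if (!active) {
        std::printf("discarding audio: transport not ready\n");
        return;
      }
      std::printf("sending audio\n");
      return;
    }
    // Video is forwarded unconditionally; the (possibly inactive)
    // converter caches the latest frame instead of converting it.
    std::printf("forwarding video to converter\n");
  }
};

int main() {
  PipelineModel p;
  p.NewData(MediaType::Audio);  // discarded
  p.NewData(MediaType::Video);  // forwarded even though inactive
  p.active = true;
  p.NewData(MediaType::Audio);  // sent
}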