Bug 1172394 - Simplify MediaSink somewhat. r=padenot

This patch does several minor things:
- Moves SetSink (from setSinkId) to automatic coalescing of multiple calls
  through a Canonical/Mirror setup instead of a manual atomic counter.
- Simplifies the logic for when to update the sink in SetSink.
- Removes PlaybackParams as a general MediaSink property, as it only contains
  audio params.
- Makes PlaybackParams an internal AudioSink concept, that AudioSinkWrapper
  knows about.
- Ensures mMediaSink is only accessed on the decoder TaskQueue, to allow
  accessing mirrored members when creating it.

Differential Revision: https://phabricator.services.mozilla.com/D52043

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Andreas Pehrson 2019-11-13 08:55:54 +00:00
parent 53d52cde4e
commit 82611c9705
13 changed files with 122 additions and 124 deletions

View File

@ -6,6 +6,7 @@
#include "MediaDecoder.h"
#include "AudioDeviceInfo.h"
#include "DOMMediaStream.h"
#include "DecoderBenchmark.h"
#include "ImageContainer.h"
@ -225,10 +226,11 @@ void MediaDecoder::SetVolume(double aVolume) {
mVolume = aVolume;
}
RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
MOZ_ASSERT(NS_IsMainThread());
AbstractThread::AutoEnter context(AbstractMainThread());
return GetStateMachine()->InvokeSetSink(aSink);
mSinkDevice = aSinkDevice;
return GetStateMachine()->InvokeSetSink(aSinkDevice);
}
void MediaDecoder::SetOutputCaptured(bool aCaptured) {
@ -309,6 +311,7 @@ MediaDecoder::MediaDecoder(MediaDecoderInit& aInit)
INIT_CANONICAL(mVolume, aInit.mVolume),
INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
INIT_CANONICAL(mLooping, aInit.mLooping),
INIT_CANONICAL(mSinkDevice, nullptr),
INIT_CANONICAL(mOutputCaptured, false),
INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),

View File

@ -155,7 +155,7 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
void SetLooping(bool aLooping);
// Set the given device as the output device.
RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
bool GetMinimizePreroll() const { return mMinimizePreroll; }
@ -614,6 +614,10 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
Canonical<bool> mLooping;
// The device used with SetSink, or nullptr if no explicit device has been
// set.
Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
// Whether this MediaDecoder's output is captured. When captured, all decoded
// data must be played out through mOutputTracks.
Canonical<bool> mOutputCaptured;
@ -656,6 +660,9 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
return &mPreservesPitch;
}
AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
return &mSinkDevice;
}
AbstractCanonical<bool>* CanonicalOutputCaptured() {
return &mOutputCaptured;
}

View File

@ -2591,6 +2591,7 @@ RefPtr<ShutdownPromise> MediaDecoderStateMachine::ShutdownState::Enter() {
master->mVolume.DisconnectIfConnected();
master->mPreservesPitch.DisconnectIfConnected();
master->mLooping.DisconnectIfConnected();
master->mSinkDevice.DisconnectIfConnected();
master->mOutputCaptured.DisconnectIfConnected();
master->mOutputTracks.DisconnectIfConnected();
master->mOutputPrincipal.DisconnectIfConnected();
@ -2641,6 +2642,7 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
INIT_MIRROR(mVolume, 1.0),
INIT_MIRROR(mPreservesPitch, true),
INIT_MIRROR(mLooping, false),
INIT_MIRROR(mSinkDevice, nullptr),
INIT_MIRROR(mOutputCaptured, false),
INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
@ -2649,8 +2651,7 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
INIT_CANONICAL(mDuration, NullableTimeUnit()),
INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
INIT_CANONICAL(mIsAudioDataAudible, false),
mSetSinkRequestsCount(0) {
INIT_CANONICAL(mIsAudioDataAudible, false) {
MOZ_COUNT_CTOR(MediaDecoderStateMachine);
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@ -2677,6 +2678,7 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
mVolume.Connect(aDecoder->CanonicalVolume());
mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
mLooping.Connect(aDecoder->CanonicalLooping());
mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
@ -2698,6 +2700,8 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
mWatchManager.Watch(mOutputPrincipal,
&MediaDecoderStateMachine::OutputPrincipalChanged);
mMediaSink = CreateMediaSink();
MOZ_ASSERT(!mStateObj);
auto* s = new DecodeMetadataState(this);
mStateObj.reset(s);
@ -2714,21 +2718,23 @@ MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
MOZ_ASSERT(self->OnTaskQueue());
AudioSink* audioSink =
new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
self->Info().mAudio);
self->Info().mAudio, self->mSinkDevice.Ref());
self->mAudibleListener = audioSink->AudibleEvent().Connect(
self->mTaskQueue, self.get(),
&MediaDecoderStateMachine::AudioAudibleChanged);
return audioSink;
};
return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
mVolume, mPlaybackRate, mPreservesPitch);
}
already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
bool aOutputCaptured) {
already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
MOZ_ASSERT(OnTaskQueue());
RefPtr<MediaSink> audioSink =
aOutputCaptured
? new DecodedStream(this, mOutputTracks, mAudioQueue, mVideoQueue)
mOutputCaptured
? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
mPreservesPitch, mAudioQueue, mVideoQueue)
: CreateAudioSink();
RefPtr<MediaSink> mediaSink =
@ -2819,8 +2825,6 @@ nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) {
mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
mMediaSink = CreateMediaSink(mOutputCaptured);
nsresult rv = mReader->Init();
NS_ENSURE_SUCCESS(rv, rv);
@ -3544,7 +3548,7 @@ void MediaDecoderStateMachine::UpdateOutputCaptured() {
mMediaSink->Shutdown();
// Create a new sink according to whether output is captured.
mMediaSink = CreateMediaSink(mOutputCaptured);
mMediaSink = CreateMediaSink();
// Don't buffer as much when audio is captured because we don't need to worry
// about high latency audio devices.
@ -3570,42 +3574,35 @@ RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aSink);
Unused << ++mSetSinkRequestsCount;
return InvokeAsync(OwnerThread(), this, __func__,
&MediaDecoderStateMachine::SetSink, aSink);
}
RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
RefPtr<AudioDeviceInfo> aSink) {
RefPtr<AudioDeviceInfo> aSinkDevice) {
MOZ_ASSERT(OnTaskQueue());
if (mOutputCaptured) {
// Not supported yet.
return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
}
// Backup current playback parameters.
bool wasPlaying = mMediaSink->IsPlaying();
if (--mSetSinkRequestsCount > 0) {
MOZ_ASSERT(mSetSinkRequestsCount > 0);
return GenericPromise::CreateAndResolve(wasPlaying, __func__);
if (mSinkDevice.Ref() != aSinkDevice) {
// A new sink was set before this ran.
return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
}
MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
params.mSink = std::move(aSink);
if (!mMediaSink->IsStarted()) {
mMediaSink->SetPlaybackParams(params);
return GenericPromise::CreateAndResolve(false, __func__);
if (mMediaSink->AudioDevice() == aSinkDevice) {
// The sink has not changed.
return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
}
const bool wasPlaying = IsPlaying();
// Stop and shutdown the existing sink.
StopMediaSink();
mMediaSink->Shutdown();
// Create a new sink according to whether audio is captured.
mMediaSink = CreateMediaSink(false);
// Restore playback parameters.
mMediaSink->SetPlaybackParams(params);
mMediaSink = CreateMediaSink();
// Start the new sink
if (wasPlaying) {
nsresult rv = StartMediaSink();

View File

@ -424,7 +424,7 @@ class MediaDecoderStateMachine
// Always create mediasink which contains an AudioSink or DecodedStream
// inside.
already_AddRefed<MediaSink> CreateMediaSink(bool aOutputCaptured);
already_AddRefed<MediaSink> CreateMediaSink();
// Stops the media sink and shut it down.
// The decoder monitor must be held with exactly one lock count.
@ -704,6 +704,10 @@ class MediaDecoderStateMachine
// upon reaching the end.
Mirror<bool> mLooping;
// The device used with SetSink, or nullptr if no explicit device has been
// set.
Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
// Whether all output should be captured into mOutputTracks. While true, the
// media sink will only play if there are output tracks.
Mirror<bool> mOutputCaptured;
@ -729,9 +733,6 @@ class MediaDecoderStateMachine
// Used to distinguish whether the audio is producing sound.
Canonical<bool> mIsAudioDataAudible;
// Used to count the number of pending requests to set a new sink.
Atomic<int> mSetSinkRequestsCount;
public:
AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;

View File

@ -6,6 +6,7 @@
#include "AudioSink.h"
#include "AudioConverter.h"
#include "AudioDeviceInfo.h"
#include "MediaQueue.h"
#include "VideoUtils.h"
#include "mozilla/CheckedInt.h"
@ -34,9 +35,11 @@ using media::TimeUnit;
AudioSink::AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
const TimeUnit& aStartTime, const AudioInfo& aInfo)
const TimeUnit& aStartTime, const AudioInfo& aInfo,
AudioDeviceInfo* aAudioDevice)
: mStartTime(aStartTime),
mInfo(aInfo),
mAudioDevice(aAudioDevice),
mPlaying(true),
mMonitor("AudioSink"),
mWritten(0),
@ -183,7 +186,7 @@ nsresult AudioSink::InitializeAudioStream(const PlaybackParams& aParams) {
// StaticPrefs::accessibility_monoaudio_enable() or
// StaticPrefs::media_forcestereo_enabled() is applied.
nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
aParams.mSink);
mAudioDevice);
if (NS_FAILED(rv)) {
mAudioStream->Shutdown();
mAudioStream = nullptr;

View File

@ -23,11 +23,20 @@ namespace mozilla {
class AudioConverter;
class AudioSink : private AudioStream::DataSource {
using PlaybackParams = MediaSink::PlaybackParams;
public:
struct PlaybackParams {
PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
: mVolume(aVolume),
mPlaybackRate(aPlaybackRate),
mPreservesPitch(aPreservesPitch) {}
double mVolume;
double mPlaybackRate;
bool mPreservesPitch;
};
AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
const media::TimeUnit& aStartTime, const AudioInfo& aInfo);
const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
AudioDeviceInfo* aAudioDevice);
~AudioSink();
@ -59,6 +68,8 @@ class AudioSink : private AudioStream::DataSource {
void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);
const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
private:
// Allocate and initialize mAudioStream. Returns NS_OK on success.
nsresult InitializeAudioStream(const PlaybackParams& aParams);
@ -87,6 +98,10 @@ class AudioSink : private AudioStream::DataSource {
const AudioInfo mInfo;
// The output device this AudioSink is playing data to. The system's default
// device is used if this is null.
const RefPtr<AudioDeviceInfo> mAudioDevice;
// Used on the task queue of MDSM only.
bool mPlaying;

View File

@ -21,21 +21,6 @@ void AudioSinkWrapper::Shutdown() {
mCreator = nullptr;
}
const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
AssertOwnerThread();
return mParams;
}
void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
AssertOwnerThread();
if (mAudioSink) {
mAudioSink->SetVolume(aParams.mVolume);
mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
}
mParams = aParams;
}
RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
AssertOwnerThread();
MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
@ -154,6 +139,11 @@ void AudioSinkWrapper::SetPlaying(bool aPlaying) {
}
}
double AudioSinkWrapper::PlaybackRate() const {
AssertOwnerThread();
return mParams.mPlaybackRate;
}
nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
const MediaInfo& aInfo) {
AssertOwnerThread();

View File

@ -24,6 +24,8 @@ class MediaQueue;
* A wrapper around AudioSink to provide the interface of MediaSink.
*/
class AudioSinkWrapper : public MediaSink {
using PlaybackParams = AudioSink::PlaybackParams;
// An AudioSink factory.
class Creator {
public:
@ -46,19 +48,18 @@ class AudioSinkWrapper : public MediaSink {
template <typename Function>
AudioSinkWrapper(AbstractThread* aOwnerThread,
const MediaQueue<AudioData>& aAudioQueue,
const Function& aFunc)
const Function& aFunc, double aVolume, double aPlaybackRate,
bool aPreservesPitch)
: mOwnerThread(aOwnerThread),
mCreator(new CreatorImpl<Function>(aFunc)),
mIsStarted(false),
mParams(aVolume, aPlaybackRate, aPreservesPitch),
// Give an invalid value to facilitate debug if used before playback
// starts.
mPlayDuration(media::TimeUnit::Invalid()),
mAudioEnded(true),
mAudioQueue(aAudioQueue) {}
const PlaybackParams& GetPlaybackParams() const override;
void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
media::TimeUnit GetEndTime(TrackType aType) const override;
media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
@ -69,6 +70,8 @@ class AudioSinkWrapper : public MediaSink {
void SetPreservesPitch(bool aPreservesPitch) override;
void SetPlaying(bool aPlaying) override;
double PlaybackRate() const override;
nsresult Start(const media::TimeUnit& aStartTime,
const MediaInfo& aInfo) override;
void Stop() override;

View File

@ -363,7 +363,8 @@ void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
DecodedStream::DecodedStream(
MediaDecoderStateMachine* aStateMachine,
nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
double aPlaybackRate, bool aPreservesPitch,
MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
: mOwnerThread(aStateMachine->OwnerThread()),
mWatchManager(this, mOwnerThread),
@ -371,6 +372,9 @@ DecodedStream::DecodedStream(
mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
"DecodedStream::mPrincipalHandle (Mirror)"),
mOutputTracks(std::move(aOutputTracks)),
mVolume(aVolume),
mPlaybackRate(aPlaybackRate),
mPreservesPitch(aPreservesPitch),
mAudioQueue(aAudioQueue),
mVideoQueue(aVideoQueue) {
mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
@ -382,16 +386,6 @@ DecodedStream::~DecodedStream() {
MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}
const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
AssertOwnerThread();
return mParams;
}
void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
AssertOwnerThread();
mParams = aParams;
}
RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
AssertOwnerThread();
MOZ_ASSERT(mStartTime.isSome());
@ -549,17 +543,22 @@ void DecodedStream::SetPlaying(bool aPlaying) {
void DecodedStream::SetVolume(double aVolume) {
AssertOwnerThread();
mParams.mVolume = aVolume;
mVolume = aVolume;
}
void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
AssertOwnerThread();
mParams.mPlaybackRate = aPlaybackRate;
mPlaybackRate = aPlaybackRate;
}
void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
AssertOwnerThread();
mParams.mPreservesPitch = aPreservesPitch;
mPreservesPitch = aPreservesPitch;
}
double DecodedStream::PlaybackRate() const {
AssertOwnerThread();
return mPlaybackRate;
}
static void SendStreamAudio(DecodedStreamData* aStream,
@ -849,7 +848,7 @@ void DecodedStream::SendData() {
return;
}
SendAudio(mParams.mVolume, mPrincipalHandle);
SendAudio(mVolume, mPrincipalHandle);
SendVideo(mPrincipalHandle);
}

View File

@ -33,18 +33,13 @@ template <class T>
class MediaQueue;
class DecodedStream : public MediaSink {
using MediaSink::PlaybackParams;
public:
DecodedStream(MediaDecoderStateMachine* aStateMachine,
nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
double aVolume, double aPlaybackRate, bool aPreservesPitch,
MediaQueue<AudioData>& aAudioQueue,
MediaQueue<VideoData>& aVideoQueue);
// MediaSink functions.
const PlaybackParams& GetPlaybackParams() const override;
void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
media::TimeUnit GetEndTime(TrackType aType) const override;
media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
@ -58,6 +53,8 @@ class DecodedStream : public MediaSink {
void SetPreservesPitch(bool aPreservesPitch) override;
void SetPlaying(bool aPlaying) override;
double PlaybackRate() const override;
nsresult Start(const media::TimeUnit& aStartTime,
const MediaInfo& aInfo) override;
void Stop() override;
@ -100,7 +97,9 @@ class DecodedStream : public MediaSink {
Mirror<PrincipalHandle> mPrincipalHandle;
const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
PlaybackParams mParams;
double mVolume;
double mPlaybackRate;
bool mPreservesPitch;
media::NullableTimeUnit mStartTime;
media::TimeUnit mLastOutputTime;

View File

@ -7,7 +7,6 @@
#ifndef MediaSink_h_
#define MediaSink_h_
#include "AudioDeviceInfo.h"
#include "MediaInfo.h"
#include "mozilla/MozPromise.h"
#include "mozilla/RefPtr.h"
@ -39,23 +38,6 @@ class MediaSink {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
typedef mozilla::TrackInfo::TrackType TrackType;
struct PlaybackParams {
PlaybackParams()
: mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
double mVolume;
double mPlaybackRate;
bool mPreservesPitch;
RefPtr<AudioDeviceInfo> mSink;
};
// Return the playback parameters of this sink.
// Can be called in any state.
virtual const PlaybackParams& GetPlaybackParams() const = 0;
// Set the playback parameters of this sink.
// Can be called in any state.
virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
// EndedPromise needs to be a non-exclusive promise as it is shared between
// both the AudioSink and VideoSink.
typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
@ -100,6 +82,10 @@ class MediaSink {
// Pause/resume the playback. Only work after playback starts.
virtual void SetPlaying(bool aPlaying) = 0;
// Get the playback rate.
// Can be called in any state.
virtual double PlaybackRate() const = 0;
// Single frame rendering operation may need to be done before playback
// started (1st frame) or right after seek completed or playback stopped.
// Do nothing if this sink has no video track. Can be called in any state.
@ -122,6 +108,10 @@ class MediaSink {
// Can be called in any state.
virtual bool IsPlaying() const = 0;
// The audio output device this MediaSink is playing audio data to. The
// default device is used if this returns null.
virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
// Called on the state machine thread to shut down the sink. All resources
// allocated by this sink should be released.
// Must be called after playback stopped.

View File

@ -156,18 +156,6 @@ VideoSink::~VideoSink() {
#endif
}
const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
AssertOwnerThread();
return mAudioSink->GetPlaybackParams();
}
void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
AssertOwnerThread();
mAudioSink->SetPlaybackParams(aParams);
}
RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
AssertOwnerThread();
MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
@ -223,6 +211,12 @@ void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
mAudioSink->SetPreservesPitch(aPreservesPitch);
}
double VideoSink::PlaybackRate() const {
AssertOwnerThread();
return mAudioSink->PlaybackRate();
}
void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
#ifdef XP_WIN
const bool needed = IsPlaying();
@ -440,8 +434,8 @@ void VideoSink::TryUpdateRenderedVideoFrames() {
// If we send this future frame to the compositor now, it will be rendered
// immediately and break A/V sync. Instead, we schedule a timer to send it
// later.
int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
mAudioSink->GetPlaybackParams().mPlaybackRate;
int64_t delta =
(v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
RefPtr<VideoSink> self = this;
mUpdateScheduler.Ensure(
@ -481,7 +475,7 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
AutoTArray<ImageContainer::NonOwningImage, 16> images;
TimeStamp lastFrameTime;
MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
double playbackRate = mAudioSink->PlaybackRate();
for (uint32_t i = 0; i < frames.Length(); ++i) {
VideoData* frame = frames[i];
bool wasSent = frame->IsSentToCompositor();
@ -499,8 +493,8 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
MOZ_ASSERT(!aClockTimeStamp.IsNull());
int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
TimeStamp t = aClockTimeStamp +
TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
TimeStamp t =
aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
// Timestamps out of order; drop the new frame. In theory we should
// probably replace the previous frame with the new frame if the
@ -613,9 +607,8 @@ void VideoSink::UpdateRenderedVideoFrames() {
int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
MIN_UPDATE_INTERVAL_US);
TimeStamp target =
nowTime + TimeDuration::FromMicroseconds(
delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
delta / mAudioSink->PlaybackRate());
RefPtr<VideoSink> self = this;
mUpdateScheduler.Ensure(
@ -647,7 +640,7 @@ void VideoSink::MaybeResolveEndPromise() {
"end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
mAudioSink->GetPlaybackParams().mPlaybackRate;
mAudioSink->PlaybackRate();
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
self->mEndPromiseHolder.ResolveIfExists(true, __func__);

View File

@ -32,10 +32,6 @@ class VideoSink : public MediaSink {
MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);
const PlaybackParams& GetPlaybackParams() const override;
void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
TimeUnit GetEndTime(TrackType aType) const override;
@ -52,6 +48,8 @@ class VideoSink : public MediaSink {
void SetPlaying(bool aPlaying) override;
double PlaybackRate() const override;
void Redraw(const VideoInfo& aInfo) override;
nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;