Backed out 15 changesets (bug 1500049, bug 1172394, bug 1546756, bug 1302379) for failures on browser_disabledForMediaStreamVideos.js. CLOSED TREE

Backed out changeset 355f090421a6 (bug 1500049)
Backed out changeset 306341d0b586 (bug 1302379)
Backed out changeset 3ff0d72d23a2 (bug 1546756)
Backed out changeset a4f256e68cef (bug 1172394)
Backed out changeset d0aa43657e8c (bug 1172394)
Backed out changeset edff95b6f724 (bug 1172394)
Backed out changeset 94bd21d9b396 (bug 1172394)
Backed out changeset 7e7baa73e1ef (bug 1172394)
Backed out changeset c3bd415507e8 (bug 1172394)
Backed out changeset 1c45b135318d (bug 1172394)
Backed out changeset c57c41e8c39e (bug 1172394)
Backed out changeset a796541fe5ef (bug 1172394)
Backed out changeset 89ad0b553b0f (bug 1172394)
Backed out changeset 744fb77a5833 (bug 1172394)
Backed out changeset afb4b226ff04 (bug 1172394)
Author: Csoregi Natalia
Date:   2019-11-14 00:32:51 +02:00
Parent: 6db46a2b36
Commit: 6ba30843e8

42 changed files with 1586 additions and 1148 deletions

(File diff suppressed because it is too large.)


@@ -113,26 +113,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   typedef mozilla::MediaDecoderOwner MediaDecoderOwner;
   typedef mozilla::MetadataTags MetadataTags;
 
-  // Helper struct to keep track of the MediaStreams returned by
-  // mozCaptureStream(). For each OutputMediaStream, dom::MediaTracks get
-  // captured into MediaStreamTracks which get added to
-  // OutputMediaStream::mStream.
-  struct OutputMediaStream {
-    OutputMediaStream(RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
-                      bool aFinishWhenEnded);
-    ~OutputMediaStream();
-
-    RefPtr<DOMMediaStream> mStream;
-    const bool mCapturingAudioOnly;
-    const bool mFinishWhenEnded;
-    // If mFinishWhenEnded is true, this is the URI of the first resource
-    // mStream got tracks for, if not a MediaStream.
-    nsCOMPtr<nsIURI> mFinishWhenEndedLoadingSrc;
-    // If mFinishWhenEnded is true, this is the first MediaStream mStream got
-    // tracks for, if not a resource.
-    RefPtr<DOMMediaStream> mFinishWhenEndedAttrStream;
-  };
-
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(HTMLMediaElement)
   NS_DECL_NSIMUTATIONOBSERVER_CONTENTREMOVED

@@ -271,9 +251,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   void DispatchAsyncEvent(const nsAString& aName) final;
 
   // Triggers a recomputation of readyState.
-  void UpdateReadyState() override {
-    mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
-  }
+  void UpdateReadyState() override { UpdateReadyStateInternal(); }
 
   // Dispatch events that were raised while in the bfcache
   nsresult DispatchPendingMediaEvents();

@@ -715,6 +693,10 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   Document* GetDocument() const override;
 
+  void ConstructMediaTracks(const MediaInfo* aInfo) override;
+
+  void RemoveMediaTracks() override;
+
   already_AddRefed<GMPCrashHelper> CreateGMPCrashHelper() override;
 
   nsISerialEventTarget* MainThreadEventTarget() {

@@ -747,17 +729,37 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   class AudioChannelAgentCallback;
   class ChannelLoader;
   class ErrorSink;
-  class MediaElementTrackSource;
   class MediaLoadListener;
   class MediaStreamRenderer;
   class MediaStreamTrackListener;
   class FirstFrameListener;
   class ShutdownObserver;
+  class StreamCaptureTrackSource;
 
   MediaDecoderOwner::NextFrameStatus NextFrameStatus();
 
   void SetDecoder(MediaDecoder* aDecoder);
 
+  // Holds references to the DOM wrappers for the MediaStreams that we're
+  // writing to.
+  struct OutputMediaStream {
+    OutputMediaStream();
+    ~OutputMediaStream();
+    RefPtr<DOMMediaStream> mStream;
+    // Dummy stream to keep mGraph from shutting down when MediaDecoder shuts
+    // down. Shared across all OutputMediaStreams as one stream is enough to
+    // keep the graph alive.
+    RefPtr<SharedDummyTrack> mGraphKeepAliveDummyStream;
+    bool mFinishWhenEnded;
+    bool mCapturingAudioOnly;
+    bool mCapturingDecoder;
+    bool mCapturingMediaStream;
+    // The following members are keeping state for a captured MediaStream.
+    nsTArray<Pair<nsString, RefPtr<MediaStreamTrackSource>>> mTracks;
+  };
+
   void PlayInternal(bool aHandlingUserInput);
 
   /** Use this method to change the mReadyState member, so required

@@ -852,35 +854,28 @@ class HTMLMediaElement : public nsGenericHTMLElement,
    */
   void NotifyMediaStreamTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack);
 
-  /**
-   * Convenience method to get in a single list all enabled AudioTracks and, if
-   * this is a video element, the selected VideoTrack.
-   */
-  void GetAllEnabledMediaTracks(nsTArray<RefPtr<MediaTrack>>& aTracks);
-
   /**
    * Enables or disables all tracks forwarded from mSrcStream to all
    * OutputMediaStreams. We do this for muting the tracks when pausing,
    * and unmuting when playing the media element again.
-   *
-   * If mSrcStream is unset, this does nothing.
    */
   void SetCapturedOutputStreamsEnabled(bool aEnabled);
 
   /**
-   * Create a new MediaStreamTrack for the TrackSource corresponding to aTrack
-   * and add it to the DOMMediaStream in aOutputStream. This automatically sets
-   * the output track to enabled or disabled depending on our current playing
-   * state.
+   * Create a new MediaStreamTrack for aTrack and add it to the DOMMediaStream
+   * in aOutputStream. This automatically sets the output track to enabled or
+   * disabled depending on our current playing state.
    */
-  enum class AddTrackMode { ASYNC, SYNC };
-  void AddOutputTrackSourceToOutputStream(
-      MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
-      AddTrackMode aMode = AddTrackMode::ASYNC);
+  void AddCaptureMediaTrackToOutputStream(dom::MediaTrack* aTrack,
+                                          OutputMediaStream& aOutputStream,
+                                          bool aAsyncAddtrack = true);
 
   /**
-   * Creates output track sources when this media element is captured, tracks
-   * exist, playback is not ended and readyState is >= HAVE_METADATA.
+   * Discard all output streams that are flagged to finish when playback ends.
    */
-  void UpdateOutputTrackSources();
+  void DiscardFinishWhenEndedOutputStreams();
 
   /**
    * Returns an DOMMediaStream containing the played contents of this

@@ -894,8 +889,8 @@ class HTMLMediaElement : public nsGenericHTMLElement,
    * reaching the stream. No video tracks will be captured in this case.
    */
   already_AddRefed<DOMMediaStream> CaptureStreamInternal(
-      StreamCaptureBehavior aFinishBehavior,
-      StreamCaptureType aStreamCaptureType, MediaTrackGraph* aGraph);
+      StreamCaptureBehavior aBehavior, StreamCaptureType aType,
+      MediaTrackGraph* aGraph);
 
   /**
    * Initialize a decoder as a clone of an existing decoder in another

@@ -1254,18 +1249,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // Pass information for deciding the video decode mode to decoder.
   void NotifyDecoderActivityChanges() const;
 
-  // Constructs an AudioTrack in mAudioTrackList if aInfo reports that audio is
-  // available, and a VideoTrack in mVideoTrackList if aInfo reports that video
-  // is available.
-  void ConstructMediaTracks(const MediaInfo* aInfo);
-
-  // Removes all MediaTracks from mAudioTrackList and mVideoTrackList and fires
-  // "removetrack" on the lists accordingly.
-  // Note that by spec, this should not fire "removetrack". However, it appears
-  // other user agents do, per
-  // https://wpt.fyi/results/media-source/mediasource-avtracks.html.
-  void RemoveMediaTracks();
-
   // Mark the decoder owned by the element as tainted so that the
   // suspend-video-decoder is disabled.
   void MarkAsTainted();

@@ -1347,6 +1330,9 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // enabled audio tracks, while mSrcStream is set.
   RefPtr<MediaStreamRenderer> mMediaStreamRenderer;
 
+  // True once mSrcStream's initial set of tracks are known.
+  bool mSrcStreamTracksAvailable = false;
+
   // True once PlaybackEnded() is called and we're playing a MediaStream.
   // Reset to false if we start playing mSrcStream again.
   Watchable<bool> mSrcStreamPlaybackEnded = {

@@ -1366,12 +1352,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // writing to.
   nsTArray<OutputMediaStream> mOutputStreams;
 
-  // Mapping for output tracks, from dom::MediaTrack ids to the
-  // MediaElementTrackSource that represents the source of all corresponding
-  // MediaStreamTracks captured from this element.
-  nsRefPtrHashtable<nsStringHashKey, MediaElementTrackSource>
-      mOutputTrackSources;
-
   // Holds a reference to the first-frame-getting track listener attached to
   // mSelectedVideoStreamTrack.
   RefPtr<FirstFrameListener> mFirstFrameListener;

@@ -1563,7 +1543,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // Playback of the video is paused either due to calling the
   // 'Pause' method, or playback not yet having started.
-  Watchable<bool> mPaused = {true, "HTMLMediaElement::mPaused"};
+  Watchable<bool> mPaused;
 
   // The following two fields are here for the private storage of the builtin
   // video controls, and control 'casting' of the video to external devices

@@ -1573,14 +1553,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // True if currently casting this video
   bool mIsCasting = false;
 
-  // Set while there are some OutputMediaStreams this media element's enabled
-  // and selected tracks are captured into. When set, all tracks are captured
-  // into the graph of this dummy track.
-  // NB: This is a SharedDummyTrack to allow non-default graphs (AudioContexts
-  // with an explicit sampleRate defined) to capture this element. When
-  // cross-graph tracks are supported, this can become a bool.
-  Watchable<RefPtr<SharedDummyTrack>> mTracksCaptured;
-
   // True if the sound is being captured.
   bool mAudioCaptured = false;

@@ -1682,8 +1654,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   EncryptionInfo mPendingEncryptedInitData;
 
   // True if the media's channel's download has been suspended.
-  Watchable<bool> mDownloadSuspendedByCache = {
-      false, "HTMLMediaElement::mDownloadSuspendedByCache"};
+  bool mDownloadSuspendedByCache = false;
 
   // Disable the video playback by track selection. This flag might not be
   // enough if we ever expand the ability of supporting multi-tracks video

@@ -1821,8 +1792,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   bool mIsBlessed = false;
 
   // True if the first frame has been successfully loaded.
-  Watchable<bool> mFirstFrameLoaded = {false,
-                                       "HTMLMediaElement::mFirstFrameLoaded"};
+  bool mFirstFrameLoaded = false;
 
   // Media elements also have a default playback start position, which must
   // initially be set to zero seconds. This time is used to allow the element to

@@ -1837,6 +1807,10 @@ class HTMLMediaElement : public nsGenericHTMLElement,
   // For use by mochitests. Enabling pref "media.test.video-suspend"
   bool mForcedHidden = false;
 
+  // True if audio tracks and video tracks are constructed and added into the
+  // track list, false if all tracks are removed from the track list.
+  bool mMediaTracksConstructed = false;
+
   Visibility mVisibilityState = Visibility::Untracked;
 
   UniquePtr<ErrorSink> mErrorSink;
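For orientation, here is a caller-side sketch of the restored CaptureStreamInternal() signature. The wrapper function and the specific enum values are assumptions inferred from this header (CaptureStreamInternal is private in the real class), so treat this as illustrative only, not part of the patch:

```cpp
// Hypothetical caller: not in the diff above. CaptureStreamInternal() is
// private to HTMLMediaElement; this sketch only shows how the restored
// (behavior, type, graph) parameter shape would be exercised.
already_AddRefed<DOMMediaStream> CaptureExample(HTMLMediaElement* aElement,
                                                MediaTrackGraph* aGraph) {
  // FINISH_WHEN_ENDED (assumed enum value) would instead make the returned
  // stream end with the current resource, captureStreamUntilEnded-style.
  return aElement->CaptureStreamInternal(
      StreamCaptureBehavior::CONTINUE_WHEN_ENDED,
      StreamCaptureType::CAPTURE_ALL_TRACKS, aGraph);
}
```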


@@ -221,27 +221,13 @@ void ChannelMediaDecoder::Shutdown() {
   mResourceCallback->Disconnect();
 
   MediaDecoder::Shutdown();
 
+  // Force any outstanding seek and byterange requests to complete
+  // to prevent shutdown from deadlocking.
   if (mResource) {
-    // Force any outstanding seek and byterange requests to complete
-    // to prevent shutdown from deadlocking.
-    mResourceClosePromise = mResource->Close();
+    mResource->Close();
   }
 }
 
-void ChannelMediaDecoder::ShutdownInternal() {
-  if (!mResourceClosePromise) {
-    MediaShutdownManager::Instance().Unregister(this);
-    return;
-  }
-
-  mResourceClosePromise->Then(
-      AbstractMainThread(), __func__,
-      [self = RefPtr<ChannelMediaDecoder>(this)] {
-        MediaShutdownManager::Instance().Unregister(self);
-      });
-  return;
-}
-
 nsresult ChannelMediaDecoder::Load(nsIChannel* aChannel,
                                    bool aIsPrivateBrowsing,
                                    nsIStreamListener** aStreamListener) {
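The removed ShutdownInternal() existed to keep the decoder registered with MediaShutdownManager until the asynchronous mResource->Close() promise resolved. A minimal standalone analogue of that gating idea, with std::future standing in for GenericPromise (all names hypothetical):

```cpp
#include <future>
#include <iostream>
#include <thread>

// Standalone analogue of the removed ShutdownInternal(): the shutdown
// blocker ("Unregister") must not run until the asynchronous Close() has
// completed, otherwise shutdown can race the in-flight close.
int main() {
  std::promise<void> closeDone;
  std::future<void> closePromise = closeDone.get_future();

  // The async close (mResource->Close() in the real code).
  std::thread closer([&] { closeDone.set_value(); });

  // The continuation: only "unregister" after the close resolves.
  closePromise.wait();
  std::cout << "close resolved; unregistering shutdown blocker\n";

  closer.join();
}
```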


@@ -59,7 +59,6 @@ class ChannelMediaDecoder
   };
 
  protected:
-  void ShutdownInternal() override;
   void OnPlaybackEvent(MediaPlaybackEvent&& aEvent) override;
   void DurationChanged() override;
   void MetadataLoaded(UniquePtr<MediaInfo> aInfo, UniquePtr<MetadataTags> aTags,

@@ -157,10 +156,6 @@ class ChannelMediaDecoder
   // True if we've been notified that the ChannelMediaResource has
   // a principal.
   bool mInitialChannelPrincipalKnown = false;
-
-  // Set in Shutdown() when we start closing mResource, if mResource is set.
-  // Must resolve before we unregister the shutdown blocker.
-  RefPtr<GenericPromise> mResourceClosePromise;
 };
 
 }  // namespace mozilla


@@ -589,15 +589,15 @@ nsresult ChannelMediaResource::SetupChannelHeaders(int64_t aOffset) {
   return NS_OK;
 }
 
-RefPtr<GenericPromise> ChannelMediaResource::Close() {
+nsresult ChannelMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   if (!mClosed) {
     CloseChannel();
+    mCacheStream.Close();
     mClosed = true;
-    return mCacheStream.Close();
   }
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 already_AddRefed<nsIPrincipal> ChannelMediaResource::GetCurrentPrincipal() {


@@ -117,7 +117,7 @@ class ChannelMediaResource
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override;
   void Resume() override;
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;


@@ -148,9 +148,7 @@ nsresult CloneableWithRangeMediaResource::Open(
   return NS_OK;
 }
 
-RefPtr<GenericPromise> CloneableWithRangeMediaResource::Close() {
-  return GenericPromise::CreateAndResolve(true, __func__);
-}
+nsresult CloneableWithRangeMediaResource::Close() { return NS_OK; }
 
 already_AddRefed<nsIPrincipal>
 CloneableWithRangeMediaResource::GetCurrentPrincipal() {


@@ -27,7 +27,7 @@ class CloneableWithRangeMediaResource : public BaseMediaResource {
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;


@@ -373,7 +373,6 @@ already_AddRefed<DOMMediaStream> DOMMediaStream::Clone() {
 }
 
 bool DOMMediaStream::Active() const { return mActive; }
-bool DOMMediaStream::Audible() const { return mAudible; }
 
 MediaStreamTrack* DOMMediaStream::GetTrackById(const nsAString& aId) const {
   for (const auto& track : mTracks) {

@@ -456,6 +455,20 @@ void DOMMediaStream::UnregisterTrackListener(TrackListener* aListener) {
   mTrackListeners.RemoveElement(aListener);
 }
 
+void DOMMediaStream::SetFinishedOnInactive(bool aFinishedOnInactive) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (mFinishedOnInactive == aFinishedOnInactive) {
+    return;
+  }
+
+  mFinishedOnInactive = aFinishedOnInactive;
+
+  if (mFinishedOnInactive && !ContainsLiveTracks(mTracks)) {
+    NotifyTrackRemoved(nullptr);
+  }
+}
+
 void DOMMediaStream::NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) {
   MOZ_ASSERT(NS_IsMainThread());

@@ -504,6 +517,10 @@ void DOMMediaStream::NotifyTrackRemoved(
     }
   }
 
+  if (!mFinishedOnInactive) {
+    return;
+  }
+
   if (mAudible) {
     // Check if we became inaudible.
     if (!ContainsLiveAudioTracks(mTracks)) {


@@ -144,9 +144,6 @@ class DOMMediaStream : public DOMEventTargetHelper,
   // NON-WebIDL
 
-  // Returns true if this stream contains a live audio track.
-  bool Audible() const;
-
   /**
    * Returns true if this DOMMediaStream has aTrack in mTracks.
    */

@@ -189,6 +186,10 @@ class DOMMediaStream : public DOMEventTargetHelper,
   // a dead pointer. Main thread only.
   void UnregisterTrackListener(TrackListener* aListener);
 
+  // Tells this MediaStream whether it can go inactive as soon as no tracks
+  // are live anymore.
+  void SetFinishedOnInactive(bool aFinishedOnInactive);
+
  protected:
   virtual ~DOMMediaStream();

@@ -239,6 +240,10 @@ class DOMMediaStream : public DOMEventTargetHelper,
   // True if this stream has live audio tracks.
   bool mAudible = false;
 
+  // For compatibility with mozCaptureStream, we in some cases do not go
+  // inactive until the MediaDecoder lets us. (Remove this in Bug 1302379)
+  bool mFinishedOnInactive = true;
+
 };
 
 NS_DEFINE_STATIC_IID_ACCESSOR(DOMMediaStream, NS_DOMMEDIASTREAM_IID)
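SetFinishedOnInactive() is the hook the decoder-capture path uses to keep a captured stream from going inactive prematurely. A hedged usage sketch; these call sites are assumptions for illustration, only the setter itself appears in the diff above:

```cpp
// Hypothetical capture-side call sites (not part of this patch).
void BeginDecoderCapture(DOMMediaStream* aCapturedStream) {
  // While a MediaDecoder feeds this stream, do not let it go inactive just
  // because no tracks are currently live (mozCaptureStream compatibility).
  aCapturedStream->SetFinishedOnInactive(false);
}

void EndDecoderCapture(DOMMediaStream* aCapturedStream) {
  // Playback ended: restore normal behavior. If no tracks are live, this
  // can immediately transition the stream to inactive (see the
  // NotifyTrackRemoved(nullptr) call in the implementation above).
  aCapturedStream->SetFinishedOnInactive(true);
}
```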


@@ -95,7 +95,7 @@ nsresult FileMediaResource::Open(nsIStreamListener** aStreamListener) {
   return NS_OK;
 }
 
-RefPtr<GenericPromise> FileMediaResource::Close() {
+nsresult FileMediaResource::Close() {
   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
 
   // Since mChennel is only accessed by main thread, there is no necessary to

@@ -105,7 +105,7 @@ RefPtr<GenericPromise> FileMediaResource::Close() {
     mChannel = nullptr;
   }
 
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 already_AddRefed<nsIPrincipal> FileMediaResource::GetCurrentPrincipal() {


@@ -23,7 +23,7 @@ class FileMediaResource : public BaseMediaResource {
   // Main thread
   nsresult Open(nsIStreamListener** aStreamListener) override;
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   void Suspend(bool aCloseImmediately) override {}
   void Resume() override {}
   already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;


@@ -161,7 +161,7 @@ class MediaCache {
   // file backing will be provided.
   static RefPtr<MediaCache> GetMediaCache(int64_t aContentLength);
 
-  nsISerialEventTarget* OwnerThread() const { return sThread; }
+  nsIEventTarget* OwnerThread() const { return sThread; }
 
   // Brutally flush the cache contents. Main thread only.
   void Flush();

@@ -2196,18 +2196,17 @@ bool MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock) {
   return true;
 }
 
-RefPtr<GenericPromise> MediaCacheStream::Close() {
+void MediaCacheStream::Close() {
   MOZ_ASSERT(NS_IsMainThread());
   if (!mMediaCache) {
-    return GenericPromise::CreateAndResolve(true, __func__);
+    return;
   }
-
-  return InvokeAsync(OwnerThread(), "MediaCacheStream::Close",
-                     [this, client = RefPtr<ChannelMediaResource>(mClient)] {
-                       AutoLock lock(mMediaCache->Monitor());
-                       CloseInternal(lock);
-                       return GenericPromise::CreateAndResolve(true, __func__);
-                     });
+  OwnerThread()->Dispatch(NS_NewRunnableFunction(
+      "MediaCacheStream::Close",
+      [this, client = RefPtr<ChannelMediaResource>(mClient)]() {
+        AutoLock lock(mMediaCache->Monitor());
+        CloseInternal(lock);
+      }));
 }
 
 void MediaCacheStream::CloseInternal(AutoLock& aLock) {

@@ -2735,7 +2734,7 @@ void MediaCacheStream::InitAsCloneInternal(MediaCacheStream* aOriginal) {
   lock.NotifyAll();
 }
 
-nsISerialEventTarget* MediaCacheStream::OwnerThread() const {
+nsIEventTarget* MediaCacheStream::OwnerThread() const {
   return mMediaCache->OwnerThread();
 }
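Both versions of MediaCacheStream::Close() capture `client = RefPtr<ChannelMediaResource>(mClient)` purely to keep the client alive until the task has run on the owner thread. A standalone analogue of that keep-alive idiom, using std::shared_ptr in place of RefPtr and a vector in place of the task queue (names hypothetical):

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Toy model of the keep-alive capture above: the dispatched task holds a
// strong reference, so the referenced object cannot die before it runs.
struct Client {
  ~Client() { std::cout << "client destroyed\n"; }
};

int main() {
  std::vector<std::function<void()>> ownerThreadQueue;  // stand-in task queue
  {
    auto client = std::make_shared<Client>();
    ownerThreadQueue.push_back(
        [keepAlive = client] { std::cout << "closing under lock\n"; });
    // 'client' goes out of scope here, but the queued task still owns it.
  }
  for (auto& task : ownerThreadQueue) task();  // runs, then releases Client
}
```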


@@ -217,12 +217,12 @@ class MediaCacheStream : public DecoderDoctorLifeLogger<MediaCacheStream> {
   // on this class.
   void InitAsClone(MediaCacheStream* aOriginal);
 
-  nsISerialEventTarget* OwnerThread() const;
+  nsIEventTarget* OwnerThread() const;
 
   // These are called on the main thread.
-  // This must be called (and resolve) before the ChannelMediaResource
+  // This must be called (and return) before the ChannelMediaResource
   // used to create this MediaCacheStream is deleted.
-  RefPtr<GenericPromise> Close();
+  void Close();
   // This returns true when the stream has been closed.
   bool IsClosed(AutoLock&) const { return mClosed; }
   // Returns true when this stream is can be shared by a new resource load.


@@ -6,7 +6,6 @@
 
 #include "MediaDecoder.h"
 
-#include "AudioDeviceInfo.h"
 #include "DOMMediaStream.h"
 #include "DecoderBenchmark.h"
 #include "ImageContainer.h"

@@ -226,46 +225,36 @@ void MediaDecoder::SetVolume(double aVolume) {
   mVolume = aVolume;
 }
 
-RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
+RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mSinkDevice = aSinkDevice;
-  return GetStateMachine()->InvokeSetSink(aSinkDevice);
+  return GetStateMachine()->InvokeSetSink(aSink);
 }
 
-void MediaDecoder::SetOutputCaptured(bool aCaptured) {
+void MediaDecoder::AddOutputStream(DOMMediaStream* aStream,
+                                   SharedDummyTrack* aDummyStream) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mOutputCaptured = aCaptured;
-}
-
-void MediaDecoder::AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
-  AbstractThread::AutoEnter context(AbstractMainThread());
-  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
-  tracks.AppendElement(std::move(aTrack));
-  mOutputTracks = tracks;
-}
-
-void MediaDecoder::RemoveOutputTrack(
-    const RefPtr<ProcessedMediaTrack>& aTrack) {
-  MOZ_ASSERT(NS_IsMainThread());
-  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
-  AbstractThread::AutoEnter context(AbstractMainThread());
-  nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
-  if (tracks.RemoveElement(aTrack)) {
-    mOutputTracks = tracks;
+  mDecoderStateMachine->EnsureOutputStreamManager(aDummyStream);
+  if (mInfo) {
+    mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
   }
+  mDecoderStateMachine->AddOutputStream(aStream);
 }
 
-void MediaDecoder::SetOutputTracksPrincipal(
-    const RefPtr<nsIPrincipal>& aPrincipal) {
+void MediaDecoder::RemoveOutputStream(DOMMediaStream* aStream) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
   AbstractThread::AutoEnter context(AbstractMainThread());
-  mOutputPrincipal = MakePrincipalHandle(aPrincipal);
+  mDecoderStateMachine->RemoveOutputStream(aStream);
+}
+
+void MediaDecoder::SetOutputStreamPrincipal(nsIPrincipal* aPrincipal) {
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
+  AbstractThread::AutoEnter context(AbstractMainThread());
+  mDecoderStateMachine->SetOutputStreamPrincipal(aPrincipal);
 }
 
 double MediaDecoder::GetDuration() {

@@ -311,10 +300,6 @@ MediaDecoder::MediaDecoder(MediaDecoderInit& aInit)
       INIT_CANONICAL(mVolume, aInit.mVolume),
       INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
       INIT_CANONICAL(mLooping, aInit.mLooping),
-      INIT_CANONICAL(mSinkDevice, nullptr),
-      INIT_CANONICAL(mOutputCaptured, false),
-      INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
      INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
       mSameOriginMedia(false),
       mVideoDecodingOberver(

@@ -390,11 +375,14 @@ void MediaDecoder::Shutdown() {
     nsCOMPtr<nsIRunnable> r =
         NS_NewRunnableFunction("MediaDecoder::Shutdown", [self]() {
           self->mVideoFrameContainer = nullptr;
-          self->ShutdownInternal();
+          MediaShutdownManager::Instance().Unregister(self);
         });
     mAbstractMainThread->Dispatch(r.forget());
   }
 
+  // Ask the owner to remove its audio/video tracks.
+  GetOwner()->RemoveMediaTracks();
+
   ChangeState(PLAY_STATE_SHUTDOWN);
   mVideoDecodingOberver->UnregisterEvent();
   mVideoDecodingOberver = nullptr;

@@ -531,16 +519,11 @@ void MediaDecoder::OnStoreDecoderBenchmark(const VideoInfo& aInfo) {
   }
 }
 
-void MediaDecoder::ShutdownInternal() {
-  MOZ_ASSERT(NS_IsMainThread());
-  MediaShutdownManager::Instance().Unregister(this);
-}
-
 void MediaDecoder::FinishShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
   SetStateMachine(nullptr);
   mVideoFrameContainer = nullptr;
-  ShutdownInternal();
+  MediaShutdownManager::Instance().Unregister(this);
 }
 
 nsresult MediaDecoder::InitializeStateMachine() {

@@ -659,6 +642,7 @@ double MediaDecoder::GetCurrentTime() {
 void MediaDecoder::OnMetadataUpdate(TimedMetadata&& aMetadata) {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
+  GetOwner()->RemoveMediaTracks();
   MetadataLoaded(MakeUnique<MediaInfo>(*aMetadata.mInfo),
                  UniquePtr<MetadataTags>(std::move(aMetadata.mTags)),
                  MediaDecoderEventVisibility::Observable);

@@ -681,6 +665,8 @@ void MediaDecoder::MetadataLoaded(
   mMediaSeekableOnlyInBufferedRanges =
       aInfo->mMediaSeekableOnlyInBufferedRanges;
   mInfo = aInfo.release();
+  GetOwner()->ConstructMediaTracks(mInfo);
+  mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
 
   // Make sure the element and the frame (if any) are told about
   // our new size.

@@ -871,6 +857,12 @@ void MediaDecoder::ChangeState(PlayState aState) {
     DDLOG(DDLogCategory::Property, "play_state", ToPlayStateStr(aState));
   }
   mPlayState = aState;
+
+  if (mPlayState == PLAY_STATE_PLAYING) {
+    GetOwner()->ConstructMediaTracks(mInfo);
+  } else if (IsEnded()) {
+    GetOwner()->RemoveMediaTracks();
+  }
 }
 
 bool MediaDecoder::IsLoopingBack(double aPrevPos, double aCurPos) const {
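Taken together, the restored MediaDecoder methods give the owner a small capture-setup sequence. A hypothetical caller-side sketch (the wrapper function is an assumption; the decoder calls are the ones reinstated above):

```cpp
// Hypothetical wrapper, not in the patch: shows the order in which the
// restored MediaDecoder capture API would be driven from the main thread.
void CaptureDecoderInto(MediaDecoder* aDecoder, DOMMediaStream* aStream,
                        SharedDummyTrack* aDummyStream,
                        nsIPrincipal* aPrincipal) {
  // Main thread only, and only after Load() has created the state machine.
  aDecoder->SetOutputStreamPrincipal(aPrincipal);
  // Lazily creates the OutputStreamManager; if metadata (mInfo) is already
  // known, audio/video tracks are pre-created before the stream is added.
  aDecoder->AddOutputStream(aStream, aDummyStream);
}
```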


@@ -43,12 +43,12 @@ class MediaMemoryInfo;
 
 class AbstractThread;
 class DOMMediaStream;
 class DecoderBenchmark;
-class ProcessedMediaTrack;
 class FrameStatistics;
 class VideoFrameContainer;
 class MediaFormatReader;
 class MediaDecoderStateMachine;
 struct MediaPlaybackEvent;
+struct SharedDummyTrack;
 
 enum class Visibility : uint8_t;

@@ -155,7 +155,7 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
   void SetLooping(bool aLooping);
 
   // Set the given device as the output device.
-  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
+  RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
 
   bool GetMinimizePreroll() const { return mMinimizePreroll; }

@@ -166,23 +166,15 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
   // replaying after the input as ended. In the latter case, the new source is
   // not connected to streams created by captureStreamUntilEnded.
 
-  // Turn output capturing of this decoder on or off. If it is on, the
-  // MediaDecoderStateMachine's media sink will only play after output tracks
-  // have been set. This is to ensure that it doesn't skip over any data
-  // while the owner has intended to capture the full output, thus missing to
-  // capture some of it. The owner of the MediaDecoder is responsible for adding
-  // output tracks in a timely fashion while the output is captured.
-  void SetOutputCaptured(bool aCaptured);
-  // Add an output track. All decoder output for the track's media type will be
-  // sent to the track.
-  // Note that only one audio track and one video track is supported by
-  // MediaDecoder at this time. Passing in more of one type, or passing in a
-  // type that metadata says we are not decoding, is an error.
-  void AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack);
-  // Remove an output track added with AddOutputTrack.
-  void RemoveOutputTrack(const RefPtr<ProcessedMediaTrack>& aTrack);
-  // Update the principal for any output tracks.
-  void SetOutputTracksPrincipal(const RefPtr<nsIPrincipal>& aPrincipal);
+  // Add an output stream. All decoder output will be sent to the stream.
+  // The stream is initially blocked. The decoder is responsible for unblocking
+  // it while it is playing back.
+  void AddOutputStream(DOMMediaStream* aStream, SharedDummyTrack* aDummyStream);
+  // Remove an output stream added with AddOutputStream.
+  void RemoveOutputStream(DOMMediaStream* aStream);
+
+  // Update the principal for any output streams and their tracks.
+  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
 
   // Return the duration of the video in seconds.
   virtual double GetDuration();

@@ -403,11 +395,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
   void SetStateMachineParameters();
 
-  // Called when MediaDecoder shutdown is finished. Subclasses use this to clean
-  // up internal structures, and unregister potential shutdown blockers when
-  // they're done.
-  virtual void ShutdownInternal();
-
   bool IsShutdown() const;
 
   // Called to notify the decoder that the duration has changed.

@@ -619,20 +606,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
   Canonical<bool> mLooping;
 
-  // The device used with SetSink, or nullptr if no explicit device has been
-  // set.
-  Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
-
-  // Whether this MediaDecoder's output is captured. When captured, all decoded
-  // data must be played out through mOutputTracks.
-  Canonical<bool> mOutputCaptured;
-
-  // Tracks that, if set, will get data routed through them.
-  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
-
-  // PrincipalHandle to be used when feeding data into mOutputTracks.
-  Canonical<PrincipalHandle> mOutputPrincipal;
-
   // Media duration set explicitly by JS. At present, this is only ever present
   // for MSE.
   Maybe<double> mExplicitDuration;

@@ -665,19 +638,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
     return &mPreservesPitch;
   }
   AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
-  AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
-    return &mSinkDevice;
-  }
-  AbstractCanonical<bool>* CanonicalOutputCaptured() {
-    return &mOutputCaptured;
-  }
-  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
-  CanonicalOutputTracks() {
-    return &mOutputTracks;
-  }
-  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
-    return &mOutputPrincipal;
-  }
   AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
 
  private:


@@ -139,6 +139,14 @@ class MediaDecoderOwner {
   virtual void DispatchEncrypted(const nsTArray<uint8_t>& aInitData,
                                  const nsAString& aInitDataType) = 0;
 
+  // Called by the media decoder to create audio/video tracks and add to its
+  // owner's track list.
+  virtual void ConstructMediaTracks(const MediaInfo* aInfo) = 0;
+
+  // Called by the media decoder to removes all audio/video tracks from its
+  // owner's track list.
+  virtual void RemoveMediaTracks() = 0;
+
   // Notified by the decoder that a decryption key is required before emitting
   // further output.
   virtual void NotifyWaitingForKey() {}


@@ -11,6 +11,7 @@
 #include "mediasink/AudioSink.h"
 #include "mediasink/AudioSinkWrapper.h"
 #include "mediasink/DecodedStream.h"
+#include "mediasink/OutputStreamManager.h"
 #include "mediasink/VideoSink.h"
 #include "mozilla/Logging.h"
 #include "mozilla/MathAlgorithms.h"

@@ -2591,10 +2592,6 @@ RefPtr<ShutdownPromise> MediaDecoderStateMachine::ShutdownState::Enter() {
   master->mVolume.DisconnectIfConnected();
   master->mPreservesPitch.DisconnectIfConnected();
   master->mLooping.DisconnectIfConnected();
-  master->mSinkDevice.DisconnectIfConnected();
-  master->mOutputCaptured.DisconnectIfConnected();
-  master->mOutputTracks.DisconnectIfConnected();
-  master->mOutputPrincipal.DisconnectIfConnected();
 
   master->mDuration.DisconnectAll();
   master->mCurrentPosition.DisconnectAll();

@@ -2630,10 +2627,12 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
       mReader(new ReaderProxy(mTaskQueue, aReader)),
       mPlaybackRate(1.0),
       mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
+      mAudioCaptured(false),
       mMinimizePreroll(aDecoder->GetMinimizePreroll()),
       mSentFirstFrameLoadedEvent(false),
       mVideoDecodeSuspended(false),
       mVideoDecodeSuspendTimer(mTaskQueue),
+      mOutputStreamManager(nullptr),
       mVideoDecodeMode(VideoDecodeMode::Normal),
       mIsMSE(aDecoder->IsMSE()),
       mSeamlessLoopingAllowed(false),

@@ -2642,16 +2641,10 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
       INIT_MIRROR(mVolume, 1.0),
       INIT_MIRROR(mPreservesPitch, true),
       INIT_MIRROR(mLooping, false),
-      INIT_MIRROR(mSinkDevice, nullptr),
-      INIT_MIRROR(mOutputCaptured, false),
-      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
-      INIT_CANONICAL(mCanonicalOutputTracks,
-                     nsTArray<RefPtr<ProcessedMediaTrack>>()),
-      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
       INIT_CANONICAL(mDuration, NullableTimeUnit()),
       INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
-      INIT_CANONICAL(mIsAudioDataAudible, false) {
+      INIT_CANONICAL(mIsAudioDataAudible, false),
+      mSetSinkRequestsCount(0) {
   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

@@ -2678,10 +2671,6 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
   mVolume.Connect(aDecoder->CanonicalVolume());
   mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
   mLooping.Connect(aDecoder->CanonicalLooping());
-  mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
-  mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
-  mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
-  mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
 
   // Initialize watchers.
   mWatchManager.Watch(mBuffered,

@@ -2691,16 +2680,6 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
                       &MediaDecoderStateMachine::PreservesPitchChanged);
   mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
   mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);
-  mWatchManager.Watch(mOutputCaptured,
-                      &MediaDecoderStateMachine::UpdateOutputCaptured);
-  mWatchManager.Watch(mOutputTracks,
-                      &MediaDecoderStateMachine::UpdateOutputCaptured);
-  mWatchManager.Watch(mOutputTracks,
-                      &MediaDecoderStateMachine::OutputTracksChanged);
-  mWatchManager.Watch(mOutputPrincipal,
-                      &MediaDecoderStateMachine::OutputPrincipalChanged);
-
-  mMediaSink = CreateMediaSink();
 
   MOZ_ASSERT(!mStateObj);
   auto* s = new DecodeMetadataState(this);

@@ -2718,24 +2697,23 @@ MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
     MOZ_ASSERT(self->OnTaskQueue());
     AudioSink* audioSink =
         new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
-                      self->Info().mAudio, self->mSinkDevice.Ref());
+                      self->Info().mAudio);
 
     self->mAudibleListener = audioSink->AudibleEvent().Connect(
         self->mTaskQueue, self.get(),
        &MediaDecoderStateMachine::AudioAudibleChanged);
     return audioSink;
   };
-  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
-                              mVolume, mPlaybackRate, mPreservesPitch);
+  return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
 }
 
-already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
-  MOZ_ASSERT(OnTaskQueue());
+already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
+    bool aAudioCaptured, OutputStreamManager* aManager) {
+  MOZ_ASSERT_IF(aAudioCaptured, aManager);
   RefPtr<MediaSink> audioSink =
-      mOutputCaptured
-          ? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
-                              mPreservesPitch, mAudioQueue, mVideoQueue)
-          : CreateAudioSink();
+      aAudioCaptured ? new DecodedStream(mTaskQueue, mAbstractMainThread,
+                                         mAudioQueue, mVideoQueue, aManager)
+                     : CreateAudioSink();
 
   RefPtr<MediaSink> mediaSink =
       new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,

@@ -2825,6 +2803,8 @@ nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) {
   mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
       OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
 
+  mMediaSink = CreateMediaSink(mAudioCaptured, mOutputStreamManager);
+
   nsresult rv = mReader->Init();
   NS_ENSURE_SUCCESS(rv, rv);

@@ -3360,6 +3340,9 @@ void MediaDecoderStateMachine::FinishDecodeFirstFrame() {
 RefPtr<ShutdownPromise> MediaDecoderStateMachine::BeginShutdown() {
   MOZ_ASSERT(NS_IsMainThread());
+  if (mOutputStreamManager) {
+    mOutputStreamManager->Disconnect();
+  }
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::Shutdown);
 }

@@ -3448,7 +3431,7 @@ void MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() {
     }
   }
 
   // Note we have to update playback position before releasing the monitor.
-  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
+  // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
   // the monitor and get a staled value from GetCurrentTimeUs() which hits the
   // assertion in GetClock().

@@ -3534,75 +3517,47 @@ void MediaDecoderStateMachine::LoopingChanged() {
   }
 }
 
-void MediaDecoderStateMachine::UpdateOutputCaptured() {
-  MOZ_ASSERT(OnTaskQueue());
-
-  // Reset these flags so they are consistent with the status of the sink.
-  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
-  // to reset these flags when switching MediaSinks.
-  mAudioCompleted = false;
-  mVideoCompleted = false;
-
-  // Stop and shut down the existing sink.
-  StopMediaSink();
-  mMediaSink->Shutdown();
-
-  // Create a new sink according to whether output is captured.
-  mMediaSink = CreateMediaSink();
-
-  // Don't buffer as much when audio is captured because we don't need to worry
-  // about high latency audio devices.
-  mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
-                                         : detail::AMPLE_AUDIO_THRESHOLD;
-
-  mStateObj->HandleAudioCaptured();
-}
-
-void MediaDecoderStateMachine::OutputTracksChanged() {
-  MOZ_ASSERT(OnTaskQueue());
-  LOG("OutputTracksChanged, tracks=%zu", mOutputTracks.Ref().Length());
-  mCanonicalOutputTracks = mOutputTracks;
-}
-
-void MediaDecoderStateMachine::OutputPrincipalChanged() {
-  MOZ_ASSERT(OnTaskQueue());
-  mCanonicalOutputPrincipal = mOutputPrincipal;
-}
-
 RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
     RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aSink);
 
+  Unused << ++mSetSinkRequestsCount;
   return InvokeAsync(OwnerThread(), this, __func__,
                      &MediaDecoderStateMachine::SetSink, aSink);
 }
 
 RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
-    RefPtr<AudioDeviceInfo> aSinkDevice) {
+    RefPtr<AudioDeviceInfo> aSink) {
   MOZ_ASSERT(OnTaskQueue());
-  if (mOutputCaptured) {
+  if (mAudioCaptured) {
     // Not supported yet.
     return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
   }
 
-  if (mSinkDevice.Ref() != aSinkDevice) {
-    // A new sink was set before this ran.
-    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
+  // Backup current playback parameters.
+  bool wasPlaying = mMediaSink->IsPlaying();
+
+  if (--mSetSinkRequestsCount > 0) {
+    MOZ_ASSERT(mSetSinkRequestsCount > 0);
+    return GenericPromise::CreateAndResolve(wasPlaying, __func__);
   }
 
-  if (mMediaSink->AudioDevice() == aSinkDevice) {
-    // The sink has not changed.
-    return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
-  }
+  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
+  params.mSink = std::move(aSink);
 
-  const bool wasPlaying = IsPlaying();
+  if (!mMediaSink->IsStarted()) {
+    mMediaSink->SetPlaybackParams(params);
+    return GenericPromise::CreateAndResolve(false, __func__);
+  }
 
   // Stop and shutdown the existing sink.
   StopMediaSink();
   mMediaSink->Shutdown();
   // Create a new sink according to whether audio is captured.
-  mMediaSink = CreateMediaSink();
+  mMediaSink = CreateMediaSink(false);
+  // Restore playback parameters.
+  mMediaSink->SetPlaybackParams(params);
   // Start the new sink
   if (wasPlaying) {
     nsresult rv = StartMediaSink();

@@ -3701,6 +3656,43 @@ void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) {
   DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
 }
 
+void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured,
+                                                OutputStreamManager* aManager) {
+  MOZ_ASSERT(OnTaskQueue());
+
+  if (aCaptured == mAudioCaptured) {
+    return;
+  }
+
+  // Rest these flags so they are consistent with the status of the sink.
+  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
+  // to reset these flags when switching MediaSinks.
+  mAudioCompleted = false;
+  mVideoCompleted = false;
+
+  // Backup current playback parameters.
+  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
+
+  // Stop and shut down the existing sink.
+  StopMediaSink();
+  mMediaSink->Shutdown();
+
+  // Create a new sink according to whether audio is captured.
+  mMediaSink = CreateMediaSink(aCaptured, aManager);
+
+  // Restore playback parameters.
+  mMediaSink->SetPlaybackParams(params);
+
+  mAudioCaptured = aCaptured;
+
+  // Don't buffer as much when audio is captured because we don't need to worry
+  // about high latency audio devices.
+  mAmpleAudioThreshold = mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
+                                        : detail::AMPLE_AUDIO_THRESHOLD;
+
+  mStateObj->HandleAudioCaptured();
+}
+
 uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
   MOZ_ASSERT(OnTaskQueue());
   return mReader->VideoIsHardwareAccelerated()

@@ -3744,6 +3736,86 @@ RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
   return p.forget();
 }
 
+void MediaDecoderStateMachine::SetOutputStreamPrincipal(
+    nsIPrincipal* aPrincipal) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mOutputStreamPrincipal = aPrincipal;
+  if (mOutputStreamManager) {
+    mOutputStreamManager->SetPrincipal(mOutputStreamPrincipal);
+  }
+}
+
+void MediaDecoderStateMachine::AddOutputStream(DOMMediaStream* aStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  LOG("AddOutputStream aStream=%p!", aStream);
+  mOutputStreamManager->Add(aStream);
+  nsCOMPtr<nsIRunnable> r =
+      NS_NewRunnableFunction("MediaDecoderStateMachine::SetAudioCaptured",
+                             [self = RefPtr<MediaDecoderStateMachine>(this),
+                              manager = mOutputStreamManager]() {
+                               self->SetAudioCaptured(true, manager);
+                             });
+  nsresult rv = OwnerThread()->Dispatch(r.forget());
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+  Unused << rv;
+}
+
+void MediaDecoderStateMachine::RemoveOutputStream(DOMMediaStream* aStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  LOG("RemoveOutputStream=%p!", aStream);
+  mOutputStreamManager->Remove(aStream);
+  if (mOutputStreamManager->IsEmpty()) {
+    mOutputStreamManager->Disconnect();
+    mOutputStreamManager = nullptr;
+    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
+        "MediaDecoderStateMachine::SetAudioCaptured",
+        [self = RefPtr<MediaDecoderStateMachine>(this)]() {
+          self->SetAudioCaptured(false);
+        });
+    nsresult rv = OwnerThread()->Dispatch(r.forget());
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+    Unused << rv;
+  }
+}
+
+void MediaDecoderStateMachine::EnsureOutputStreamManager(
+    SharedDummyTrack* aDummyStream) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mOutputStreamManager) {
+    return;
+  }
+  mOutputStreamManager = new OutputStreamManager(
+      aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
+}
+
+void MediaDecoderStateMachine::EnsureOutputStreamManagerHasTracks(
+    const MediaInfo& aLoadedInfo) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (!mOutputStreamManager) {
+    return;
+  }
+  if ((!aLoadedInfo.HasAudio() ||
+       mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
+      (!aLoadedInfo.HasVideo() ||
+       mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
+    return;
+  }
+  if (aLoadedInfo.HasAudio()) {
+    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
+    RefPtr<SourceMediaTrack> dummy =
+        mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+    LOG("Pre-created audio track with underlying track %p", dummy.get());
+    Unused << dummy;
+  }
+  if (aLoadedInfo.HasVideo()) {
+    MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
+    RefPtr<SourceMediaTrack> dummy =
+        mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+    LOG("Pre-created video track with underlying track %p", dummy.get());
+    Unused << dummy;
+  }
+}
+
 class VideoQueueMemoryFunctor : public nsDequeFunctor {
  public:
   VideoQueueMemoryFunctor() : mSize(0) {}
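The restored mSetSinkRequestsCount logic coalesces queued sink changes: InvokeSetSink() increments the counter on the main thread, each SetSink() run on the task queue decrements it, and only the request that brings it to zero (the newest one) actually tears down and rebuilds the media sink. A runnable standalone model of that pattern, using generic standard-library types rather than the Gecko ones:

```cpp
#include <atomic>
#include <deque>
#include <functional>
#include <iostream>

// Toy model of mSetSinkRequestsCount: callers increment and enqueue; the
// worker decrements, and only the newest queued request does the work.
struct SetSinkCoalescer {
  std::atomic<int> pending{0};
  std::deque<std::function<void()>> taskQueue;  // stand-in for the task queue

  void Invoke(int sinkId) {  // analogous to InvokeSetSink()
    ++pending;
    taskQueue.push_back([this, sinkId] {
      if (--pending > 0) {
        return;  // a newer sink request is queued behind us; skip the work
      }
      std::cout << "rebuilding media sink for device " << sinkId << "\n";
    });
  }

  void Drain() {  // the task queue running in FIFO order
    while (!taskQueue.empty()) {
      taskQueue.front()();
      taskQueue.pop_front();
    }
  }
};

int main() {
  SetSinkCoalescer c;
  c.Invoke(1);
  c.Invoke(2);
  c.Invoke(3);
  c.Drain();  // only device 3 triggers a rebuild
}
```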


@ -106,6 +106,7 @@ class AbstractThread;
class AudioSegment;
class DecodedStream;
class DOMMediaStream;
+class OutputStreamManager;
class ReaderProxy;
class TaskQueue;
@ -185,6 +186,19 @@ class MediaDecoderStateMachine
  RefPtr<GenericPromise> RequestDebugInfo(
      dom::MediaDecoderStateMachineDebugInfo& aInfo);

+  void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
+  // If an OutputStreamManager does not exist, one will be created.
+  void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
+  // If an OutputStreamManager exists, tracks matching aLoadedInfo will be
+  // created unless they already exist in the manager.
+  void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
+  // Add an output stream to the output stream manager. The manager must have
+  // been created through EnsureOutputStreamManager() before this.
+  void AddOutputStream(DOMMediaStream* aStream);
+  // Remove an output stream added with AddOutputStream. If the last output
+  // stream was removed, we will also tear down the OutputStreamManager.
+  void RemoveOutputStream(DOMMediaStream* aStream);
+
  // Seeks to the decoder to aTarget asynchronously.
  RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
@ -302,6 +316,11 @@ class MediaDecoderStateMachine
  // constructor immediately after the task queue is created.
  void InitializationTask(MediaDecoder* aDecoder);

+  // Sets the audio-captured state and recreates the media sink if needed.
+  // A manager must be passed in if setting the audio-captured state to true.
+  void SetAudioCaptured(bool aCaptured,
+                        OutputStreamManager* aManager = nullptr);
+
  RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);
  RefPtr<ShutdownPromise> Shutdown();
@ -375,9 +394,6 @@ class MediaDecoderStateMachine
  void SetPlaybackRate(double aPlaybackRate);
  void PreservesPitchChanged();
  void LoopingChanged();
-  void UpdateOutputCaptured();
-  void OutputTracksChanged();
-  void OutputPrincipalChanged();

  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
@ -422,9 +438,10 @@ class MediaDecoderStateMachine
  MediaSink* CreateAudioSink();

-  // Always create mediasink which contains an AudioSink or DecodedStream
-  // inside.
-  already_AddRefed<MediaSink> CreateMediaSink();
+  // Always create mediasink which contains an AudioSink or StreamSink inside.
+  // A manager must be passed in if aAudioCaptured is true.
+  already_AddRefed<MediaSink> CreateMediaSink(
+      bool aAudioCaptured, OutputStreamManager* aManager = nullptr);

  // Stops the media sink and shut it down.
  // The decoder monitor must be held with exactly one lock count.
@ -609,6 +626,11 @@ class MediaDecoderStateMachine
  bool mIsLiveStream = false;

+  // True if we shouldn't play our audio (but still write it to any capturing
+  // streams). When this is true, the audio thread will never start again after
+  // it has stopped.
+  bool mAudioCaptured;
+
  // True if all audio frames are already rendered.
  bool mAudioCompleted = false;
@ -650,6 +672,13 @@ class MediaDecoderStateMachine
  // Track enabling video decode suspension via timer
  DelayedScheduler mVideoDecodeSuspendTimer;

+  // Data about MediaStreams that are being fed by the decoder.
+  // Main thread only.
+  RefPtr<OutputStreamManager> mOutputStreamManager;
+
+  // Principal used by output streams. Main thread only.
+  nsCOMPtr<nsIPrincipal> mOutputStreamPrincipal;
+
  // Track the current video decode mode.
  VideoDecodeMode mVideoDecodeMode;
@ -704,23 +733,6 @@ class MediaDecoderStateMachine
  // upon reaching the end.
  Mirror<bool> mLooping;

-  // The device used with SetSink, or nullptr if no explicit device has been
-  // set.
-  Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
-
-  // Whether all output should be captured into mOutputTracks. While true, the
-  // media sink will only play if there are output tracks.
-  Mirror<bool> mOutputCaptured;
-
-  // Tracks to capture data into.
-  Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
-
-  // PrincipalHandle to feed with data captured into mOutputTracks.
-  Mirror<PrincipalHandle> mOutputPrincipal;
-
-  Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mCanonicalOutputTracks;
-  Canonical<PrincipalHandle> mCanonicalOutputPrincipal;
-
  // Duration of the media. This is guaranteed to be non-null after we finish
  // decoding the first frame.
  Canonical<media::NullableTimeUnit> mDuration;
@ -733,16 +745,12 @@ class MediaDecoderStateMachine
  // Used to distinguish whether the audio is producing sound.
  Canonical<bool> mIsAudioDataAudible;

-  // Used to count the number of pending requests to set a new sink.
-  Atomic<int> mSetSinkRequestsCount;
-
 public:
  AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
-  AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
-  CanonicalOutputTracks() {
-    return &mCanonicalOutputTracks;
-  }
-  AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
-    return &mCanonicalOutputPrincipal;
-  }
  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
    return &mDuration;
  }
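
// The header above encodes a contract purely in comments: the aManager
// argument may take its null default only when capture is being turned off.
// A small sketch of checking that contract at runtime; the type is a
// hypothetical stand-in and the assert models MOZ_DIAGNOSTIC_ASSERT.
#include <cassert>

struct OutputStreamManagerStub {};  // stand-in, not the Gecko class

// Models the documented contract of SetAudioCaptured()/CreateMediaSink():
// a manager is mandatory when enabling capture, ignored when disabling.
void SetAudioCaptured(bool aCaptured,
                      OutputStreamManagerStub* aManager = nullptr) {
  assert(!aCaptured || aManager);
  // ... recreate the media sink here if the captured state changed ...
}

int main() {
  OutputStreamManagerStub manager;
  SetAudioCaptured(true, &manager);  // capturing: manager required
  SetAudioCaptured(false);           // not capturing: default is fine
  return 0;
}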

View File

@ -60,11 +60,8 @@ class MediaResource : public DecoderDoctorLifeLogger<MediaResource> {
  // Close the resource, stop any listeners, channels, etc.
  // Cancels any currently blocking Read request and forces that request to
-  // return an error. This must be called (and resolve) before the MediaResource
-  // is deleted.
-  virtual RefPtr<GenericPromise> Close() {
-    return GenericPromise::CreateAndResolve(true, __func__);
-  }
+  // return an error.
+  virtual nsresult Close() { return NS_OK; }

  // These methods are called off the main thread.
  // Read up to aCount bytes from the stream. The read starts at

View File

@ -308,7 +308,7 @@ class MediaStreamTrackSource : public nsISupports {
  }

  // Principal identifying who may access the contents of this source.
-  RefPtr<nsIPrincipal> mPrincipal;
+  nsCOMPtr<nsIPrincipal> mPrincipal;

  // Currently registered sinks.
  nsTArray<WeakPtr<Sink>> mSinks;

View File

@ -6,7 +6,6 @@
#include "AudioSink.h"
#include "AudioConverter.h"
-#include "AudioDeviceInfo.h"
#include "MediaQueue.h"
#include "VideoUtils.h"
#include "mozilla/CheckedInt.h"
@ -35,11 +34,9 @@ using media::TimeUnit;
AudioSink::AudioSink(AbstractThread* aThread,
                     MediaQueue<AudioData>& aAudioQueue,
-                    const TimeUnit& aStartTime, const AudioInfo& aInfo,
-                    AudioDeviceInfo* aAudioDevice)
+                    const TimeUnit& aStartTime, const AudioInfo& aInfo)
    : mStartTime(aStartTime),
      mInfo(aInfo),
-      mAudioDevice(aAudioDevice),
      mPlaying(true),
      mMonitor("AudioSink"),
      mWritten(0),
@ -186,7 +183,7 @@ nsresult AudioSink::InitializeAudioStream(const PlaybackParams& aParams) {
  // StaticPrefs::accessibility_monoaudio_enable() or
  // StaticPrefs::media_forcestereo_enabled() is applied.
  nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
-                                   mAudioDevice);
+                                   aParams.mSink);
  if (NS_FAILED(rv)) {
    mAudioStream->Shutdown();
    mAudioStream = nullptr;

View File

@ -23,20 +23,11 @@ namespace mozilla {
class AudioConverter;

class AudioSink : private AudioStream::DataSource {
- public:
-  struct PlaybackParams {
-    PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
-        : mVolume(aVolume),
-          mPlaybackRate(aPlaybackRate),
-          mPreservesPitch(aPreservesPitch) {}
-    double mVolume;
-    double mPlaybackRate;
-    bool mPreservesPitch;
-  };
+  using PlaybackParams = MediaSink::PlaybackParams;
+
+ public:
  AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
-           const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
-           AudioDeviceInfo* aAudioDevice);
+           const media::TimeUnit& aStartTime, const AudioInfo& aInfo);

  ~AudioSink();
@ -68,8 +59,6 @@ class AudioSink : private AudioStream::DataSource {
  void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);

-  const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
-
 private:
  // Allocate and initialize mAudioStream. Returns NS_OK on success.
  nsresult InitializeAudioStream(const PlaybackParams& aParams);
@ -98,10 +87,6 @@ class AudioSink : private AudioStream::DataSource {
  const AudioInfo mInfo;

-  // The output device this AudioSink is playing data to. The system's default
-  // device is used if this is null.
-  const RefPtr<AudioDeviceInfo> mAudioDevice;
-
  // Used on the task queue of MDSM only.
  bool mPlaying;

View File

@ -21,6 +21,21 @@ void AudioSinkWrapper::Shutdown() {
  mCreator = nullptr;
}

+const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
+  AssertOwnerThread();
+  return mParams;
+}
+
+void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+  if (mAudioSink) {
+    mAudioSink->SetVolume(aParams.mVolume);
+    mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
+    mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
+  }
+  mParams = aParams;
+}
+
RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
  AssertOwnerThread();
  MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
@ -139,11 +154,6 @@ void AudioSinkWrapper::SetPlaying(bool aPlaying) {
  }
}

-double AudioSinkWrapper::PlaybackRate() const {
-  AssertOwnerThread();
-  return mParams.mPlaybackRate;
-}
-
nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
                                 const MediaInfo& aInfo) {
  AssertOwnerThread();
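
// SetPlaybackParams() above shows the forwarding pattern: apply each field to
// the inner sink if one already exists, then store the whole struct so a sink
// created later starts from the same values. A reduced sketch of the same
// idea with stand-in types:
#include <memory>

struct Params {
  double volume = 1.0;
  double rate = 1.0;
  bool preservesPitch = true;
};

struct InnerSink {
  void SetVolume(double) {}
  void SetPlaybackRate(double) {}
  void SetPreservesPitch(bool) {}
};

class Wrapper {
 public:
  void SetParams(const Params& aParams) {
    if (mSink) {  // forward to the live sink, if any
      mSink->SetVolume(aParams.volume);
      mSink->SetPlaybackRate(aParams.rate);
      mSink->SetPreservesPitch(aParams.preservesPitch);
    }
    mParams = aParams;  // remembered for sinks created later
  }

 private:
  std::unique_ptr<InnerSink> mSink;
  Params mParams;
};

int main() {
  Wrapper wrapper;
  Params params;
  params.rate = 1.5;
  wrapper.SetParams(params);  // stored; no inner sink exists yet
  return 0;
}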

View File

@ -24,8 +24,6 @@ class MediaQueue;
 * A wrapper around AudioSink to provide the interface of MediaSink.
 */
class AudioSinkWrapper : public MediaSink {
-  using PlaybackParams = AudioSink::PlaybackParams;
-
  // An AudioSink factory.
  class Creator {
   public:
@ -48,18 +46,19 @@ class AudioSinkWrapper : public MediaSink {
  template <typename Function>
  AudioSinkWrapper(AbstractThread* aOwnerThread,
                   const MediaQueue<AudioData>& aAudioQueue,
-                  const Function& aFunc, double aVolume, double aPlaybackRate,
-                  bool aPreservesPitch)
+                  const Function& aFunc)
      : mOwnerThread(aOwnerThread),
        mCreator(new CreatorImpl<Function>(aFunc)),
        mIsStarted(false),
-        mParams(aVolume, aPlaybackRate, aPreservesPitch),
        // Give an invalid value to facilitate debug if used before playback
        // starts.
        mPlayDuration(media::TimeUnit::Invalid()),
        mAudioEnded(true),
        mAudioQueue(aAudioQueue) {}

+  const PlaybackParams& GetPlaybackParams() const override;
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
+
  RefPtr<EndedPromise> OnEnded(TrackType aType) override;
  media::TimeUnit GetEndTime(TrackType aType) const override;
  media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
@ -70,8 +69,6 @@ class AudioSinkWrapper : public MediaSink {
  void SetPreservesPitch(bool aPreservesPitch) override;
  void SetPlaying(bool aPlaying) override;

-  double PlaybackRate() const override;
-
  nsresult Start(const media::TimeUnit& aStartTime,
                 const MediaInfo& aInfo) override;
  void Stop() override;
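
// The Creator member above is a type-erased factory: a template constructor
// captures any callable, and the AudioSink is only built when playback
// starts. A reduced sketch of that pattern with stand-in types (std::function
// models the CreatorImpl<Function> hierarchy):
#include <functional>
#include <memory>

struct Sink {};

class SinkWrapper {
 public:
  template <typename Function>
  explicit SinkWrapper(const Function& aFunc) : mCreator(aFunc) {}

  void Start() {
    if (!mSink) {
      mSink = std::unique_ptr<Sink>(mCreator());  // lazy creation
    }
  }

 private:
  std::function<Sink*()> mCreator;  // type-erased factory
  std::unique_ptr<Sink> mSink;
};

int main() {
  SinkWrapper wrapper([] { return new Sink(); });
  wrapper.Start();
  return 0;
}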

View File

@ -7,10 +7,10 @@
#include "DecodedStream.h"

#include "AudioSegment.h"
#include "MediaData.h"
-#include "MediaDecoderStateMachine.h"
#include "MediaQueue.h"
#include "MediaTrackGraph.h"
#include "MediaTrackListener.h"
+#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
@ -54,32 +54,34 @@ class DecodedStreamGraphListener {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
 public:
  DecodedStreamGraphListener(
-      SourceMediaTrack* aAudioTrack,
+      SourceMediaTrack* aAudioStream,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
-      SourceMediaTrack* aVideoTrack,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
+      SourceMediaTrack* aVideoStream,
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
+      AbstractThread* aMainThread)
      : mAudioTrackListener(
-            aAudioTrack
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
+            aAudioStream
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
                : nullptr),
        mAudioEndedHolder(std::move(aAudioEndedHolder)),
        mVideoTrackListener(
-            aVideoTrack
-                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
+            aVideoStream
+                ? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
                : nullptr),
        mVideoEndedHolder(std::move(aVideoEndedHolder)),
-        mAudioTrack(aAudioTrack),
-        mVideoTrack(aVideoTrack) {
+        mAudioStream(aAudioStream),
+        mVideoStream(aVideoStream),
+        mAbstractMainThread(aMainThread) {
    MOZ_ASSERT(NS_IsMainThread());
    if (mAudioTrackListener) {
-      mAudioTrack->AddListener(mAudioTrackListener);
+      mAudioStream->AddListener(mAudioTrackListener);
    } else {
      mAudioEnded = true;
      mAudioEndedHolder.ResolveIfExists(true, __func__);
    }
    if (mVideoTrackListener) {
-      mVideoTrack->AddListener(mVideoTrackListener);
+      mVideoStream->AddListener(mVideoTrackListener);
    } else {
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
@ -87,30 +89,30 @@ class DecodedStreamGraphListener {
    }
  }

  void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
      if (aCurrentTrackTime >= mAudioEnd) {
-        mAudioTrack->End();
+        mAudioStream->End();
      }
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
      if (aCurrentTrackTime >= mVideoEnd) {
-        mVideoTrack->End();
+        mVideoStream->End();
      }
    } else {
      MOZ_CRASH("Unexpected source track");
    }
-    if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
+    if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
      // Only audio playout drives the clock forward, if present and live.
      return;
    }
-    MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
-    MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
+    MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
+    MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
    mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
  }

  void NotifyEnded(SourceMediaTrack* aTrack) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
      mAudioEnded = true;
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
      mVideoEnded = true;
    } else {
      MOZ_CRASH("Unexpected source track");
@ -143,9 +145,9 @@ class DecodedStreamGraphListener {
   * Callable from any thread.
   */
  void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
      mAudioEnd = aEnd;
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
      mVideoEnd = aEnd;
    } else {
      MOZ_CRASH("Unexpected source track");
@ -154,9 +156,9 @@ class DecodedStreamGraphListener {
  void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
    MOZ_ASSERT(NS_IsMainThread());
-    if (aTrack == mAudioTrack) {
+    if (aTrack == mAudioStream) {
      mAudioEndedHolder.ResolveIfExists(true, __func__);
-    } else if (aTrack == mVideoTrack) {
+    } else if (aTrack == mVideoStream) {
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    } else {
      MOZ_CRASH("Unexpected source track");
@ -166,16 +168,16 @@ class DecodedStreamGraphListener {
  void Forget() {
    MOZ_ASSERT(NS_IsMainThread());

-    if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
-      mAudioTrack->End();
-      mAudioTrack->RemoveListener(mAudioTrackListener);
+    if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
+      mAudioStream->End();
+      mAudioStream->RemoveListener(mAudioTrackListener);
    }
    mAudioTrackListener = nullptr;
    mAudioEndedHolder.ResolveIfExists(false, __func__);

-    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
-      mVideoTrack->End();
-      mVideoTrack->RemoveListener(mVideoTrackListener);
+    if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
+      mVideoStream->End();
+      mVideoStream->RemoveListener(mVideoTrackListener);
    }
    mVideoTrackListener = nullptr;
    mVideoEndedHolder.ResolveIfExists(false, __func__);
@ -202,10 +204,11 @@ class DecodedStreamGraphListener {
  bool mVideoEnded = false;

  // Any thread.
-  const RefPtr<SourceMediaTrack> mAudioTrack;
-  const RefPtr<SourceMediaTrack> mVideoTrack;
+  const RefPtr<SourceMediaTrack> mAudioStream;
+  const RefPtr<SourceMediaTrack> mVideoStream;
  Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
  Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
+  const RefPtr<AbstractThread> mAbstractMainThread;
};

DecodedStreamTrackListener::DecodedStreamTrackListener(
@ -223,7 +226,7 @@ void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
/**
 * All MediaStream-related data is protected by the decoder's monitor. We have
- * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
+ * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
 * inputs for all output tracks created by OutputStreamManager after calls to
 * captureStream/UntilEnded. Seeking creates new source tracks, as does
 * replaying after the input as ended. In the latter case, the new sources are
@ -232,11 +235,12 @@ void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
class DecodedStreamData final {
 public:
  DecodedStreamData(
-      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+      OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+      RefPtr<SourceMediaTrack> aAudioStream,
+      RefPtr<SourceMediaTrack> aVideoStream,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise);
+      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+      AbstractThread* aMainThread);
  ~DecodedStreamData();
  MediaEventSource<int64_t>& OnOutput();
  void Forget();
@ -254,9 +258,9 @@ class DecodedStreamData final {
  // Count of audio frames written to the track
  int64_t mAudioFramesWritten;
  // Count of video frames written to the track in the track's rate
-  TrackTime mVideoTrackWritten;
+  TrackTime mVideoStreamWritten;
  // Count of audio frames written to the track in the track's rate
-  TrackTime mAudioTrackWritten;
+  TrackTime mAudioStreamWritten;
  // mNextAudioTime is the end timestamp for the last packet sent to the track.
  // Therefore audio packets starting at or after this time need to be copied
  // to the output track.
@ -279,66 +283,42 @@ class DecodedStreamData final {
  bool mHaveSentFinishAudio;
  bool mHaveSentFinishVideo;

-  const RefPtr<SourceMediaTrack> mAudioTrack;
-  const RefPtr<SourceMediaTrack> mVideoTrack;
-  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
-  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
-  const RefPtr<MediaInputPort> mAudioPort;
-  const RefPtr<MediaInputPort> mVideoPort;
+  const RefPtr<SourceMediaTrack> mAudioStream;
+  const RefPtr<SourceMediaTrack> mVideoStream;
  const RefPtr<DecodedStreamGraphListener> mListener;
+  const RefPtr<OutputStreamManager> mOutputStreamManager;
+  const RefPtr<AbstractThread> mAbstractMainThread;
};

DecodedStreamData::DecodedStreamData(
-    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+    OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+    RefPtr<SourceMediaTrack> aAudioStream,
+    RefPtr<SourceMediaTrack> aVideoStream,
    MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise)
+    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+    AbstractThread* aMainThread)
    : mAudioFramesWritten(0),
-      mVideoTrackWritten(0),
-      mAudioTrackWritten(0),
+      mVideoStreamWritten(0),
+      mAudioStreamWritten(0),
      mNextAudioTime(aInit.mStartTime),
      mHaveSentFinishAudio(false),
      mHaveSentFinishVideo(false),
-      mAudioTrack(aInit.mInfo.HasAudio()
-                      ? aGraph->CreateSourceTrack(MediaSegment::AUDIO)
-                      : nullptr),
-      mVideoTrack(aInit.mInfo.HasVideo()
-                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
-                      : nullptr),
-      mAudioOutputTrack(std::move(aAudioOutputTrack)),
-      mVideoOutputTrack(std::move(aVideoOutputTrack)),
-      mAudioPort((mAudioOutputTrack && mAudioTrack)
-                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
-                     : nullptr),
-      mVideoPort((mVideoOutputTrack && mVideoTrack)
-                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
-                     : nullptr),
+      mAudioStream(std::move(aAudioStream)),
+      mVideoStream(std::move(aVideoStream)),
      // DecodedStreamGraphListener will resolve these promises.
      mListener(MakeRefPtr<DecodedStreamGraphListener>(
-          mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
-          std::move(aVideoEndedPromise))) {
+          mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
+          std::move(aVideoEndedPromise), aMainThread)),
+      mOutputStreamManager(aOutputStreamManager),
+      mAbstractMainThread(aMainThread) {
  MOZ_ASSERT(NS_IsMainThread());
-  if (mAudioTrack) {
-    mAudioTrack->SetAppendDataSourceRate(aInit.mInfo.mAudio.mRate);
-  }
-}
+  MOZ_DIAGNOSTIC_ASSERT(
+      mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
+      "Tracks must be pre-created on main thread");
+}

-DecodedStreamData::~DecodedStreamData() {
-  MOZ_ASSERT(NS_IsMainThread());
-  if (mAudioTrack) {
-    mAudioTrack->Destroy();
-  }
-  if (mVideoTrack) {
-    mVideoTrack->Destroy();
-  }
-  if (mAudioPort) {
-    mAudioPort->Destroy();
-  }
-  if (mVideoPort) {
-    mVideoPort->Destroy();
-  }
-}
+DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }

MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
  return mListener->OnOutput();
@ -349,7 +329,7 @@ void DecodedStreamData::Forget() { mListener->Forget(); }
void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
  aInfo.mAudioFramesWritten = mAudioFramesWritten;
-  aInfo.mStreamAudioWritten = mAudioTrackWritten;
+  aInfo.mStreamAudioWritten = mAudioStreamWritten;
  aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
  aInfo.mLastVideoStartTime =
      mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
@ -361,29 +341,40 @@ void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
}

-DecodedStream::DecodedStream(
-    MediaDecoderStateMachine* aStateMachine,
-    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
-    double aPlaybackRate, bool aPreservesPitch,
-    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
-    : mOwnerThread(aStateMachine->OwnerThread()),
+DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
+                             AbstractThread* aMainThread,
+                             MediaQueue<AudioData>& aAudioQueue,
+                             MediaQueue<VideoData>& aVideoQueue,
+                             OutputStreamManager* aOutputStreamManager)
+    : mOwnerThread(aOwnerThread),
+      mAbstractMainThread(aMainThread),
+      mOutputStreamManager(aOutputStreamManager),
      mWatchManager(this, mOwnerThread),
      mPlaying(false, "DecodedStream::mPlaying"),
-      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
+      mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
                       "DecodedStream::mPrincipalHandle (Mirror)"),
-      mOutputTracks(std::move(aOutputTracks)),
-      mVolume(aVolume),
-      mPlaybackRate(aPlaybackRate),
-      mPreservesPitch(aPreservesPitch),
      mAudioQueue(aAudioQueue),
      mVideoQueue(aVideoQueue) {
-  mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
+  mPrincipalHandle.Connect(mOutputStreamManager->CanonicalPrincipalHandle());
  mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
-  PlayingChanged();  // Notify of the initial state
}

DecodedStream::~DecodedStream() {
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
+  NS_ProxyRelease("DecodedStream::mOutputStreamManager", mAbstractMainThread,
+                  do_AddRef(mOutputStreamManager));
+}
+
+const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
+  AssertOwnerThread();
+  return mParams;
+}
+
+void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+  mParams = aParams;
}

RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
@ -402,7 +393,6 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                              const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
-  MOZ_DIAGNOSTIC_ASSERT(!mOutputTracks.IsEmpty());

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
@ -414,55 +404,58 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
    typedef MozPromiseHolder<MediaSink::EndedPromise> Promise;

   public:
-    R(PlaybackInfoInit&& aInit,
-      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-      Promise&& aAudioEndedPromise, Promise&& aVideoEndedPromise)
+    R(PlaybackInfoInit&& aInit, Promise&& aAudioEndedPromise,
+      Promise&& aVideoEndedPromise, OutputStreamManager* aManager,
+      AbstractThread* aMainThread)
        : Runnable("CreateDecodedStreamData"),
          mInit(std::move(aInit)),
-          mOutputTracks(std::move(aOutputTracks)),
          mAudioEndedPromise(std::move(aAudioEndedPromise)),
-          mVideoEndedPromise(std::move(aVideoEndedPromise)) {}
+          mVideoEndedPromise(std::move(aVideoEndedPromise)),
+          mOutputStreamManager(aManager),
+          mAbstractMainThread(aMainThread) {}
    NS_IMETHOD Run() override {
      MOZ_ASSERT(NS_IsMainThread());
-      RefPtr<ProcessedMediaTrack> audioOutputTrack;
-      RefPtr<ProcessedMediaTrack> videoOutputTrack;
-      for (const auto& track : mOutputTracks) {
-        if (track->mType == MediaSegment::AUDIO) {
-          MOZ_DIAGNOSTIC_ASSERT(
-              !audioOutputTrack,
-              "We only support capturing to one output track per kind");
-          audioOutputTrack = track;
-        } else if (track->mType == MediaSegment::VIDEO) {
-          MOZ_DIAGNOSTIC_ASSERT(
-              !videoOutputTrack,
-              "We only support capturing to one output track per kind");
-          videoOutputTrack = track;
-        } else {
-          MOZ_CRASH("Unknown media type");
-        }
-      }
-      if ((!audioOutputTrack && !videoOutputTrack) ||
-          (audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
-          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
-        // No output tracks yet, or they're going away. Halt playback by not
-        // creating DecodedStreamData. MDSM will try again with a new
-        // DecodedStream sink when tracks are available.
+      // No need to create a source track when there are no output tracks.
+      // This happens when RemoveOutput() is called immediately after
+      // StartPlayback().
+      if (mOutputStreamManager->IsEmpty()) {
+        // Resolve the promise to indicate the end of playback.
+        mAudioEndedPromise.Resolve(true, __func__);
+        mVideoEndedPromise.Resolve(true, __func__);
        return NS_OK;
      }
+      RefPtr<SourceMediaTrack> audioStream =
+          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
+      if (mInit.mInfo.HasAudio() && !audioStream) {
+        MOZ_DIAGNOSTIC_ASSERT(
+            !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
+        audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+      }
+      if (audioStream) {
+        audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
+      }
+      RefPtr<SourceMediaTrack> videoStream =
+          mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
+      if (mInit.mInfo.HasVideo() && !videoStream) {
+        MOZ_DIAGNOSTIC_ASSERT(
+            !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
+        videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+      }
      mData = MakeUnique<DecodedStreamData>(
-          std::move(mInit), mOutputTracks[0]->Graph(),
-          std::move(audioOutputTrack), std::move(videoOutputTrack),
-          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise));
+          mOutputStreamManager, std::move(mInit), std::move(audioStream),
+          std::move(videoStream), std::move(mAudioEndedPromise),
+          std::move(mVideoEndedPromise), mAbstractMainThread);
      return NS_OK;
    }
    UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }

   private:
    PlaybackInfoInit mInit;
-    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
    Promise mAudioEndedPromise;
    Promise mVideoEndedPromise;
+    RefPtr<OutputStreamManager> mOutputStreamManager;
    UniquePtr<DecodedStreamData> mData;
+    const RefPtr<AbstractThread> mAbstractMainThread;
  };

  MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
@ -470,9 +463,9 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
  MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
  mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
  PlaybackInfoInit init{aStartTime, aInfo};
-  nsCOMPtr<nsIRunnable> r = new R(
-      std::move(init), nsTArray<RefPtr<ProcessedMediaTrack>>(mOutputTracks),
-      std::move(audioEndedHolder), std::move(videoEndedHolder));
+  nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
+                                  std::move(videoEndedHolder),
+                                  mOutputStreamManager, mAbstractMainThread);
  SyncRunnable::DispatchToThread(
      SystemGroup::EventTargetFor(TaskCategory::Other), r);
  mData = static_cast<R*>(r.get())->ReleaseData();
@ -525,9 +518,12 @@ void DecodedStream::DestroyData(UniquePtr<DecodedStreamData>&& aData) {
  mOutputListener.Disconnect();

-  NS_DispatchToMainThread(
-      NS_NewRunnableFunction("DecodedStream::DestroyData",
-                             [data = std::move(aData)]() { data->Forget(); }));
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      "DecodedStream::DestroyData",
+      [data = std::move(aData), manager = mOutputStreamManager]() {
+        data->Forget();
+        manager->RemoveTracks();
+      }));
}

void DecodedStream::SetPlaying(bool aPlaying) {
@ -543,22 +539,17 @@ void DecodedStream::SetPlaying(bool aPlaying) {
void DecodedStream::SetVolume(double aVolume) {
  AssertOwnerThread();
-  mVolume = aVolume;
+  mParams.mVolume = aVolume;
}

void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
  AssertOwnerThread();
-  mPlaybackRate = aPlaybackRate;
+  mParams.mPlaybackRate = aPlaybackRate;
}

void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
  AssertOwnerThread();
-  mPreservesPitch = aPreservesPitch;
-}
-
-double DecodedStream::PlaybackRate() const {
-  AssertOwnerThread();
-  return mPlaybackRate;
+  mParams.mPreservesPitch = aPreservesPitch;
}

static void SendStreamAudio(DecodedStreamData* aStream,
@ -637,11 +628,12 @@ void DecodedStream::SendAudio(double aVolume,
  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio().
  if (output.GetDuration() > 0) {
-    mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
+    mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-    mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
+    mData->mListener->EndTrackAt(mData->mAudioStream,
+                                 mData->mAudioStreamWritten);
    mData->mHaveSentFinishAudio = true;
  }
}
@ -652,9 +644,9 @@ void DecodedStreamData::WriteVideoToSegment(
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
  RefPtr<layers::Image> image = aImage;
  auto end =
-      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
+      mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
  auto start =
-      mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
+      mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
  aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                       aTimeStamp);
  // Extend this so we get accurate durations for all frames.
@ -700,7 +692,7 @@ void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
  // for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
-  mData->mVideoTrack->AppendData(&resetter);
+  mData->mVideoStream->AppendData(&resetter);

  // Consumer buffers have been reset. We now set the next time to the start
  // time of the current frame, so that it can be displayed again on resuming.
@ -780,7 +772,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
-                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
+                        mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
@ -796,7 +788,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
  }

  if (output.GetDuration() > 0) {
-    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
+    mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
@ -818,7 +810,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
      // We round the nr of microseconds up, because WriteVideoToSegment
      // will round the conversion from microseconds to TrackTime down.
      auto deviation = TimeUnit::FromMicroseconds(
-          mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
+          mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
      mData->WriteVideoToSegment(
          mData->mLastVideoImage, start, start + deviation,
@ -829,9 +821,11 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
      if (forceBlack) {
        endSegment.ReplaceWithDisabled();
      }
-      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
+      mData->mVideoStreamWritten +=
+          mData->mVideoStream->AppendData(&endSegment);
    }
-    mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
+    mData->mListener->EndTrackAt(mData->mVideoStream,
+                                 mData->mVideoStreamWritten);
    mData->mHaveSentFinishVideo = true;
  }
}
@ -848,7 +842,7 @@ void DecodedStream::SendData() {
    return;
  }

-  SendAudio(mVolume, mPrincipalHandle);
+  SendAudio(mParams.mVolume, mPrincipalHandle);
  SendVideo(mPrincipalHandle);
}
@ -902,6 +896,10 @@ void DecodedStream::PlayingChanged() {
    // On seek or pause we discard future frames.
    ResetVideo(mPrincipalHandle);
  }
+
+  mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
+      "OutputStreamManager::SetPlaying", mOutputStreamManager,
+      &OutputStreamManager::SetPlaying, mPlaying));
}

void DecodedStream::ConnectListener() {
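
// PlayingChanged() runs on the sink's owner thread, so the play/pause state
// is handed to the main-thread OutputStreamManager through a dispatched
// runnable rather than a direct call. A stand-in sketch of that hand-off,
// using a plain task queue in place of AbstractThread::Dispatch() and
// NewRunnableMethod():
#include <cassert>
#include <functional>
#include <queue>

struct TaskQueue {
  std::queue<std::function<void()>> tasks;
  void Dispatch(std::function<void()> aTask) { tasks.push(std::move(aTask)); }
  void DrainAll() {
    while (!tasks.empty()) {
      tasks.front()();
      tasks.pop();
    }
  }
};

struct ManagerStub {
  bool playing = false;
  void SetPlaying(bool aPlaying) { playing = aPlaying; }
};

int main() {
  TaskQueue mainThread;
  ManagerStub manager;

  bool playing = true;  // state owned by the sink's owner thread
  // Mirrors the NewRunnableMethod<bool> dispatch in PlayingChanged():
  // capture the value now, apply it when the "main thread" runs the task.
  mainThread.Dispatch([&manager, playing] { manager.SetPlaying(playing); });

  mainThread.DrainAll();
  assert(manager.playing);
  return 0;
}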

View File

@ -22,9 +22,9 @@
namespace mozilla {

class DecodedStreamData;
-class MediaDecoderStateMachine;
class AudioData;
class VideoData;
+class OutputStreamManager;
struct PlaybackInfoInit;
class ProcessedMediaTrack;
class TimeStamp;
@ -33,12 +33,17 @@ template <class T>
class MediaQueue;

class DecodedStream : public MediaSink {
+  using MediaSink::PlaybackParams;
+
 public:
-  DecodedStream(MediaDecoderStateMachine* aStateMachine,
-                nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-                double aVolume, double aPlaybackRate, bool aPreservesPitch,
+  DecodedStream(AbstractThread* aOwnerThread, AbstractThread* aMainThread,
                MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue);
+                MediaQueue<VideoData>& aVideoQueue,
+                OutputStreamManager* aOutputStreamManager);

+  // MediaSink functions.
+  const PlaybackParams& GetPlaybackParams() const override;
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
+
  RefPtr<EndedPromise> OnEnded(TrackType aType) override;
  media::TimeUnit GetEndTime(TrackType aType) const override;
@ -53,8 +58,6 @@ class DecodedStream : public MediaSink {
  void SetPreservesPitch(bool aPreservesPitch) override;
  void SetPlaying(bool aPlaying) override;

-  double PlaybackRate() const override;
-
  nsresult Start(const media::TimeUnit& aStartTime,
                 const MediaInfo& aInfo) override;
  void Stop() override;
@ -85,6 +88,14 @@ class DecodedStream : public MediaSink {
  const RefPtr<AbstractThread> mOwnerThread;

+  const RefPtr<AbstractThread> mAbstractMainThread;
+
+  /*
+   * Main thread only members.
+   */
+  // Data about MediaStreams that are being fed by the decoder.
+  const RefPtr<OutputStreamManager> mOutputStreamManager;
+
  /*
   * Worker thread only members.
   */
@ -95,11 +106,8 @@ class DecodedStream : public MediaSink {
  Watchable<bool> mPlaying;
  Mirror<PrincipalHandle> mPrincipalHandle;

-  const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
-
-  double mVolume;
-  double mPlaybackRate;
-  bool mPreservesPitch;
+  PlaybackParams mParams;

  media::NullableTimeUnit mStartTime;
  media::TimeUnit mLastOutputTime;

View File

@ -7,6 +7,7 @@
#ifndef MediaSink_h_
#define MediaSink_h_

+#include "AudioDeviceInfo.h"
#include "MediaInfo.h"
#include "mozilla/MozPromise.h"
#include "mozilla/RefPtr.h"
@ -38,6 +39,23 @@ class MediaSink {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
  typedef mozilla::TrackInfo::TrackType TrackType;

+  struct PlaybackParams {
+    PlaybackParams()
+        : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
+    double mVolume;
+    double mPlaybackRate;
+    bool mPreservesPitch;
+    RefPtr<AudioDeviceInfo> mSink;
+  };
+
+  // Return the playback parameters of this sink.
+  // Can be called in any state.
+  virtual const PlaybackParams& GetPlaybackParams() const = 0;
+
+  // Set the playback parameters of this sink.
+  // Can be called in any state.
+  virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
+
  // EndedPromise needs to be a non-exclusive promise as it is shared between
  // both the AudioSink and VideoSink.
  typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
@ -82,10 +100,6 @@ class MediaSink {
  // Pause/resume the playback. Only work after playback starts.
  virtual void SetPlaying(bool aPlaying) = 0;

-  // Get the playback rate.
-  // Can be called in any state.
-  virtual double PlaybackRate() const = 0;
-
  // Single frame rendering operation may need to be done before playback
  // started (1st frame) or right after seek completed or playback stopped.
  // Do nothing if this sink has no video track. Can be called in any state.
@ -108,10 +122,6 @@ class MediaSink {
  // Can be called in any state.
  virtual bool IsPlaying() const = 0;

-  // The audio output device this MediaSink is playing audio data to. The
-  // default device is used if this returns null.
-  virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
-
  // Called on the state machine thread to shut down the sink. All resources
  // allocated by this sink should be released.
  // Must be called after playback stopped.
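
// Because GetPlaybackParams()/SetPlaybackParams() move the whole struct at
// once, a caller changing one field copies, edits, and writes back, which
// keeps unrelated fields (including the mSink device) intact. A sketch
// against a trivial in-memory sink; FakeSink and AudioDeviceInfoStub are
// stand-ins, not real MediaSink subclasses.
#include <cassert>

struct AudioDeviceInfoStub {};

struct PlaybackParams {
  double mVolume = 1.0;
  double mPlaybackRate = 1.0;
  bool mPreservesPitch = true;
  AudioDeviceInfoStub* mSink = nullptr;  // RefPtr in the real struct
};

class FakeSink {
 public:
  const PlaybackParams& GetPlaybackParams() const { return mParams; }
  void SetPlaybackParams(const PlaybackParams& aParams) { mParams = aParams; }

 private:
  PlaybackParams mParams;
};

int main() {
  FakeSink sink;
  // Read-modify-write: change the rate without disturbing other fields.
  PlaybackParams params = sink.GetPlaybackParams();
  params.mPlaybackRate = 2.0;
  sink.SetPlaybackParams(params);
  assert(sink.GetPlaybackParams().mPlaybackRate == 2.0);
  assert(sink.GetPlaybackParams().mPreservesPitch);
  return 0;
}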

View File

@ -0,0 +1,357 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "OutputStreamManager.h"
#include "DOMMediaStream.h"
#include "../MediaTrackGraph.h"
#include "mozilla/dom/MediaStreamTrack.h"
#include "mozilla/dom/AudioStreamTrack.h"
#include "mozilla/dom/VideoStreamTrack.h"
#include "nsContentUtils.h"
namespace mozilla {
#define LOG(level, msg, ...) \
MOZ_LOG(gMediaDecoderLog, level, (msg, ##__VA_ARGS__))
class DecodedStreamTrackSource : public dom::MediaStreamTrackSource {
public:
NS_DECL_ISUPPORTS_INHERITED
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DecodedStreamTrackSource,
dom::MediaStreamTrackSource)
explicit DecodedStreamTrackSource(SourceMediaTrack* aSourceStream,
nsIPrincipal* aPrincipal)
: dom::MediaStreamTrackSource(aPrincipal, nsString()),
mTrack(aSourceStream->Graph()->CreateForwardedInputTrack(
aSourceStream->mType)),
mPort(mTrack->AllocateInputPort(aSourceStream)) {
MOZ_ASSERT(NS_IsMainThread());
}
dom::MediaSourceEnum GetMediaSource() const override {
return dom::MediaSourceEnum::Other;
}
void Stop() override {
MOZ_ASSERT(NS_IsMainThread());
// We don't notify the source that a track was stopped since it will keep
// producing tracks until the element ends. The decoder also needs the
// tracks it created to be live at the source since the decoder's clock is
// based on MediaStreams during capture. We do however, disconnect this
// track's underlying track.
if (!mTrack->IsDestroyed()) {
mTrack->Destroy();
mPort->Destroy();
}
}
void Disable() override {}
void Enable() override {}
void SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
mPrincipal = aPrincipal;
PrincipalChanged();
}
void ForceEnded() { OverrideEnded(); }
const RefPtr<ProcessedMediaTrack> mTrack;
const RefPtr<MediaInputPort> mPort;
protected:
virtual ~DecodedStreamTrackSource() {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mTrack->IsDestroyed());
}
};
NS_IMPL_ADDREF_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
NS_IMPL_RELEASE_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DecodedStreamTrackSource)
NS_INTERFACE_MAP_END_INHERITING(dom::MediaStreamTrackSource)
NS_IMPL_CYCLE_COLLECTION_INHERITED(DecodedStreamTrackSource,
dom::MediaStreamTrackSource)
OutputStreamData::OutputStreamData(OutputStreamManager* aManager,
AbstractThread* aAbstractMainThread,
DOMMediaStream* aDOMStream)
: mManager(aManager),
mAbstractMainThread(aAbstractMainThread),
mDOMStream(aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
}
OutputStreamData::~OutputStreamData() = default;
void OutputStreamData::AddTrack(SourceMediaTrack* aTrack,
MediaSegment::Type aType,
nsIPrincipal* aPrincipal, bool aAsyncAddTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
LOG(LogLevel::Debug,
"Adding output %s track sourced from track %p to MediaStream %p%s",
aType == MediaSegment::AUDIO ? "audio" : "video", aTrack,
mDOMStream.get(), aAsyncAddTrack ? " (async)" : "");
auto source = MakeRefPtr<DecodedStreamTrackSource>(aTrack, aPrincipal);
RefPtr<dom::MediaStreamTrack> track;
if (aType == MediaSegment::AUDIO) {
track = new dom::AudioStreamTrack(mDOMStream->GetParentObject(),
source->mTrack, source);
} else {
MOZ_ASSERT(aType == MediaSegment::VIDEO);
track = new dom::VideoStreamTrack(mDOMStream->GetParentObject(),
source->mTrack, source);
}
mTracks.AppendElement(track.get());
if (aAsyncAddTrack) {
GetMainThreadEventTarget()->Dispatch(
NewRunnableMethod<RefPtr<dom::MediaStreamTrack>>(
"DOMMediaStream::AddTrackInternal", mDOMStream.get(),
&DOMMediaStream::AddTrackInternal, track));
} else {
mDOMStream->AddTrackInternal(track);
}
}
void OutputStreamData::RemoveTrack(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
LOG(LogLevel::Debug,
"Removing output track sourced by track %p from MediaStream %p", aTrack,
mDOMStream.get());
for (const auto& t : nsTArray<WeakPtr<dom::MediaStreamTrack>>(mTracks)) {
mTracks.RemoveElement(t);
if (!t || t->Ended()) {
continue;
}
DecodedStreamTrackSource& source =
static_cast<DecodedStreamTrackSource&>(t->GetSource());
GetMainThreadEventTarget()->Dispatch(
NewRunnableMethod("DecodedStreamTrackSource::ForceEnded", &source,
&DecodedStreamTrackSource::ForceEnded));
}
}
void OutputStreamData::SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
for (const WeakPtr<dom::MediaStreamTrack>& track : mTracks) {
if (!track || track->Ended()) {
continue;
}
DecodedStreamTrackSource& source =
static_cast<DecodedStreamTrackSource&>(track->GetSource());
source.SetPrincipal(aPrincipal);
}
}
OutputStreamManager::OutputStreamManager(SharedDummyTrack* aDummyStream,
nsIPrincipal* aPrincipal,
AbstractThread* aAbstractMainThread)
: mAbstractMainThread(aAbstractMainThread),
mDummyStream(aDummyStream),
mPrincipalHandle(
aAbstractMainThread,
aPrincipal ? MakePrincipalHandle(aPrincipal) : PRINCIPAL_HANDLE_NONE,
"OutputStreamManager::mPrincipalHandle (Canonical)") {
MOZ_ASSERT(NS_IsMainThread());
}
void OutputStreamManager::Add(DOMMediaStream* aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Adding MediaStream %p", aDOMStream);
OutputStreamData* p = mStreams
.AppendElement(new OutputStreamData(
this, mAbstractMainThread, aDOMStream))
->get();
for (const auto& lt : mLiveTracks) {
p->AddTrack(lt->mSourceTrack, lt->mType, mPrincipalHandle.Ref(), false);
}
}
void OutputStreamManager::Remove(DOMMediaStream* aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Removing MediaStream %p", aDOMStream);
AutoRemoveDestroyedStreams();
mStreams.ApplyIf(
aDOMStream, 0, StreamComparator(),
[&](const UniquePtr<OutputStreamData>& aData) {
for (const auto& lt : mLiveTracks) {
aData->RemoveTrack(lt->mSourceTrack);
}
},
[]() { MOZ_ASSERT_UNREACHABLE("Didn't exist"); });
DebugOnly<bool> rv = mStreams.RemoveElement(aDOMStream, StreamComparator());
MOZ_ASSERT(rv);
}
bool OutputStreamManager::HasTrackType(MediaSegment::Type aType) {
MOZ_ASSERT(NS_IsMainThread());
return mLiveTracks.Contains(aType, TrackTypeComparator());
}
bool OutputStreamManager::HasTracks(SourceMediaTrack* aAudioStream,
SourceMediaTrack* aVideoStream) {
MOZ_ASSERT(NS_IsMainThread());
size_t nrExpectedTracks = 0;
bool asExpected = true;
if (aAudioStream) {
Unused << ++nrExpectedTracks;
asExpected = asExpected && mLiveTracks.Contains(
MakePair(aAudioStream, MediaSegment::AUDIO),
TrackComparator());
}
if (aVideoStream) {
Unused << ++nrExpectedTracks;
asExpected = asExpected && mLiveTracks.Contains(
MakePair(aVideoStream, MediaSegment::VIDEO),
TrackComparator());
}
asExpected = asExpected && mLiveTracks.Length() == nrExpectedTracks;
return asExpected;
}
SourceMediaTrack* OutputStreamManager::GetPrecreatedTrackOfType(
MediaSegment::Type aType) const {
auto i = mLiveTracks.IndexOf(aType, 0, PrecreatedTrackTypeComparator());
return i == nsTArray<UniquePtr<LiveTrack>>::NoIndex
? nullptr
: mLiveTracks[i]->mSourceTrack.get();
}
size_t OutputStreamManager::NumberOfTracks() {
MOZ_ASSERT(NS_IsMainThread());
return mLiveTracks.Length();
}
already_AddRefed<SourceMediaTrack> OutputStreamManager::AddTrack(
MediaSegment::Type aType) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(!HasTrackType(aType),
"Cannot have two tracks of the same type at the same time");
RefPtr<SourceMediaTrack> track =
mDummyStream->mTrack->Graph()->CreateSourceTrack(aType);
if (!mPlaying) {
track->Suspend();
}
LOG(LogLevel::Info, "Adding %s track sourced by track %p",
aType == MediaSegment::AUDIO ? "audio" : "video", track.get());
mLiveTracks.AppendElement(MakeUnique<LiveTrack>(track, aType));
AutoRemoveDestroyedStreams();
for (const auto& data : mStreams) {
data->AddTrack(track, aType, mPrincipalHandle.Ref(), true);
}
return track.forget();
}
OutputStreamManager::LiveTrack::LiveTrack(SourceMediaTrack* aSourceTrack,
MediaSegment::Type aType)
: mSourceTrack(aSourceTrack), mType(aType) {}
OutputStreamManager::LiveTrack::~LiveTrack() { mSourceTrack->Destroy(); }
void OutputStreamManager::AutoRemoveDestroyedStreams() {
MOZ_ASSERT(NS_IsMainThread());
for (size_t i = mStreams.Length(); i > 0; --i) {
const auto& data = mStreams[i - 1];
if (!data->mDOMStream) {
// If the mDOMStream WeakPtr is now null, mDOMStream has been destructed.
mStreams.RemoveElementAt(i - 1);
}
}
}
void OutputStreamManager::RemoveTrack(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Removing track with source track %p", aTrack);
DebugOnly<bool> rv =
mLiveTracks.RemoveElement(aTrack, TrackStreamComparator());
MOZ_ASSERT(rv);
AutoRemoveDestroyedStreams();
for (const auto& data : mStreams) {
data->RemoveTrack(aTrack);
}
}
void OutputStreamManager::RemoveTracks() {
MOZ_ASSERT(NS_IsMainThread());
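  // RemoveTrack() mutates mLiveTracks, so walk the array back to front.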
for (size_t i = mLiveTracks.Length(); i > 0; --i) {
RemoveTrack(mLiveTracks[i - 1]->mSourceTrack);
}
}
void OutputStreamManager::Disconnect() {
MOZ_ASSERT(NS_IsMainThread());
RemoveTracks();
MOZ_ASSERT(mLiveTracks.IsEmpty());
AutoRemoveDestroyedStreams();
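  // Copy the DOM streams out first, since Remove() mutates mStreams.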
nsTArray<RefPtr<DOMMediaStream>> domStreams(mStreams.Length());
for (const auto& data : mStreams) {
domStreams.AppendElement(data->mDOMStream);
}
for (auto& domStream : domStreams) {
Remove(domStream);
}
MOZ_ASSERT(mStreams.IsEmpty());
}
AbstractCanonical<PrincipalHandle>*
OutputStreamManager::CanonicalPrincipalHandle() {
return &mPrincipalHandle;
}
void OutputStreamManager::SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIPrincipal> principal = GetPrincipalFromHandle(mPrincipalHandle);
if (nsContentUtils::CombineResourcePrincipals(&principal, aPrincipal)) {
AutoRemoveDestroyedStreams();
for (const UniquePtr<OutputStreamData>& data : mStreams) {
data->SetPrincipal(principal);
}
mPrincipalHandle = MakePrincipalHandle(principal);
}
}
void OutputStreamManager::SetPlaying(bool aPlaying) {
MOZ_ASSERT(NS_IsMainThread());
if (mPlaying == aPlaying) {
return;
}
mPlaying = aPlaying;
for (auto& lt : mLiveTracks) {
if (mPlaying) {
lt->mSourceTrack->Resume();
lt->mEverPlayed = true;
} else {
lt->mSourceTrack->Suspend();
}
}
}
OutputStreamManager::~OutputStreamManager() = default;
#undef LOG
} // namespace mozilla

View File

@@ -0,0 +1,161 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef OutputStreamManager_h
#define OutputStreamManager_h
#include "MediaSegment.h"
#include "mozilla/Pair.h"
#include "mozilla/RefPtr.h"
#include "mozilla/StateMirroring.h"
#include "mozilla/WeakPtr.h"
#include "nsTArray.h"
namespace mozilla {
class DOMMediaStream;
class MediaInputPort;
class OutputStreamManager;
class ProcessedMediaTrack;
class SourceMediaTrack;
struct SharedDummyTrack;
namespace dom {
class MediaStreamTrack;
}
class OutputStreamData {
public:
OutputStreamData(OutputStreamManager* aManager,
AbstractThread* aAbstractMainThread,
DOMMediaStream* aDOMStream);
OutputStreamData(const OutputStreamData& aOther) = delete;
OutputStreamData(OutputStreamData&& aOther) = delete;
~OutputStreamData();
// Creates and adds a MediaStreamTrack to mDOMStream so that we can feed data
// to it. When aAsyncAddTrack is true we dispatch a task to add the created
// track to mDOMStream, as the spec requires for the "addtrack" event.
void AddTrack(SourceMediaTrack* aTrack, MediaSegment::Type aType,
nsIPrincipal* aPrincipal, bool aAsyncAddTrack);
// Ends any MediaStreamTracks sourced from aTrack.
void RemoveTrack(SourceMediaTrack* aTrack);
void SetPrincipal(nsIPrincipal* aPrincipal);
const RefPtr<OutputStreamManager> mManager;
const RefPtr<AbstractThread> mAbstractMainThread;
// The DOMMediaStream we add tracks to and represent.
const WeakPtr<DOMMediaStream> mDOMStream;
private:
// Tracks that have been added and not yet removed.
nsTArray<WeakPtr<dom::MediaStreamTrack>> mTracks;
};
class OutputStreamManager {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
public:
OutputStreamManager(SharedDummyTrack* aDummyStream, nsIPrincipal* aPrincipal,
AbstractThread* aAbstractMainThread);
// Add the output stream to the collection.
void Add(DOMMediaStream* aDOMStream);
// Remove the output stream from the collection.
void Remove(DOMMediaStream* aDOMStream);
// Returns true if there's a live track of the given type.
bool HasTrackType(MediaSegment::Type aType);
// Returns true if the given tracks are sourcing all currently live tracks.
// Pass nullptr to have that type ignored.
bool HasTracks(SourceMediaTrack* aAudioStream,
SourceMediaTrack* aVideoStream);
// Gets the underlying track for the given type if it has never been played,
// or nullptr if there is none.
SourceMediaTrack* GetPrecreatedTrackOfType(MediaSegment::Type aType) const;
// Returns the number of live tracks.
size_t NumberOfTracks();
// Adds a live track of the given type, captures it into all output streams,
// and returns the SourceMediaTrack that sources it.
already_AddRefed<SourceMediaTrack> AddTrack(MediaSegment::Type aType);
// Remove all currently live tracks.
void RemoveTracks();
// Remove all currently live tracks and all output streams.
void Disconnect();
// The principal handle for the underlying decoder.
AbstractCanonical<PrincipalHandle>* CanonicalPrincipalHandle();
// Called when the underlying decoder's principal has changed.
void SetPrincipal(nsIPrincipal* aPrincipal);
// Called by DecodedStream when its playing state changes. While not playing
// we suspend the live tracks' underlying SourceMediaTracks.
void SetPlaying(bool aPlaying);
// Return true if the collection of output streams is empty.
bool IsEmpty() const {
MOZ_ASSERT(NS_IsMainThread());
return mStreams.IsEmpty();
}
const RefPtr<AbstractThread> mAbstractMainThread;
private:
~OutputStreamManager();
class LiveTrack {
public:
LiveTrack(SourceMediaTrack* aSourceTrack, MediaSegment::Type aType);
~LiveTrack();
const RefPtr<SourceMediaTrack> mSourceTrack;
const MediaSegment::Type mType;
bool mEverPlayed = false;
};
struct StreamComparator {
static bool Equals(const UniquePtr<OutputStreamData>& aData,
DOMMediaStream* aStream) {
return aData->mDOMStream == aStream;
}
};
struct TrackStreamComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
SourceMediaTrack* aTrack) {
return aLiveTrack->mSourceTrack == aTrack;
}
};
struct TrackTypeComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
MediaSegment::Type aType) {
return aLiveTrack->mType == aType;
}
};
struct PrecreatedTrackTypeComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
MediaSegment::Type aType) {
return !aLiveTrack->mEverPlayed && aLiveTrack->mType == aType;
}
};
struct TrackComparator {
static bool Equals(
const UniquePtr<LiveTrack>& aLiveTrack,
const Pair<SourceMediaTrack*, MediaSegment::Type>& aOther) {
return aLiveTrack->mSourceTrack == aOther.first() &&
aLiveTrack->mType == aOther.second();
}
};
// Goes through mStreams and removes any entries that have been destroyed.
void AutoRemoveDestroyedStreams();
// Remove tracks sourced from aTrack from all output tracks.
void RemoveTrack(SourceMediaTrack* aTrack);
const RefPtr<SharedDummyTrack> mDummyStream;
nsTArray<UniquePtr<OutputStreamData>> mStreams;
nsTArray<UniquePtr<LiveTrack>> mLiveTracks;
Canonical<PrincipalHandle> mPrincipalHandle;
bool mPlaying = false;
};
} // namespace mozilla
#endif // OutputStreamManager_h
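
For orientation, a minimal consumer-side sketch of the API restored above. This is not part of the changeset; the dummyTrack, principal and domStream variables are hypothetical stand-ins for state that a caller such as DecodedStream already holds.

  // Hypothetical usage sketch, not part of this changeset.
  RefPtr<OutputStreamManager> manager = new OutputStreamManager(
      dummyTrack, principal, AbstractThread::MainThread());
  manager->Add(domStream);  // capture playback into this DOMMediaStream
  RefPtr<SourceMediaTrack> audioTrack =
      manager->AddTrack(MediaSegment::AUDIO);  // at most one live track per type
  manager->SetPlaying(true);   // resumes the live SourceMediaTracks
  manager->SetPlaying(false);  // suspends them again
  manager->Disconnect();       // ends all tracks and forgets all streams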

View File

@@ -156,6 +156,18 @@ VideoSink::~VideoSink() {
 #endif
 }
 
+const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
+  AssertOwnerThread();
+
+  return mAudioSink->GetPlaybackParams();
+}
+
+void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
+  AssertOwnerThread();
+
+  mAudioSink->SetPlaybackParams(aParams);
+}
+
 RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
   AssertOwnerThread();
   MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
@@ -211,12 +223,6 @@ void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
   mAudioSink->SetPreservesPitch(aPreservesPitch);
 }
 
-double VideoSink::PlaybackRate() const {
-  AssertOwnerThread();
-
-  return mAudioSink->PlaybackRate();
-}
-
 void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
 #ifdef XP_WIN
   const bool needed = IsPlaying();
@@ -434,8 +440,8 @@ void VideoSink::TryUpdateRenderedVideoFrames() {
   // If we send this future frame to the compositor now, it will be rendered
   // immediately and break A/V sync. Instead, we schedule a timer to send it
   // later.
-  int64_t delta =
-      (v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
+  int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
+                  mAudioSink->GetPlaybackParams().mPlaybackRate;
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
   RefPtr<VideoSink> self = this;
   mUpdateScheduler.Ensure(
@@ -475,7 +481,7 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
 
   AutoTArray<ImageContainer::NonOwningImage, 16> images;
   TimeStamp lastFrameTime;
-  double playbackRate = mAudioSink->PlaybackRate();
+  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
   for (uint32_t i = 0; i < frames.Length(); ++i) {
     VideoData* frame = frames[i];
     bool wasSent = frame->IsSentToCompositor();
@@ -493,8 +499,8 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
     MOZ_ASSERT(!aClockTimeStamp.IsNull());
     int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
-    TimeStamp t =
-        aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
+    TimeStamp t = aClockTimeStamp +
+                  TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
     if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
       // Timestamps out of order; drop the new frame. In theory we should
       // probably replace the previous frame with the new frame if the
@@ -607,8 +613,9 @@ void VideoSink::UpdateRenderedVideoFrames() {
     int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
     int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
                              MIN_UPDATE_INTERVAL_US);
-    TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
-                                     delta / mAudioSink->PlaybackRate());
+    TimeStamp target =
+        nowTime + TimeDuration::FromMicroseconds(
+                      delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
 
     RefPtr<VideoSink> self = this;
     mUpdateScheduler.Ensure(
@@ -640,7 +647,7 @@ void VideoSink::MaybeResolveEndPromise() {
            "end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
            clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
   int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
-                  mAudioSink->PlaybackRate();
+                  mAudioSink->GetPlaybackParams().mPlaybackRate;
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
   auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
     self->mEndPromiseHolder.ResolveIfExists(true, __func__);
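
The arithmetic hunks above all make the same change: a delta measured in media time is divided by the current playback rate to get a wall-clock delay before arming the update timer, with the rate now read from GetPlaybackParams().mPlaybackRate instead of the removed PlaybackRate() accessor. A standalone sketch of the conversion, with hypothetical values:

  // At 2x speed, 100 ms of media time elapses in 50 ms of wall-clock time.
  int64_t mediaDeltaUs = (frameTime - clockTime).ToMicroseconds();  // 100000
  double rate = mAudioSink->GetPlaybackParams().mPlaybackRate;      // 2.0
  int64_t wallDeltaUs = static_cast<int64_t>(mediaDeltaUs / rate);  // 50000
  TimeStamp target = nowTime + TimeDuration::FromMicroseconds(wallDeltaUs);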

View File

@@ -32,6 +32,10 @@ class VideoSink : public MediaSink {
             MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
             FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);
 
+  const PlaybackParams& GetPlaybackParams() const override;
+
+  void SetPlaybackParams(const PlaybackParams& aParams) override;
+
   RefPtr<EndedPromise> OnEnded(TrackType aType) override;
 
   TimeUnit GetEndTime(TrackType aType) const override;
@@ -48,8 +52,6 @@ class VideoSink : public MediaSink {
   void SetPlaying(bool aPlaying) override;
 
-  double PlaybackRate() const override;
-
   void Redraw(const VideoInfo& aInfo) override;
 
   nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;

View File

@@ -8,6 +8,7 @@ UNIFIED_SOURCES += [
     'AudioSink.cpp',
     'AudioSinkWrapper.cpp',
     'DecodedStream.cpp',
+    'OutputStreamManager.cpp',
     'VideoSink.cpp',
 ]
 

View File

@@ -24,11 +24,11 @@ mozilla::LogModule* GetSourceBufferResourceLog() {
 
 namespace mozilla {
 
-RefPtr<GenericPromise> SourceBufferResource::Close() {
+nsresult SourceBufferResource::Close() {
   MOZ_ASSERT(OnThread());
   SBR_DEBUG("Close");
   mClosed = true;
-  return GenericPromise::CreateAndResolve(true, __func__);
+  return NS_OK;
 }
 
 nsresult SourceBufferResource::ReadAt(int64_t aOffset, char* aBuffer,

View File

@@ -36,7 +36,7 @@ class SourceBufferResource final
     : public DecoderDoctorLifeLogger<SourceBufferResource> {
  public:
   SourceBufferResource();
-  RefPtr<GenericPromise> Close() override;
+  nsresult Close() override;
   nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
                   uint32_t* aBytes) override;
   // Memory-based and no locks, caching discouraged.

View File

@@ -10,19 +10,19 @@
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-const manager = new MediaTestManager;
+var manager = new MediaTestManager;
 
 function startTest(test, token) {
-  const elemType = getMajorMimeType(test.type);
-  const element = document.createElement(elemType);
-
-  let audioOnchange = 0;
-  let audioOnaddtrack = 0;
-  let audioOnremovetrack = 0;
-  let videoOnchange = 0;
-  let videoOnaddtrack = 0;
-  let videoOnremovetrack = 0;
-  let isPlaying = false;
+  var elemType = getMajorMimeType(test.type);
+  var element = document.createElement(elemType);
+
+  var audioOnchange = 0;
+  var audioOnaddtrack = 0;
+  var audioOnremovetrack = 0;
+  var videoOnchange = 0;
+  var videoOnaddtrack = 0;
+  var videoOnremovetrack = 0;
+  var isPlaying = false;
 
   isnot(element.audioTracks, undefined,
         'HTMLMediaElement::AudioTracks() property should be available.');
@@ -53,43 +53,26 @@ function startTest(test, token) {
     videoOnchange++;
   }
 
-  function checkTrackNotRemoved() {
-    is(audioOnremovetrack, 0, 'Should have no calls of onremovetrack on audioTracks.');
-    is(videoOnremovetrack, 0, 'Should have no calls of onremovetrack on videoTracks.');
-    if (isPlaying) {
-      is(element.audioTracks.length, test.hasAudio ? 1 : 0,
-         'Expected length of audioTracks.');
-      is(element.videoTracks.length, test.hasVideo ? 1 : 0,
-         'Expected length of videoTracks.');
-    }
-  }
-
   function checkTrackRemoved() {
-    is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
-    is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
     if (isPlaying) {
-      is(audioOnremovetrack, test.hasAudio ? 1 : 0,
-         'Expected calls of onremovetrack on audioTracks.');
-      is(videoOnremovetrack, test.hasVideo ? 1 : 0,
-         'Expected calls of onremovetrack on videoTracks.');
+      if (test.hasAudio) {
+        is(audioOnremovetrack, 1, 'Calls of onremovetrack on audioTracks should be 1.');
+        is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
+      }
+      if (test.hasVideo) {
+        is(videoOnremovetrack, 1, 'Calls of onremovetrack on videoTracks should be 1.');
+        is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
+      }
     }
   }
 
   function onended() {
     ok(true, 'Event ended is expected to be fired on element.');
-    checkTrackNotRemoved();
+    checkTrackRemoved();
     element.onended = null;
     element.onplaying = null;
    element.onpause = null;
-    element.src = "";
-    is(element.audioTracks.length, 0, 'audioTracks have been forgotten');
-    is(element.videoTracks.length, 0, 'videoTracks have been forgotten');
-    is(audioOnremovetrack, 0, 'No audio removetrack events yet');
-    is(videoOnremovetrack, 0, 'No video removetrack events yet');
-    setTimeout(() => {
-      checkTrackRemoved();
-      manager.finished(element.token);
-    }, 100);
+    manager.finished(element.token);
   }
 
   function checkTrackAdded() {

View File

@@ -10,29 +10,28 @@
 <pre id="test">
 <script class="testbody" type="text/javascript">
 
-const manager = new MediaTestManager;
+var manager = new MediaTestManager;
 
 function startTest(test, token) {
   // Scenario to test:
   // 1. Audio tracks and video tracks should be added to the track list when
-  //    metadata has loaded, and all tracks should remain even after we seek to
-  //    the end.
-  // 2. No tracks should be added back to the list if we replay from the end,
-  //    and no tracks should be removed from the list after we seek to the end.
-  // 3. After seek to the middle from end of playback, all tracks should remain
-  //    in the list if we play from here, and no tracks should be removed from
-  //    the list after we seek to the end.
-  // 4. Unsetting the media element's src attribute should remove all tracks.
-
-  const elemType = getMajorMimeType(test.type);
-  const element = document.createElement(elemType);
-
-  let audioOnaddtrack = 0;
-  let audioOnremovetrack = 0;
-  let videoOnaddtrack = 0;
-  let videoOnremovetrack = 0;
-  let isPlaying = false;
-  let steps = 0;
+  //    playing, and all tracks should be removed from the list after we seek
+  //    to the end.
+  // 2. All tracks should be added back to the list if we replay from the end,
+  //    and all tracks should be removed from the list after we seek to the end.
+  // 3. After seek to the middle from end of playback, all tracks should be
+  //    added back to the list if we play from here, and all tracks should be
+  //    removed from the list after we seek to the end.
+
+  var elemType = getMajorMimeType(test.type);
+  var element = document.createElement(elemType);
+
+  var audioOnaddtrack = 0;
+  var audioOnremovetrack = 0;
+  var videoOnaddtrack = 0;
+  var videoOnremovetrack = 0;
+  var isPlaying = false;
+  var steps = 0;
 
   element.audioTracks.onaddtrack = function(e) {
     audioOnaddtrack++;
@@ -50,23 +49,16 @@ function startTest(test, token) {
     videoOnremovetrack++;
   }
 
-  function testExpectedAddtrack(expectedCalls) {
+  function testTrackEventCalls(expectedCalls) {
     if (test.hasAudio) {
       is(audioOnaddtrack, expectedCalls,
          'Calls of onaddtrack on audioTracks should be '+expectedCalls+' times.');
-    }
-    if (test.hasVideo) {
-      is(videoOnaddtrack, expectedCalls,
-         'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
-    }
-  }
-
-  function testExpectedRemovetrack(expectedCalls) {
-    if (test.hasAudio) {
       is(audioOnremovetrack, expectedCalls,
          'Calls of onremovetrack on audioTracks should be '+expectedCalls+' times.');
     }
     if (test.hasVideo) {
+      is(videoOnaddtrack, expectedCalls,
+         'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
       is(videoOnremovetrack, expectedCalls,
          'Calls of onremovetrack on videoTracks should be '+expectedCalls+' times.');
     }
@@ -84,29 +76,21 @@ function startTest(test, token) {
     if (isPlaying) {
       switch(steps) {
         case 1:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
+          testTrackEventCalls(1);
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
         case 2:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
+          testTrackEventCalls(2);
           element.currentTime = element.duration * 0.5;
           element.onplaying = onplaying;
           element.play();
           steps++;
           break;
        case 3:
-          testExpectedAddtrack(1);
-          testExpectedRemovetrack(0);
-          element.src = "";
-          setTimeout(() => {
-            testExpectedAddtrack(1);
-            testExpectedRemovetrack(1);
-            finishTesting();
-          }, 0);
+          testTrackEventCalls(3);
+          finishTesting();
           break;
       }
     } else {

View File

@@ -1,39 +1,35 @@
 <!DOCTYPE HTML>
 <html>
 <head>
-  <title>Test that reloading and seeking in a media element that's being captured behaves as expected</title>
+  <title>Test that reloading and seeking in a media element that's being captured doesn't crash</title>
   <script src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-  <script src="manifest.js"></script>
+  <script type="text/javascript" src="manifest.js"></script>
 </head>
 <body>
 <video id="v"></video>
 <video id="vout"></video>
 <video id="vout_untilended"></video>
 <pre id="test">
-<script>
-const v = document.getElementById('v');
-const vout = document.getElementById('vout');
-const vout_untilended = document.getElementById('vout_untilended');
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var v = document.getElementById('v');
+var vout = document.getElementById('vout');
+var vout_untilended = document.getElementById('vout_untilended');
 
 function dumpEvent(event) {
-  const video = event.target;
-  info(
-    `${video.name}:${video.id} GOT EVENT ${event.type} ` +
-    `currentTime=${video.currentTime} paused=${video.paused} ` +
-    `ended=${video.ended} readyState=${video.readyState}`
-  );
+  var video = event.target;
+  info(video.name + " GOT EVENT " + event.type +
+       " currentTime=" + video.currentTime +
+       " paused=" + video.paused +
+       " ended=" + video.ended +
+       " readyState=" + video.readyState);
 }
 
-function unexpected(event) {
-  ok(false, `${event.type} event received on ${event.target.id} unexpectedly`);
-};
-
-const events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
-for (const e of events) {
-  v.addEventListener(e, dumpEvent);
-  vout.addEventListener(e, dumpEvent);
-  vout_untilended.addEventListener(e, dumpEvent);
+var events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
+for (var i = 0; i < events.length; ++i) {
+  v.addEventListener(events[i], dumpEvent);
 }
 
 function isWithinEps(a, b, msg) {
@@ -46,91 +42,92 @@ function isGreaterThanOrEqualEps(a, b, msg) {
        "Got " + a + ", expected at least " + b + "; " + msg);
 }
 
-async function startTest(test) {
-  const seekTime = test.duration/2;
+function startTest(test) {
+  var seekTime = test.duration/2;
+
+  function endedAfterReplay() {
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at third 'ended' event");
+    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration*2,
+                            "checking vout.currentTime after seeking, playing through and reloading");
+    SimpleTest.finish();
+  };
+
+  function endedAfterSeek() {
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at second 'ended' event");
+    isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration,
+                            "checking vout.currentTime after seeking and playing through again");
+    v.removeEventListener("ended", endedAfterSeek);
+    v.addEventListener("ended", endedAfterReplay);
+    v.src = test.name + "?1";
+    v.play();
+  };
+
+  function seeked() {
+    isGreaterThanOrEqualEps(v.currentTime, seekTime, "Finished seeking");
+    isGreaterThanOrEqualEps(vout.currentTime, test.duration,
+                            "checking vout.currentTime has not changed after seeking");
+    v.removeEventListener("seeked", seeked);
+    function dontPlayAgain() {
+      ok(false, "vout_untilended should not play again");
+    }
+    vout_untilended.addEventListener("playing", dontPlayAgain);
+    vout_untilended.addEventListener("ended", dontPlayAgain);
+    v.addEventListener("ended", endedAfterSeek);
+    v.play();
+  };
+
+  function ended() {
+    // Don't compare current time until both v and vout_untilended are ended,
+    // otherwise, current time could be smaller than the duration.
+    if (!v.ended || !vout_untilended.ended) {
+      return;
+    }
+    isGreaterThanOrEqualEps(vout.currentTime, test.duration, "checking vout.currentTime at first 'ended' event");
+    isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at first 'ended' event");
+    is(vout.ended, false, "checking vout has not ended");
+    is(vout_untilended.ended, true, "checking vout_untilended has actually ended");
+    v.removeEventListener("ended", ended);
+    vout_untilended.removeEventListener("ended", ended);
+    v.pause();
+    v.currentTime = seekTime;
+    v.addEventListener("seeked", seeked);
+  };
+
+  v.addEventListener("ended", ended);
+  vout_untilended.addEventListener("ended", ended);
+
+  function checkNoEnded() {
+    ok(false, "ended event received unexpectedly");
+  };
+
+  vout.addEventListener("ended", checkNoEnded);
 
   v.src = test.name;
   v.name = test.name;
-  vout.name = test.name;
-  vout_untilended.name = test.name;
   v.preload = "metadata";
-  await new Promise(r => v.onloadedmetadata = r);
 
-  vout.srcObject = v.mozCaptureStream();
-  vout.play();
+  function loadedmetadata() {
+    vout.srcObject = v.mozCaptureStream();
+    vout.play();
 
-  vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
-  vout_untilended.play();
+    vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
+    vout_untilended.play();
 
-  v.play();
+    v.play();
+  };
 
-  await new Promise(r => v.onended = r);
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at first 'ended' event");
-
-  await Promise.all([
-    new Promise(r => vout.onended = r),
-    new Promise(r => vout_untilended.onended = r),
-  ]);
-
-  isGreaterThanOrEqualEps(vout.currentTime, test.duration,
-    "checking vout.currentTime at first 'ended' event");
-  ok(vout.ended, "checking vout has actually ended");
-  ok(vout_untilended.ended, "checking vout_untilended has actually ended");
-
-  vout_untilended.srcObject.onaddtrack = unexpected;
-  vout_untilended.onplaying = unexpected;
-  vout_untilended.onended = unexpected;
-
-  const voutPreSeekCurrentTime = vout.currentTime;
-  v.currentTime = seekTime;
-  await new Promise(r => v.onseeked = r);
-
-  is(v.currentTime, seekTime, "Finished seeking");
-  is(vout.currentTime, voutPreSeekCurrentTime,
-    "checking vout.currentTime has not changed after seeking");
-
-  v.play();
-  vout.play();
-  await new Promise(r => v.onended = r);
-
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at second 'ended' event");
-  await new Promise(r => vout.onended = r);
-
-  isGreaterThanOrEqualEps(vout.currentTime,
-    (test.duration - seekTime) + test.duration,
-    "checking vout.currentTime after seeking and playing through again");
-
-  v.src = test.name + "?1";
-  v.play();
-  vout.play();
-  await new Promise(r => v.onended = r);
-
-  isGreaterThanOrEqualEps(v.currentTime, test.duration,
-    "checking v.currentTime at third 'ended' event");
-  await new Promise(r => vout.onended = r);
-
-  isGreaterThanOrEqualEps(vout.currentTime,
-    (test.duration - seekTime) + test.duration*2,
-    "checking vout.currentTime after seeking, playing through and reloading");
+  v.addEventListener("loadedmetadata", loadedmetadata, {once: true});
 }
 
-(async () => {
-  SimpleTest.waitForExplicitFinish();
-  try {
-    const testVideo = getPlayableVideo(gSmallTests);
-    if (testVideo) {
-      await startTest(testVideo);
-    } else {
-      todo(false, "No playable video");
-    }
-  } catch(e) {
-    ok(false, `Error: ${e}`);
-  } finally {
-    SimpleTest.finish();
-  }
-})();
+var testVideo = getPlayableVideo(gSmallTests);
+if (testVideo) {
+  startTest(testVideo);
+} else {
+  todo(false, "No playable video");
+}
 </script>
 </pre>
 </body>

View File

@@ -9,7 +9,6 @@
 #include "AudioDestinationNode.h"
 #include "nsIScriptError.h"
 #include "AudioNodeTrack.h"
-#include "MediaStreamTrack.h"
 
 namespace mozilla {
 namespace dom {

View File

@@ -76,8 +76,8 @@ void MediaStreamAudioSourceNode::Init(DOMMediaStream* aMediaStream,
   mInputStream->AddConsumerToKeepAlive(ToSupports(this));
 
   mInputStream->RegisterTrackListener(this);
-  if (mInputStream->Audible()) {
-    NotifyAudible();
+  if (mInputStream->Active()) {
+    NotifyActive();
   }
   AttachToRightTrack(mInputStream, aRv);
 }
@@ -119,7 +119,6 @@ void MediaStreamAudioSourceNode::AttachToTrack(
   mInputPort = mInputTrack->ForwardTrackContentsTo(outputTrack);
   PrincipalChanged(mInputTrack);  // trigger enabling/disabling of the connector
   mInputTrack->AddPrincipalChangeObserver(this);
-  MarkActive();
 }
 
 void MediaStreamAudioSourceNode::DetachFromTrack() {
@@ -166,6 +165,7 @@ void MediaStreamAudioSourceNode::AttachToRightTrack(
     if (!track->Ended()) {
       AttachToTrack(track, aRv);
+      MarkActive();
     }
     return;
   }
@@ -202,7 +202,7 @@
   }
 }
 
-void MediaStreamAudioSourceNode::NotifyAudible() {
+void MediaStreamAudioSourceNode::NotifyActive() {
   MOZ_ASSERT(mInputStream);
   Context()->StartBlockedAudioContextIfAllowed();
 }

View File

@@ -91,7 +91,7 @@ class MediaStreamAudioSourceNode
   // From DOMMediaStream::TrackListener.
   void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
   void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
-  void NotifyAudible() override;
+  void NotifyActive() override;
 
   // From PrincipalChangeObserver<MediaStreamTrack>.
   void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;

View File

@@ -0,0 +1,8 @@
[mediaElementAudioSourceToScriptProcessorTest.html]
  disabled:
    if (os == "mac") and (version == "OS X 10.14"): new platform
    if (os == "android") and debug: https://bugzilla.mozilla.org/show_bug.cgi?id=1546756

  [All data processed correctly]
    expected:
      if processor == "aarch64": ["PASS", "FAIL"]