Backed out 15 changesets (bug 1500049, bug 1172394, bug 1546756, bug 1302379) for failures on browser_disabledForMediaStreamVideos.js. CLOSED TREE

Backed out changeset 355f090421a6 (bug 1500049)
Backed out changeset 306341d0b586 (bug 1302379)
Backed out changeset 3ff0d72d23a2 (bug 1546756)
Backed out changeset a4f256e68cef (bug 1172394)
Backed out changeset d0aa43657e8c (bug 1172394)
Backed out changeset edff95b6f724 (bug 1172394)
Backed out changeset 94bd21d9b396 (bug 1172394)
Backed out changeset 7e7baa73e1ef (bug 1172394)
Backed out changeset c3bd415507e8 (bug 1172394)
Backed out changeset 1c45b135318d (bug 1172394)
Backed out changeset c57c41e8c39e (bug 1172394)
Backed out changeset a796541fe5ef (bug 1172394)
Backed out changeset 89ad0b553b0f (bug 1172394)
Backed out changeset 744fb77a5833 (bug 1172394)
Backed out changeset afb4b226ff04 (bug 1172394)
Author:  Csoregi Natalia
Date:    2019-11-14 00:32:51 +02:00
Parent:  6db46a2b36
Commit:  6ba30843e8
42 changed files with 1586 additions and 1148 deletions

File diff suppressed because it is too large.

View File

@@ -113,26 +113,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
typedef mozilla::MediaDecoderOwner MediaDecoderOwner;
typedef mozilla::MetadataTags MetadataTags;
// Helper struct to keep track of the MediaStreams returned by
// mozCaptureStream(). For each OutputMediaStream, dom::MediaTracks get
// captured into MediaStreamTracks which get added to
// OutputMediaStream::mStream.
struct OutputMediaStream {
OutputMediaStream(RefPtr<DOMMediaStream> aStream, bool aCapturingAudioOnly,
bool aFinishWhenEnded);
~OutputMediaStream();
RefPtr<DOMMediaStream> mStream;
const bool mCapturingAudioOnly;
const bool mFinishWhenEnded;
// If mFinishWhenEnded is true, this is the URI of the first resource
// mStream got tracks for, if not a MediaStream.
nsCOMPtr<nsIURI> mFinishWhenEndedLoadingSrc;
// If mFinishWhenEnded is true, this is the first MediaStream mStream got
// tracks for, if not a resource.
RefPtr<DOMMediaStream> mFinishWhenEndedAttrStream;
};
MOZ_DECLARE_WEAKREFERENCE_TYPENAME(HTMLMediaElement)
NS_DECL_NSIMUTATIONOBSERVER_CONTENTREMOVED
@@ -271,9 +251,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
void DispatchAsyncEvent(const nsAString& aName) final;
// Triggers a recomputation of readyState.
void UpdateReadyState() override {
mWatchManager.ManualNotify(&HTMLMediaElement::UpdateReadyStateInternal);
}
void UpdateReadyState() override { UpdateReadyStateInternal(); }
// Dispatch events that were raised while in the bfcache
nsresult DispatchPendingMediaEvents();
@@ -715,6 +693,10 @@ class HTMLMediaElement : public nsGenericHTMLElement,
Document* GetDocument() const override;
void ConstructMediaTracks(const MediaInfo* aInfo) override;
void RemoveMediaTracks() override;
already_AddRefed<GMPCrashHelper> CreateGMPCrashHelper() override;
nsISerialEventTarget* MainThreadEventTarget() {
@@ -747,17 +729,37 @@ class HTMLMediaElement : public nsGenericHTMLElement,
class AudioChannelAgentCallback;
class ChannelLoader;
class ErrorSink;
class MediaElementTrackSource;
class MediaLoadListener;
class MediaStreamRenderer;
class MediaStreamTrackListener;
class FirstFrameListener;
class ShutdownObserver;
class StreamCaptureTrackSource;
MediaDecoderOwner::NextFrameStatus NextFrameStatus();
void SetDecoder(MediaDecoder* aDecoder);
// Holds references to the DOM wrappers for the MediaStreams that we're
// writing to.
struct OutputMediaStream {
OutputMediaStream();
~OutputMediaStream();
RefPtr<DOMMediaStream> mStream;
// Dummy stream to keep mGraph from shutting down when MediaDecoder shuts
// down. Shared across all OutputMediaStreams as one stream is enough to
// keep the graph alive.
RefPtr<SharedDummyTrack> mGraphKeepAliveDummyStream;
bool mFinishWhenEnded;
bool mCapturingAudioOnly;
bool mCapturingDecoder;
bool mCapturingMediaStream;
// The following members are keeping state for a captured MediaStream.
nsTArray<Pair<nsString, RefPtr<MediaStreamTrackSource>>> mTracks;
};
void PlayInternal(bool aHandlingUserInput);
/** Use this method to change the mReadyState member, so required
@@ -852,35 +854,28 @@ class HTMLMediaElement : public nsGenericHTMLElement,
*/
void NotifyMediaStreamTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack);
/**
* Convenience method to get in a single list all enabled AudioTracks and, if
* this is a video element, the selected VideoTrack.
*/
void GetAllEnabledMediaTracks(nsTArray<RefPtr<MediaTrack>>& aTracks);
/**
* Enables or disables all tracks forwarded from mSrcStream to all
* OutputMediaStreams. We do this for muting the tracks when pausing,
* and unmuting when playing the media element again.
*
* If mSrcStream is unset, this does nothing.
*/
void SetCapturedOutputStreamsEnabled(bool aEnabled);
/**
* Create a new MediaStreamTrack for the TrackSource corresponding to aTrack
* and add it to the DOMMediaStream in aOutputStream. This automatically sets
* the output track to enabled or disabled depending on our current playing
* state.
* Create a new MediaStreamTrack for aTrack and add it to the DOMMediaStream
* in aOutputStream. This automatically sets the output track to enabled or
* disabled depending on our current playing state.
*/
enum class AddTrackMode { ASYNC, SYNC };
void AddOutputTrackSourceToOutputStream(
MediaElementTrackSource* aSource, OutputMediaStream& aOutputStream,
AddTrackMode aMode = AddTrackMode::ASYNC);
void AddCaptureMediaTrackToOutputStream(dom::MediaTrack* aTrack,
OutputMediaStream& aOutputStream,
bool aAsyncAddtrack = true);
/**
* Creates output track sources when this media element is captured, tracks
* exist, playback is not ended and readyState is >= HAVE_METADATA.
* Discard all output streams that are flagged to finish when playback ends.
*/
void UpdateOutputTrackSources();
void DiscardFinishWhenEndedOutputStreams();
/**
* Returns a DOMMediaStream containing the played contents of this
@@ -894,8 +889,8 @@ class HTMLMediaElement : public nsGenericHTMLElement,
* reaching the stream. No video tracks will be captured in this case.
*/
already_AddRefed<DOMMediaStream> CaptureStreamInternal(
StreamCaptureBehavior aFinishBehavior,
StreamCaptureType aStreamCaptureType, MediaTrackGraph* aGraph);
StreamCaptureBehavior aBehavior, StreamCaptureType aType,
MediaTrackGraph* aGraph);
/**
* Initialize a decoder as a clone of an existing decoder in another
@@ -1254,18 +1249,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// Pass information for deciding the video decode mode to decoder.
void NotifyDecoderActivityChanges() const;
// Constructs an AudioTrack in mAudioTrackList if aInfo reports that audio is
// available, and a VideoTrack in mVideoTrackList if aInfo reports that video
// is available.
void ConstructMediaTracks(const MediaInfo* aInfo);
// Removes all MediaTracks from mAudioTrackList and mVideoTrackList and fires
// "removetrack" on the lists accordingly.
// Note that by spec, this should not fire "removetrack". However, it appears
// other user agents do, per
// https://wpt.fyi/results/media-source/mediasource-avtracks.html.
void RemoveMediaTracks();
// Mark the decoder owned by the element as tainted so that the
// suspend-video-decoder is disabled.
void MarkAsTainted();
@@ -1347,6 +1330,9 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// enabled audio tracks, while mSrcStream is set.
RefPtr<MediaStreamRenderer> mMediaStreamRenderer;
// True once mSrcStream's initial set of tracks are known.
bool mSrcStreamTracksAvailable = false;
// True once PlaybackEnded() is called and we're playing a MediaStream.
// Reset to false if we start playing mSrcStream again.
Watchable<bool> mSrcStreamPlaybackEnded = {
@@ -1366,12 +1352,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// writing to.
nsTArray<OutputMediaStream> mOutputStreams;
// Mapping for output tracks, from dom::MediaTrack ids to the
// MediaElementTrackSource that represents the source of all corresponding
// MediaStreamTracks captured from this element.
nsRefPtrHashtable<nsStringHashKey, MediaElementTrackSource>
mOutputTrackSources;
// Holds a reference to the first-frame-getting track listener attached to
// mSelectedVideoStreamTrack.
RefPtr<FirstFrameListener> mFirstFrameListener;
@@ -1563,7 +1543,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// Playback of the video is paused either due to calling the
// 'Pause' method, or playback not yet having started.
Watchable<bool> mPaused = {true, "HTMLMediaElement::mPaused"};
Watchable<bool> mPaused;
// The following two fields are here for the private storage of the builtin
// video controls, and control 'casting' of the video to external devices
@@ -1573,14 +1553,6 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// True if currently casting this video
bool mIsCasting = false;
// Set while there are some OutputMediaStreams this media element's enabled
// and selected tracks are captured into. When set, all tracks are captured
// into the graph of this dummy track.
// NB: This is a SharedDummyTrack to allow non-default graphs (AudioContexts
// with an explicit sampleRate defined) to capture this element. When
// cross-graph tracks are supported, this can become a bool.
Watchable<RefPtr<SharedDummyTrack>> mTracksCaptured;
// True if the sound is being captured.
bool mAudioCaptured = false;
@@ -1682,8 +1654,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
EncryptionInfo mPendingEncryptedInitData;
// True if the media's channel's download has been suspended.
Watchable<bool> mDownloadSuspendedByCache = {
false, "HTMLMediaElement::mDownloadSuspendedByCache"};
bool mDownloadSuspendedByCache = false;
// Disable the video playback by track selection. This flag might not be
// enough if we ever expand the ability of supporting multi-tracks video
@@ -1821,8 +1792,7 @@ class HTMLMediaElement : public nsGenericHTMLElement,
bool mIsBlessed = false;
// True if the first frame has been successfully loaded.
Watchable<bool> mFirstFrameLoaded = {false,
"HTMLMediaElement::mFirstFrameLoaded"};
bool mFirstFrameLoaded = false;
// Media elements also have a default playback start position, which must
// initially be set to zero seconds. This time is used to allow the element to
@@ -1837,6 +1807,10 @@ class HTMLMediaElement : public nsGenericHTMLElement,
// For use by mochitests. Enabling pref "media.test.video-suspend"
bool mForcedHidden = false;
// True if audio tracks and video tracks are constructed and added into the
// track list, false if all tracks are removed from the track list.
bool mMediaTracksConstructed = false;
Visibility mVisibilityState = Visibility::Untracked;
UniquePtr<ErrorSink> mErrorSink;
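Several hunks above toggle members between plain bools and Watchable<bool> state (mPaused, mFirstFrameLoaded, mDownloadSuspendedByCache) whose writes notify registered watchers, which is why one version of UpdateReadyState() reduces to a one-line ManualNotify() call. Below is a minimal standalone sketch of that observer pattern, with toy types only; Gecko's real Watchable/WatchManager live in StateWatching.h and differ in detail.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for mozilla::Watchable<T>: a named value that runs registered
// callbacks whenever the stored value actually changes.
template <typename T>
class Watchable {
 public:
  Watchable(T aInit, std::string aName)
      : mValue(std::move(aInit)), mName(std::move(aName)) {}

  // WatchManager-style registration, reduced to a bare callback list.
  void Watch(std::function<void()> aCallback) {
    mWatchers.push_back(std::move(aCallback));
  }

  Watchable& operator=(const T& aNewValue) {
    if (mValue != aNewValue) {
      mValue = aNewValue;
      Notify();  // only actual changes notify
    }
    return *this;
  }

  // Like WatchManager::ManualNotify(): re-run watchers without a change.
  void Notify() {
    for (auto& watcher : mWatchers) {
      watcher();
    }
  }

  const T& Ref() const { return mValue; }

 private:
  T mValue;
  std::string mName;  // used for logging in the real implementation
  std::vector<std::function<void()>> mWatchers;
};

int main() {
  Watchable<bool> paused{true, "mPaused"};
  paused.Watch([&] { std::cout << "paused is now " << paused.Ref() << "\n"; });
  paused = false;   // fires the watcher
  paused = false;   // no change, no notification
  paused.Notify();  // manual notify, like the one-line UpdateReadyState()
}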

View File

@@ -221,27 +221,13 @@ void ChannelMediaDecoder::Shutdown() {
mResourceCallback->Disconnect();
MediaDecoder::Shutdown();
// Force any outstanding seek and byterange requests to complete
// to prevent shutdown from deadlocking.
if (mResource) {
// Force any outstanding seek and byterange requests to complete
// to prevent shutdown from deadlocking.
mResourceClosePromise = mResource->Close();
mResource->Close();
}
}
void ChannelMediaDecoder::ShutdownInternal() {
if (!mResourceClosePromise) {
MediaShutdownManager::Instance().Unregister(this);
return;
}
mResourceClosePromise->Then(
AbstractMainThread(), __func__,
[self = RefPtr<ChannelMediaDecoder>(this)] {
MediaShutdownManager::Instance().Unregister(self);
});
return;
}
nsresult ChannelMediaDecoder::Load(nsIChannel* aChannel,
bool aIsPrivateBrowsing,
nsIStreamListener** aStreamListener) {
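The Shutdown()/ShutdownInternal() hunk above swaps a fire-and-forget mResource->Close() for a retained mResourceClosePromise that gates unregistering from MediaShutdownManager. A minimal standalone sketch of that gating pattern, with toy Promise, Resource and Decoder types standing in for GenericPromise, ChannelMediaResource and ChannelMediaDecoder:

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Toy single-shot promise standing in for mozilla::GenericPromise.
class Promise {
 public:
  void Then(std::function<void()> aCallback) {
    if (mResolved) {
      aCallback();  // already resolved: run immediately
    } else {
      mCallbacks.push_back(std::move(aCallback));
    }
  }
  void Resolve() {
    mResolved = true;
    for (auto& callback : mCallbacks) {
      callback();
    }
    mCallbacks.clear();
  }

 private:
  bool mResolved = false;
  std::vector<std::function<void()>> mCallbacks;
};

// Toy resource whose Close() completes asynchronously.
struct Resource {
  std::shared_ptr<Promise> Close() {
    mClosePromise = std::make_shared<Promise>();
    return mClosePromise;
  }
  std::shared_ptr<Promise> mClosePromise;
};

struct Decoder {
  Resource* mResource = nullptr;
  std::shared_ptr<Promise> mResourceClosePromise;

  void Shutdown() {
    if (mResource) {
      // Keep the promise so the shutdown blocker is released only after
      // the asynchronous close has actually completed.
      mResourceClosePromise = mResource->Close();
    }
    ShutdownInternal();
  }

  void ShutdownInternal() {
    if (!mResourceClosePromise) {
      std::cout << "unregister shutdown blocker immediately\n";
      return;
    }
    mResourceClosePromise->Then(
        [] { std::cout << "unregister shutdown blocker after close\n"; });
  }
};

int main() {
  Resource resource;
  Decoder decoder;
  decoder.mResource = &resource;
  decoder.Shutdown();                 // blocker stays registered...
  resource.mClosePromise->Resolve();  // ...until the close resolves here
}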

View File

@@ -59,7 +59,6 @@ class ChannelMediaDecoder
};
protected:
void ShutdownInternal() override;
void OnPlaybackEvent(MediaPlaybackEvent&& aEvent) override;
void DurationChanged() override;
void MetadataLoaded(UniquePtr<MediaInfo> aInfo, UniquePtr<MetadataTags> aTags,
@@ -157,10 +156,6 @@ class ChannelMediaDecoder
// True if we've been notified that the ChannelMediaResource has
// a principal.
bool mInitialChannelPrincipalKnown = false;
// Set in Shutdown() when we start closing mResource, if mResource is set.
// Must resolve before we unregister the shutdown blocker.
RefPtr<GenericPromise> mResourceClosePromise;
};
} // namespace mozilla

View File

@@ -589,15 +589,15 @@ nsresult ChannelMediaResource::SetupChannelHeaders(int64_t aOffset) {
return NS_OK;
}
RefPtr<GenericPromise> ChannelMediaResource::Close() {
nsresult ChannelMediaResource::Close() {
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
if (!mClosed) {
CloseChannel();
mCacheStream.Close();
mClosed = true;
return mCacheStream.Close();
}
return GenericPromise::CreateAndResolve(true, __func__);
return NS_OK;
}
already_AddRefed<nsIPrincipal> ChannelMediaResource::GetCurrentPrincipal() {

View File

@@ -117,7 +117,7 @@ class ChannelMediaResource
// Main thread
nsresult Open(nsIStreamListener** aStreamListener) override;
RefPtr<GenericPromise> Close() override;
nsresult Close() override;
void Suspend(bool aCloseImmediately) override;
void Resume() override;
already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;

View File

@@ -148,9 +148,7 @@ nsresult CloneableWithRangeMediaResource::Open(
return NS_OK;
}
RefPtr<GenericPromise> CloneableWithRangeMediaResource::Close() {
return GenericPromise::CreateAndResolve(true, __func__);
}
nsresult CloneableWithRangeMediaResource::Close() { return NS_OK; }
already_AddRefed<nsIPrincipal>
CloneableWithRangeMediaResource::GetCurrentPrincipal() {

View File

@@ -27,7 +27,7 @@ class CloneableWithRangeMediaResource : public BaseMediaResource {
// Main thread
nsresult Open(nsIStreamListener** aStreamListener) override;
RefPtr<GenericPromise> Close() override;
nsresult Close() override;
void Suspend(bool aCloseImmediately) override {}
void Resume() override {}
already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;

View File

@@ -373,7 +373,6 @@ already_AddRefed<DOMMediaStream> DOMMediaStream::Clone() {
}
bool DOMMediaStream::Active() const { return mActive; }
bool DOMMediaStream::Audible() const { return mAudible; }
MediaStreamTrack* DOMMediaStream::GetTrackById(const nsAString& aId) const {
for (const auto& track : mTracks) {
@@ -456,6 +455,20 @@ void DOMMediaStream::UnregisterTrackListener(TrackListener* aListener) {
mTrackListeners.RemoveElement(aListener);
}
void DOMMediaStream::SetFinishedOnInactive(bool aFinishedOnInactive) {
MOZ_ASSERT(NS_IsMainThread());
if (mFinishedOnInactive == aFinishedOnInactive) {
return;
}
mFinishedOnInactive = aFinishedOnInactive;
if (mFinishedOnInactive && !ContainsLiveTracks(mTracks)) {
NotifyTrackRemoved(nullptr);
}
}
void DOMMediaStream::NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) {
MOZ_ASSERT(NS_IsMainThread());
@@ -504,6 +517,10 @@ void DOMMediaStream::NotifyTrackRemoved(
}
}
if (!mFinishedOnInactive) {
return;
}
if (mAudible) {
// Check if we became inaudible.
if (!ContainsLiveAudioTracks(mTracks)) {
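SetFinishedOnInactive() above is a gate on the stream's inactivity transition: while false, NotifyTrackRemoved() returns early, and re-enabling the gate re-checks liveness because the last track may have ended in the meantime. A rough standalone sketch of the same logic (toy Stream type, integer track ids, nothing Gecko-specific):

#include <algorithm>
#include <iostream>
#include <vector>

// Toy stream that only fires its "went inactive" transition when no live
// tracks remain AND the owner currently allows it.
class Stream {
 public:
  void AddLiveTrack(int aId) { mLiveTracks.push_back(aId); }

  void RemoveTrack(int aId) {
    mLiveTracks.erase(
        std::remove(mLiveTracks.begin(), mLiveTracks.end(), aId),
        mLiveTracks.end());
    NotifyTrackRemoved();
  }

  // Mirrors the shape of DOMMediaStream::SetFinishedOnInactive(): when the
  // gate is re-enabled we must re-check, since tracks may have ended while
  // the gate was closed.
  void SetFinishedOnInactive(bool aFinishedOnInactive) {
    if (mFinishedOnInactive == aFinishedOnInactive) {
      return;
    }
    mFinishedOnInactive = aFinishedOnInactive;
    if (mFinishedOnInactive && mLiveTracks.empty()) {
      NotifyTrackRemoved();
    }
  }

 private:
  void NotifyTrackRemoved() {
    if (!mFinishedOnInactive) {
      return;  // the owner (e.g. a capturing decoder) is holding us active
    }
    if (mLiveTracks.empty()) {
      std::cout << "stream went inactive\n";
    }
  }

  std::vector<int> mLiveTracks;
  bool mFinishedOnInactive = true;
};

int main() {
  Stream stream;
  stream.AddLiveTrack(1);
  stream.SetFinishedOnInactive(false);  // e.g. while mozCaptureStream is live
  stream.RemoveTrack(1);                // no transition yet
  stream.SetFinishedOnInactive(true);   // transition fires now
}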

View File

@@ -144,9 +144,6 @@ class DOMMediaStream : public DOMEventTargetHelper,
// NON-WebIDL
// Returns true if this stream contains a live audio track.
bool Audible() const;
/**
* Returns true if this DOMMediaStream has aTrack in mTracks.
*/
@@ -189,6 +186,10 @@ class DOMMediaStream : public DOMEventTargetHelper,
// a dead pointer. Main thread only.
void UnregisterTrackListener(TrackListener* aListener);
// Tells this MediaStream whether it can go inactive as soon as no tracks
// are live anymore.
void SetFinishedOnInactive(bool aFinishedOnInactive);
protected:
virtual ~DOMMediaStream();
@@ -239,6 +240,10 @@ class DOMMediaStream : public DOMEventTargetHelper,
// True if this stream has live audio tracks.
bool mAudible = false;
// For compatibility with mozCaptureStream, we in some cases do not go
// inactive until the MediaDecoder lets us. (Remove this in Bug 1302379)
bool mFinishedOnInactive = true;
};
NS_DEFINE_STATIC_IID_ACCESSOR(DOMMediaStream, NS_DOMMEDIASTREAM_IID)

View File

@@ -95,7 +95,7 @@ nsresult FileMediaResource::Open(nsIStreamListener** aStreamListener) {
return NS_OK;
}
RefPtr<GenericPromise> FileMediaResource::Close() {
nsresult FileMediaResource::Close() {
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
// Since mChannel is only accessed by the main thread, there is no need to
@@ -105,7 +105,7 @@ RefPtr<GenericPromise> FileMediaResource::Close() {
mChannel = nullptr;
}
return GenericPromise::CreateAndResolve(true, __func__);
return NS_OK;
}
already_AddRefed<nsIPrincipal> FileMediaResource::GetCurrentPrincipal() {

View File

@@ -23,7 +23,7 @@ class FileMediaResource : public BaseMediaResource {
// Main thread
nsresult Open(nsIStreamListener** aStreamListener) override;
RefPtr<GenericPromise> Close() override;
nsresult Close() override;
void Suspend(bool aCloseImmediately) override {}
void Resume() override {}
already_AddRefed<nsIPrincipal> GetCurrentPrincipal() override;

View File

@@ -161,7 +161,7 @@ class MediaCache {
// file backing will be provided.
static RefPtr<MediaCache> GetMediaCache(int64_t aContentLength);
nsISerialEventTarget* OwnerThread() const { return sThread; }
nsIEventTarget* OwnerThread() const { return sThread; }
// Brutally flush the cache contents. Main thread only.
void Flush();
@@ -2196,18 +2196,17 @@ bool MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock) {
return true;
}
RefPtr<GenericPromise> MediaCacheStream::Close() {
void MediaCacheStream::Close() {
MOZ_ASSERT(NS_IsMainThread());
if (!mMediaCache) {
return GenericPromise::CreateAndResolve(true, __func__);
return;
}
return InvokeAsync(OwnerThread(), "MediaCacheStream::Close",
[this, client = RefPtr<ChannelMediaResource>(mClient)] {
AutoLock lock(mMediaCache->Monitor());
CloseInternal(lock);
return GenericPromise::CreateAndResolve(true, __func__);
});
OwnerThread()->Dispatch(NS_NewRunnableFunction(
"MediaCacheStream::Close",
[this, client = RefPtr<ChannelMediaResource>(mClient)]() {
AutoLock lock(mMediaCache->Monitor());
CloseInternal(lock);
}));
}
void MediaCacheStream::CloseInternal(AutoLock& aLock) {
@@ -2735,7 +2734,7 @@ void MediaCacheStream::InitAsCloneInternal(MediaCacheStream* aOriginal) {
lock.NotifyAll();
}
nsISerialEventTarget* MediaCacheStream::OwnerThread() const {
nsIEventTarget* MediaCacheStream::OwnerThread() const {
return mMediaCache->OwnerThread();
}
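The MediaCacheStream::Close() hunk above trades InvokeAsync(), which returns a promise resolved once CloseInternal() has run on the owner thread, for a plain Dispatch() that offers no completion signal. A minimal sketch of the difference using standard C++ primitives (std::future stands in for GenericPromise here; this is not the Gecko API):

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

// Promise-returning shape: the caller can sequence teardown after the close
// has really happened on the other thread.
std::future<void> CloseWithPromise() {
  return std::async(std::launch::async,
                    [] { std::cout << "CloseInternal on owner thread\n"; });
}

// Dispatch-only shape: the close happens eventually, but the caller gets no
// completion signal to order later teardown against.
void CloseFireAndForget() {
  std::thread([] { std::cout << "CloseInternal on owner thread\n"; }).detach();
}

int main() {
  CloseWithPromise().wait();  // safe to delete the resource after this line
  CloseFireAndForget();       // nothing to wait on here
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
}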

View File

@@ -217,12 +217,12 @@ class MediaCacheStream : public DecoderDoctorLifeLogger<MediaCacheStream> {
// on this class.
void InitAsClone(MediaCacheStream* aOriginal);
nsISerialEventTarget* OwnerThread() const;
nsIEventTarget* OwnerThread() const;
// These are called on the main thread.
// This must be called (and resolve) before the ChannelMediaResource
// This must be called (and return) before the ChannelMediaResource
// used to create this MediaCacheStream is deleted.
RefPtr<GenericPromise> Close();
void Close();
// This returns true when the stream has been closed.
bool IsClosed(AutoLock&) const { return mClosed; }
// Returns true when this stream can be shared by a new resource load.
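OwnerThread() above is also relaxed from nsISerialEventTarget to nsIEventTarget; the serial variant additionally guarantees that dispatched tasks run one at a time, in dispatch order. A standalone sketch of a serial target built on a single worker thread (an illustration of the guarantee, not Gecko's implementation):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// One worker thread plus a FIFO queue: tasks never overlap and always run
// in the order they were dispatched, which is the "serial" guarantee.
class SerialEventTarget {
 public:
  SerialEventTarget() : mThread([this] { Run(); }) {}
  ~SerialEventTarget() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mDone = true;
    }
    mCondVar.notify_one();
    mThread.join();
  }

  void Dispatch(std::function<void()> aTask) {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mTasks.push(std::move(aTask));
    }
    mCondVar.notify_one();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(mMutex);
    for (;;) {
      mCondVar.wait(lock, [this] { return mDone || !mTasks.empty(); });
      if (mTasks.empty()) {
        return;  // only exit once the queue has drained
      }
      auto task = std::move(mTasks.front());
      mTasks.pop();
      lock.unlock();
      task();  // run outside the lock; still strictly one at a time
      lock.lock();
    }
  }

  std::mutex mMutex;
  std::condition_variable mCondVar;
  std::queue<std::function<void()>> mTasks;
  bool mDone = false;
  std::thread mThread;
};

int main() {
  SerialEventTarget target;
  for (int i = 0; i < 3; ++i) {
    target.Dispatch([i] { std::cout << "task " << i << "\n"; });
  }
}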

View File

@@ -6,7 +6,6 @@
#include "MediaDecoder.h"
#include "AudioDeviceInfo.h"
#include "DOMMediaStream.h"
#include "DecoderBenchmark.h"
#include "ImageContainer.h"
@@ -226,46 +225,36 @@ void MediaDecoder::SetVolume(double aVolume) {
mVolume = aVolume;
}
RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSinkDevice) {
RefPtr<GenericPromise> MediaDecoder::SetSink(AudioDeviceInfo* aSink) {
MOZ_ASSERT(NS_IsMainThread());
AbstractThread::AutoEnter context(AbstractMainThread());
mSinkDevice = aSinkDevice;
return GetStateMachine()->InvokeSetSink(aSinkDevice);
return GetStateMachine()->InvokeSetSink(aSink);
}
void MediaDecoder::SetOutputCaptured(bool aCaptured) {
void MediaDecoder::AddOutputStream(DOMMediaStream* aStream,
SharedDummyTrack* aDummyStream) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
AbstractThread::AutoEnter context(AbstractMainThread());
mOutputCaptured = aCaptured;
}
void MediaDecoder::AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
AbstractThread::AutoEnter context(AbstractMainThread());
nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
tracks.AppendElement(std::move(aTrack));
mOutputTracks = tracks;
}
void MediaDecoder::RemoveOutputTrack(
const RefPtr<ProcessedMediaTrack>& aTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
AbstractThread::AutoEnter context(AbstractMainThread());
nsTArray<RefPtr<ProcessedMediaTrack>> tracks = mOutputTracks;
if (tracks.RemoveElement(aTrack)) {
mOutputTracks = tracks;
mDecoderStateMachine->EnsureOutputStreamManager(aDummyStream);
if (mInfo) {
mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
}
mDecoderStateMachine->AddOutputStream(aStream);
}
void MediaDecoder::SetOutputTracksPrincipal(
const RefPtr<nsIPrincipal>& aPrincipal) {
void MediaDecoder::RemoveOutputStream(DOMMediaStream* aStream) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
AbstractThread::AutoEnter context(AbstractMainThread());
mOutputPrincipal = MakePrincipalHandle(aPrincipal);
mDecoderStateMachine->RemoveOutputStream(aStream);
}
void MediaDecoder::SetOutputStreamPrincipal(nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
AbstractThread::AutoEnter context(AbstractMainThread());
mDecoderStateMachine->SetOutputStreamPrincipal(aPrincipal);
}
double MediaDecoder::GetDuration() {
@@ -311,10 +300,6 @@ MediaDecoder::MediaDecoder(MediaDecoderInit& aInit)
INIT_CANONICAL(mVolume, aInit.mVolume),
INIT_CANONICAL(mPreservesPitch, aInit.mPreservesPitch),
INIT_CANONICAL(mLooping, aInit.mLooping),
INIT_CANONICAL(mSinkDevice, nullptr),
INIT_CANONICAL(mOutputCaptured, false),
INIT_CANONICAL(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_CANONICAL(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
INIT_CANONICAL(mPlayState, PLAY_STATE_LOADING),
mSameOriginMedia(false),
mVideoDecodingOberver(
@@ -390,11 +375,14 @@ void MediaDecoder::Shutdown() {
nsCOMPtr<nsIRunnable> r =
NS_NewRunnableFunction("MediaDecoder::Shutdown", [self]() {
self->mVideoFrameContainer = nullptr;
self->ShutdownInternal();
MediaShutdownManager::Instance().Unregister(self);
});
mAbstractMainThread->Dispatch(r.forget());
}
// Ask the owner to remove its audio/video tracks.
GetOwner()->RemoveMediaTracks();
ChangeState(PLAY_STATE_SHUTDOWN);
mVideoDecodingOberver->UnregisterEvent();
mVideoDecodingOberver = nullptr;
@@ -531,16 +519,11 @@ void MediaDecoder::OnStoreDecoderBenchmark(const VideoInfo& aInfo) {
}
}
void MediaDecoder::ShutdownInternal() {
MOZ_ASSERT(NS_IsMainThread());
MediaShutdownManager::Instance().Unregister(this);
}
void MediaDecoder::FinishShutdown() {
MOZ_ASSERT(NS_IsMainThread());
SetStateMachine(nullptr);
mVideoFrameContainer = nullptr;
ShutdownInternal();
MediaShutdownManager::Instance().Unregister(this);
}
nsresult MediaDecoder::InitializeStateMachine() {
@@ -659,6 +642,7 @@ double MediaDecoder::GetCurrentTime() {
void MediaDecoder::OnMetadataUpdate(TimedMetadata&& aMetadata) {
MOZ_ASSERT(NS_IsMainThread());
AbstractThread::AutoEnter context(AbstractMainThread());
GetOwner()->RemoveMediaTracks();
MetadataLoaded(MakeUnique<MediaInfo>(*aMetadata.mInfo),
UniquePtr<MetadataTags>(std::move(aMetadata.mTags)),
MediaDecoderEventVisibility::Observable);
@@ -681,6 +665,8 @@ void MediaDecoder::MetadataLoaded(
mMediaSeekableOnlyInBufferedRanges =
aInfo->mMediaSeekableOnlyInBufferedRanges;
mInfo = aInfo.release();
GetOwner()->ConstructMediaTracks(mInfo);
mDecoderStateMachine->EnsureOutputStreamManagerHasTracks(*mInfo);
// Make sure the element and the frame (if any) are told about
// our new size.
@@ -871,6 +857,12 @@ void MediaDecoder::ChangeState(PlayState aState) {
DDLOG(DDLogCategory::Property, "play_state", ToPlayStateStr(aState));
}
mPlayState = aState;
if (mPlayState == PLAY_STATE_PLAYING) {
GetOwner()->ConstructMediaTracks(mInfo);
} else if (IsEnded()) {
GetOwner()->RemoveMediaTracks();
}
}
bool MediaDecoder::IsLoopingBack(double aPrevPos, double aCurPos) const {

View File

@@ -43,12 +43,12 @@ class MediaMemoryInfo;
class AbstractThread;
class DOMMediaStream;
class DecoderBenchmark;
class ProcessedMediaTrack;
class FrameStatistics;
class VideoFrameContainer;
class MediaFormatReader;
class MediaDecoderStateMachine;
struct MediaPlaybackEvent;
struct SharedDummyTrack;
enum class Visibility : uint8_t;
@@ -155,7 +155,7 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
void SetLooping(bool aLooping);
// Set the given device as the output device.
RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSinkDevice);
RefPtr<GenericPromise> SetSink(AudioDeviceInfo* aSink);
bool GetMinimizePreroll() const { return mMinimizePreroll; }
@@ -166,23 +166,15 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
// replaying after the input has ended. In the latter case, the new source is
// not connected to streams created by captureStreamUntilEnded.
// Turn output capturing of this decoder on or off. If it is on, the
// MediaDecoderStateMachine's media sink will only play after output tracks
// have been set. This ensures the sink doesn't skip over any data while the
// owner intends to capture the full output, which would otherwise leave some
// of it uncaptured. The owner of the MediaDecoder is responsible for adding
// output tracks in a timely fashion while the output is captured.
void SetOutputCaptured(bool aCaptured);
// Add an output track. All decoder output for the track's media type will be
// sent to the track.
// Note that only one audio track and one video track are supported by
// MediaDecoder at this time. Passing in more of one type, or passing in a
// type that metadata says we are not decoding, is an error.
void AddOutputTrack(RefPtr<ProcessedMediaTrack> aTrack);
// Remove an output track added with AddOutputTrack.
void RemoveOutputTrack(const RefPtr<ProcessedMediaTrack>& aTrack);
// Update the principal for any output tracks.
void SetOutputTracksPrincipal(const RefPtr<nsIPrincipal>& aPrincipal);
// Add an output stream. All decoder output will be sent to the stream.
// The stream is initially blocked. The decoder is responsible for unblocking
// it while it is playing back.
void AddOutputStream(DOMMediaStream* aStream, SharedDummyTrack* aDummyStream);
// Remove an output stream added with AddOutputStream.
void RemoveOutputStream(DOMMediaStream* aStream);
// Update the principal for any output streams and their tracks.
void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
// Return the duration of the video in seconds.
virtual double GetDuration();
@@ -403,11 +395,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
void SetStateMachineParameters();
// Called when MediaDecoder shutdown is finished. Subclasses use this to clean
// up internal structures, and unregister potential shutdown blockers when
// they're done.
virtual void ShutdownInternal();
bool IsShutdown() const;
// Called to notify the decoder that the duration has changed.
@@ -619,20 +606,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
Canonical<bool> mLooping;
// The device used with SetSink, or nullptr if no explicit device has been
// set.
Canonical<RefPtr<AudioDeviceInfo>> mSinkDevice;
// Whether this MediaDecoder's output is captured. When captured, all decoded
// data must be played out through mOutputTracks.
Canonical<bool> mOutputCaptured;
// Tracks that, if set, will get data routed through them.
Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
// PrincipalHandle to be used when feeding data into mOutputTracks.
Canonical<PrincipalHandle> mOutputPrincipal;
// Media duration set explicitly by JS. At present, this is only ever present
// for MSE.
Maybe<double> mExplicitDuration;
@@ -665,19 +638,6 @@ class MediaDecoder : public DecoderDoctorLifeLogger<MediaDecoder> {
return &mPreservesPitch;
}
AbstractCanonical<bool>* CanonicalLooping() { return &mLooping; }
AbstractCanonical<RefPtr<AudioDeviceInfo>>* CanonicalSinkDevice() {
return &mSinkDevice;
}
AbstractCanonical<bool>* CanonicalOutputCaptured() {
return &mOutputCaptured;
}
AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
CanonicalOutputTracks() {
return &mOutputTracks;
}
AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
return &mOutputPrincipal;
}
AbstractCanonical<PlayState>* CanonicalPlayState() { return &mPlayState; }
private:
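The removed comment above describes a capture gate: with SetOutputCaptured(true), the media sink may only start once output tracks exist, so no decoded data slips past the capture. Below is a toy model of that gate with a hypothetical Sink type; in reality the interaction spans MediaDecoder, the state machine and the media sink.

#include <iostream>
#include <string>
#include <vector>

// Toy sink: playback is requested, captured, or both; it only actually
// starts when capture is off or at least one output track is attached.
class Sink {
 public:
  void SetOutputCaptured(bool aCaptured) {
    mOutputCaptured = aCaptured;
    Update();
  }
  void AddOutputTrack(std::string aTrack) {
    mOutputTracks.push_back(std::move(aTrack));
    Update();
  }
  void Play() {
    mPlayRequested = true;
    Update();
  }

 private:
  void Update() {
    bool canPlay =
        mPlayRequested && (!mOutputCaptured || !mOutputTracks.empty());
    if (canPlay && !mPlaying) {
      mPlaying = true;
      std::cout << "sink playing\n";
    }
  }

  bool mOutputCaptured = false;
  bool mPlayRequested = false;
  bool mPlaying = false;
  std::vector<std::string> mOutputTracks;
};

int main() {
  Sink sink;
  sink.SetOutputCaptured(true);
  sink.Play();                   // gated: no output tracks yet
  sink.AddOutputTrack("audio");  // now the sink starts
}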

View File

@@ -139,6 +139,14 @@ class MediaDecoderOwner {
virtual void DispatchEncrypted(const nsTArray<uint8_t>& aInitData,
const nsAString& aInitDataType) = 0;
// Called by the media decoder to create audio/video tracks and add to its
// owner's track list.
virtual void ConstructMediaTracks(const MediaInfo* aInfo) = 0;
// Called by the media decoder to removes all audio/video tracks from its
// owner's track list.
virtual void RemoveMediaTracks() = 0;
// Notified by the decoder that a decryption key is required before emitting
// further output.
virtual void NotifyWaitingForKey() {}

View File

@@ -11,6 +11,7 @@
#include "mediasink/AudioSink.h"
#include "mediasink/AudioSinkWrapper.h"
#include "mediasink/DecodedStream.h"
#include "mediasink/OutputStreamManager.h"
#include "mediasink/VideoSink.h"
#include "mozilla/Logging.h"
#include "mozilla/MathAlgorithms.h"
@@ -2591,10 +2592,6 @@ RefPtr<ShutdownPromise> MediaDecoderStateMachine::ShutdownState::Enter() {
master->mVolume.DisconnectIfConnected();
master->mPreservesPitch.DisconnectIfConnected();
master->mLooping.DisconnectIfConnected();
master->mSinkDevice.DisconnectIfConnected();
master->mOutputCaptured.DisconnectIfConnected();
master->mOutputTracks.DisconnectIfConnected();
master->mOutputPrincipal.DisconnectIfConnected();
master->mDuration.DisconnectAll();
master->mCurrentPosition.DisconnectAll();
@@ -2630,10 +2627,12 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
mReader(new ReaderProxy(mTaskQueue, aReader)),
mPlaybackRate(1.0),
mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
mAudioCaptured(false),
mMinimizePreroll(aDecoder->GetMinimizePreroll()),
mSentFirstFrameLoadedEvent(false),
mVideoDecodeSuspended(false),
mVideoDecodeSuspendTimer(mTaskQueue),
mOutputStreamManager(nullptr),
mVideoDecodeMode(VideoDecodeMode::Normal),
mIsMSE(aDecoder->IsMSE()),
mSeamlessLoopingAllowed(false),
@@ -2642,16 +2641,10 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
INIT_MIRROR(mVolume, 1.0),
INIT_MIRROR(mPreservesPitch, true),
INIT_MIRROR(mLooping, false),
INIT_MIRROR(mSinkDevice, nullptr),
INIT_MIRROR(mOutputCaptured, false),
INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
INIT_CANONICAL(mCanonicalOutputTracks,
nsTArray<RefPtr<ProcessedMediaTrack>>()),
INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
INIT_CANONICAL(mDuration, NullableTimeUnit()),
INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
INIT_CANONICAL(mIsAudioDataAudible, false) {
INIT_CANONICAL(mIsAudioDataAudible, false),
mSetSinkRequestsCount(0) {
MOZ_COUNT_CTOR(MediaDecoderStateMachine);
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
@@ -2678,10 +2671,6 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
mVolume.Connect(aDecoder->CanonicalVolume());
mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
mLooping.Connect(aDecoder->CanonicalLooping());
mSinkDevice.Connect(aDecoder->CanonicalSinkDevice());
mOutputCaptured.Connect(aDecoder->CanonicalOutputCaptured());
mOutputTracks.Connect(aDecoder->CanonicalOutputTracks());
mOutputPrincipal.Connect(aDecoder->CanonicalOutputPrincipal());
// Initialize watchers.
mWatchManager.Watch(mBuffered,
@@ -2691,16 +2680,6 @@ void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
&MediaDecoderStateMachine::PreservesPitchChanged);
mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);
mWatchManager.Watch(mOutputCaptured,
&MediaDecoderStateMachine::UpdateOutputCaptured);
mWatchManager.Watch(mOutputTracks,
&MediaDecoderStateMachine::UpdateOutputCaptured);
mWatchManager.Watch(mOutputTracks,
&MediaDecoderStateMachine::OutputTracksChanged);
mWatchManager.Watch(mOutputPrincipal,
&MediaDecoderStateMachine::OutputPrincipalChanged);
mMediaSink = CreateMediaSink();
MOZ_ASSERT(!mStateObj);
auto* s = new DecodeMetadataState(this);
@@ -2718,24 +2697,23 @@ MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
MOZ_ASSERT(self->OnTaskQueue());
AudioSink* audioSink =
new AudioSink(self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
self->Info().mAudio, self->mSinkDevice.Ref());
self->Info().mAudio);
self->mAudibleListener = audioSink->AudibleEvent().Connect(
self->mTaskQueue, self.get(),
&MediaDecoderStateMachine::AudioAudibleChanged);
return audioSink;
};
return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator,
mVolume, mPlaybackRate, mPreservesPitch);
return new AudioSinkWrapper(mTaskQueue, mAudioQueue, audioSinkCreator);
}
already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
MOZ_ASSERT(OnTaskQueue());
already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink(
bool aAudioCaptured, OutputStreamManager* aManager) {
MOZ_ASSERT_IF(aAudioCaptured, aManager);
RefPtr<MediaSink> audioSink =
mOutputCaptured
? new DecodedStream(this, mOutputTracks, mVolume, mPlaybackRate,
mPreservesPitch, mAudioQueue, mVideoQueue)
: CreateAudioSink();
aAudioCaptured ? new DecodedStream(mTaskQueue, mAbstractMainThread,
mAudioQueue, mVideoQueue, aManager)
: CreateAudioSink();
RefPtr<MediaSink> mediaSink =
new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
@@ -2825,6 +2803,8 @@ nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) {
mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);
mMediaSink = CreateMediaSink(mAudioCaptured, mOutputStreamManager);
nsresult rv = mReader->Init();
NS_ENSURE_SUCCESS(rv, rv);
@@ -3360,6 +3340,9 @@ void MediaDecoderStateMachine::FinishDecodeFirstFrame() {
RefPtr<ShutdownPromise> MediaDecoderStateMachine::BeginShutdown() {
MOZ_ASSERT(NS_IsMainThread());
if (mOutputStreamManager) {
mOutputStreamManager->Disconnect();
}
return InvokeAsync(OwnerThread(), this, __func__,
&MediaDecoderStateMachine::Shutdown);
}
@@ -3448,7 +3431,7 @@ void MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() {
}
}
// Note we have to update playback position before releasing the monitor.
// Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
// Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside
// the monitor and get a stale value from GetCurrentTimeUs() which hits the
// assertion in GetClock().
@@ -3534,75 +3517,47 @@ void MediaDecoderStateMachine::LoopingChanged() {
}
}
void MediaDecoderStateMachine::UpdateOutputCaptured() {
MOZ_ASSERT(OnTaskQueue());
// Reset these flags so they are consistent with the status of the sink.
// TODO: Move these flags into MediaSink to improve cohesion so we don't need
// to reset these flags when switching MediaSinks.
mAudioCompleted = false;
mVideoCompleted = false;
// Stop and shut down the existing sink.
StopMediaSink();
mMediaSink->Shutdown();
// Create a new sink according to whether output is captured.
mMediaSink = CreateMediaSink();
// Don't buffer as much when audio is captured because we don't need to worry
// about high latency audio devices.
mAmpleAudioThreshold = mOutputCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
: detail::AMPLE_AUDIO_THRESHOLD;
mStateObj->HandleAudioCaptured();
}
void MediaDecoderStateMachine::OutputTracksChanged() {
MOZ_ASSERT(OnTaskQueue());
LOG("OutputTracksChanged, tracks=%zu", mOutputTracks.Ref().Length());
mCanonicalOutputTracks = mOutputTracks;
}
void MediaDecoderStateMachine::OutputPrincipalChanged() {
MOZ_ASSERT(OnTaskQueue());
mCanonicalOutputPrincipal = mOutputPrincipal;
}
RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
RefPtr<AudioDeviceInfo> aSink) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(aSink);
Unused << ++mSetSinkRequestsCount;
return InvokeAsync(OwnerThread(), this, __func__,
&MediaDecoderStateMachine::SetSink, aSink);
}
RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
RefPtr<AudioDeviceInfo> aSinkDevice) {
RefPtr<AudioDeviceInfo> aSink) {
MOZ_ASSERT(OnTaskQueue());
if (mOutputCaptured) {
if (mAudioCaptured) {
// Not supported yet.
return GenericPromise::CreateAndReject(NS_ERROR_ABORT, __func__);
}
if (mSinkDevice.Ref() != aSinkDevice) {
// A new sink was set before this ran.
return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
// Backup current playback parameters.
bool wasPlaying = mMediaSink->IsPlaying();
if (--mSetSinkRequestsCount > 0) {
MOZ_ASSERT(mSetSinkRequestsCount > 0);
return GenericPromise::CreateAndResolve(wasPlaying, __func__);
}
if (mMediaSink->AudioDevice() == aSinkDevice) {
// The sink has not changed.
return GenericPromise::CreateAndResolve(IsPlaying(), __func__);
}
MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
params.mSink = std::move(aSink);
const bool wasPlaying = IsPlaying();
if (!mMediaSink->IsStarted()) {
mMediaSink->SetPlaybackParams(params);
return GenericPromise::CreateAndResolve(false, __func__);
}
// Stop and shutdown the existing sink.
StopMediaSink();
mMediaSink->Shutdown();
// Create a new sink according to whether audio is captured.
mMediaSink = CreateMediaSink();
mMediaSink = CreateMediaSink(false);
// Restore playback parameters.
mMediaSink->SetPlaybackParams(params);
// Start the new sink
if (wasPlaying) {
nsresult rv = StartMediaSink();
@@ -3701,6 +3656,43 @@ void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) {
DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
}
void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured,
OutputStreamManager* aManager) {
MOZ_ASSERT(OnTaskQueue());
if (aCaptured == mAudioCaptured) {
return;
}
// Reset these flags so they are consistent with the status of the sink.
// TODO: Move these flags into MediaSink to improve cohesion so we don't need
// to reset these flags when switching MediaSinks.
mAudioCompleted = false;
mVideoCompleted = false;
// Backup current playback parameters.
MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();
// Stop and shut down the existing sink.
StopMediaSink();
mMediaSink->Shutdown();
// Create a new sink according to whether audio is captured.
mMediaSink = CreateMediaSink(aCaptured, aManager);
// Restore playback parameters.
mMediaSink->SetPlaybackParams(params);
mAudioCaptured = aCaptured;
// Don't buffer as much when audio is captured because we don't need to worry
// about high latency audio devices.
mAmpleAudioThreshold = mAudioCaptured ? detail::AMPLE_AUDIO_THRESHOLD / 2
: detail::AMPLE_AUDIO_THRESHOLD;
mStateObj->HandleAudioCaptured();
}
uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
MOZ_ASSERT(OnTaskQueue());
return mReader->VideoIsHardwareAccelerated()
@@ -3744,6 +3736,86 @@ RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
return p.forget();
}
void MediaDecoderStateMachine::SetOutputStreamPrincipal(
nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
mOutputStreamPrincipal = aPrincipal;
if (mOutputStreamManager) {
mOutputStreamManager->SetPrincipal(mOutputStreamPrincipal);
}
}
void MediaDecoderStateMachine::AddOutputStream(DOMMediaStream* aStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG("AddOutputStream aStream=%p!", aStream);
mOutputStreamManager->Add(aStream);
nsCOMPtr<nsIRunnable> r =
NS_NewRunnableFunction("MediaDecoderStateMachine::SetAudioCaptured",
[self = RefPtr<MediaDecoderStateMachine>(this),
manager = mOutputStreamManager]() {
self->SetAudioCaptured(true, manager);
});
nsresult rv = OwnerThread()->Dispatch(r.forget());
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void MediaDecoderStateMachine::RemoveOutputStream(DOMMediaStream* aStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG("RemoveOutputStream=%p!", aStream);
mOutputStreamManager->Remove(aStream);
if (mOutputStreamManager->IsEmpty()) {
mOutputStreamManager->Disconnect();
mOutputStreamManager = nullptr;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
"MediaDecoderStateMachine::SetAudioCaptured",
[self = RefPtr<MediaDecoderStateMachine>(this)]() {
self->SetAudioCaptured(false);
});
nsresult rv = OwnerThread()->Dispatch(r.forget());
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
}
void MediaDecoderStateMachine::EnsureOutputStreamManager(
SharedDummyTrack* aDummyStream) {
MOZ_ASSERT(NS_IsMainThread());
if (mOutputStreamManager) {
return;
}
mOutputStreamManager = new OutputStreamManager(
aDummyStream, mOutputStreamPrincipal, mAbstractMainThread);
}
void MediaDecoderStateMachine::EnsureOutputStreamManagerHasTracks(
const MediaInfo& aLoadedInfo) {
MOZ_ASSERT(NS_IsMainThread());
if (!mOutputStreamManager) {
return;
}
if ((!aLoadedInfo.HasAudio() ||
mOutputStreamManager->HasTrackType(MediaSegment::AUDIO)) &&
(!aLoadedInfo.HasVideo() ||
mOutputStreamManager->HasTrackType(MediaSegment::VIDEO))) {
return;
}
if (aLoadedInfo.HasAudio()) {
MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
RefPtr<SourceMediaTrack> dummy =
mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
LOG("Pre-created audio track with underlying track %p", dummy.get());
Unused << dummy;
}
if (aLoadedInfo.HasVideo()) {
MOZ_ASSERT(!mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
RefPtr<SourceMediaTrack> dummy =
mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
LOG("Pre-created video track with underlying track %p", dummy.get());
Unused << dummy;
}
}
class VideoQueueMemoryFunctor : public nsDequeFunctor {
public:
VideoQueueMemoryFunctor() : mSize(0) {}
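The restored InvokeSetSink()/SetSink() pair above coalesces racing device switches with an atomic counter: every request increments it on the main thread, and each queued task decrements and bails out unless it is the last one pending, so only the newest device wins. A standalone sketch of that coalescing, with a plain std::queue standing in for the MDSM task queue:

#include <atomic>
#include <functional>
#include <iostream>
#include <queue>

// Mirrors mSetSinkRequestsCount: bumped per request, decremented per task.
std::atomic<int> gSetSinkRequests{0};
std::queue<std::function<void()>> gTaskQueue;  // toy stand-in for the MDSM task queue

void InvokeSetSink(int aDeviceId) {
  ++gSetSinkRequests;
  gTaskQueue.push([aDeviceId] {
    if (--gSetSinkRequests > 0) {
      std::cout << "skip stale request for device " << aDeviceId << "\n";
      return;  // a newer request is already queued behind us
    }
    std::cout << "switch audio sink to device " << aDeviceId << "\n";
  });
}

int main() {
  InvokeSetSink(1);
  InvokeSetSink(2);
  InvokeSetSink(3);
  while (!gTaskQueue.empty()) {  // drain, as the task queue would
    gTaskQueue.front()();
    gTaskQueue.pop();
  }
}

Only the request for device 3 performs the expensive sink teardown and rebuild; the two earlier requests are recognized as stale and skipped.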

View File

@@ -106,6 +106,7 @@ class AbstractThread;
class AudioSegment;
class DecodedStream;
class DOMMediaStream;
class OutputStreamManager;
class ReaderProxy;
class TaskQueue;
@@ -185,6 +186,19 @@ class MediaDecoderStateMachine
RefPtr<GenericPromise> RequestDebugInfo(
dom::MediaDecoderStateMachineDebugInfo& aInfo);
void SetOutputStreamPrincipal(nsIPrincipal* aPrincipal);
// If an OutputStreamManager does not exist, one will be created.
void EnsureOutputStreamManager(SharedDummyTrack* aDummyStream);
// If an OutputStreamManager exists, tracks matching aLoadedInfo will be
// created unless they already exist in the manager.
void EnsureOutputStreamManagerHasTracks(const MediaInfo& aLoadedInfo);
// Add an output stream to the output stream manager. The manager must have
// been created through EnsureOutputStreamManager() before this.
void AddOutputStream(DOMMediaStream* aStream);
// Remove an output stream added with AddOutputStream. If the last output
// stream was removed, we will also tear down the OutputStreamManager.
void RemoveOutputStream(DOMMediaStream* aStream);
// Seeks to the decoder to aTarget asynchronously.
RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);
@@ -302,6 +316,11 @@ class MediaDecoderStateMachine
// constructor immediately after the task queue is created.
void InitializationTask(MediaDecoder* aDecoder);
// Sets the audio-captured state and recreates the media sink if needed.
// A manager must be passed in if setting the audio-captured state to true.
void SetAudioCaptured(bool aCaptured,
OutputStreamManager* aManager = nullptr);
RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);
RefPtr<ShutdownPromise> Shutdown();
@@ -375,9 +394,6 @@ class MediaDecoderStateMachine
void SetPlaybackRate(double aPlaybackRate);
void PreservesPitchChanged();
void LoopingChanged();
void UpdateOutputCaptured();
void OutputTracksChanged();
void OutputPrincipalChanged();
MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
@@ -422,9 +438,10 @@ class MediaDecoderStateMachine
MediaSink* CreateAudioSink();
// Always create mediasink which contains an AudioSink or DecodedStream
// inside.
already_AddRefed<MediaSink> CreateMediaSink();
// Always create mediasink which contains an AudioSink or StreamSink inside.
// A manager must be passed in if aAudioCaptured is true.
already_AddRefed<MediaSink> CreateMediaSink(
bool aAudioCaptured, OutputStreamManager* aManager = nullptr);
// Stops the media sink and shut it down.
// The decoder monitor must be held with exactly one lock count.
@@ -609,6 +626,11 @@ class MediaDecoderStateMachine
bool mIsLiveStream = false;
// True if we shouldn't play our audio (but still write it to any capturing
// streams). When this is true, the audio thread will never start again after
// it has stopped.
bool mAudioCaptured;
// True if all audio frames are already rendered.
bool mAudioCompleted = false;
@@ -650,6 +672,13 @@ class MediaDecoderStateMachine
// Track enabling video decode suspension via timer
DelayedScheduler mVideoDecodeSuspendTimer;
// Data about MediaStreams that are being fed by the decoder.
// Main thread only.
RefPtr<OutputStreamManager> mOutputStreamManager;
// Principal used by output streams. Main thread only.
nsCOMPtr<nsIPrincipal> mOutputStreamPrincipal;
// Track the current video decode mode.
VideoDecodeMode mVideoDecodeMode;
@@ -704,23 +733,6 @@ class MediaDecoderStateMachine
// upon reaching the end.
Mirror<bool> mLooping;
// The device used with SetSink, or nullptr if no explicit device has been
// set.
Mirror<RefPtr<AudioDeviceInfo>> mSinkDevice;
// Whether all output should be captured into mOutputTracks. While true, the
// media sink will only play if there are output tracks.
Mirror<bool> mOutputCaptured;
// Tracks to capture data into.
Mirror<nsTArray<RefPtr<ProcessedMediaTrack>>> mOutputTracks;
// PrincipalHandle to feed with data captured into mOutputTracks.
Mirror<PrincipalHandle> mOutputPrincipal;
Canonical<nsTArray<RefPtr<ProcessedMediaTrack>>> mCanonicalOutputTracks;
Canonical<PrincipalHandle> mCanonicalOutputPrincipal;
// Duration of the media. This is guaranteed to be non-null after we finish
// decoding the first frame.
Canonical<media::NullableTimeUnit> mDuration;
@@ -733,16 +745,12 @@ class MediaDecoderStateMachine
// Used to distinguish whether the audio is producing sound.
Canonical<bool> mIsAudioDataAudible;
// Used to count the number of pending requests to set a new sink.
Atomic<int> mSetSinkRequestsCount;
public:
AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;
AbstractCanonical<nsTArray<RefPtr<ProcessedMediaTrack>>>*
CanonicalOutputTracks() {
return &mCanonicalOutputTracks;
}
AbstractCanonical<PrincipalHandle>* CanonicalOutputPrincipal() {
return &mCanonicalOutputPrincipal;
}
AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration() {
return &mDuration;
}
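The header comments above pin down an ordering contract: EnsureOutputStreamManager() must run before AddOutputStream(), and removing the last stream tears the manager down again (as RemoveOutputStream() does in the .cpp hunk). A toy lifecycle model of that contract, with simplified types; real streams are DOMMediaStream objects, not ints:

#include <algorithm>
#include <cassert>
#include <iostream>
#include <memory>
#include <vector>

// Toy manager that just tracks which streams are attached.
struct OutputStreamManager {
  std::vector<int> mStreams;
  void Add(int aStream) { mStreams.push_back(aStream); }
  void Remove(int aStream) {
    mStreams.erase(std::find(mStreams.begin(), mStreams.end(), aStream));
  }
  bool IsEmpty() const { return mStreams.empty(); }
  void Disconnect() { std::cout << "manager disconnected\n"; }
};

struct StateMachine {
  std::unique_ptr<OutputStreamManager> mManager;

  void EnsureOutputStreamManager() {
    if (!mManager) {
      mManager = std::make_unique<OutputStreamManager>();
    }
  }
  void AddOutputStream(int aStream) {
    assert(mManager && "EnsureOutputStreamManager() must run first");
    mManager->Add(aStream);
  }
  void RemoveOutputStream(int aStream) {
    mManager->Remove(aStream);
    if (mManager->IsEmpty()) {
      mManager->Disconnect();  // last stream gone: tear the manager down
      mManager.reset();
    }
  }
};

int main() {
  StateMachine mdsm;
  mdsm.EnsureOutputStreamManager();
  mdsm.AddOutputStream(1);
  mdsm.RemoveOutputStream(1);  // prints "manager disconnected"
}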

View File

@@ -60,11 +60,8 @@ class MediaResource : public DecoderDoctorLifeLogger<MediaResource> {
// Close the resource, stop any listeners, channels, etc.
// Cancels any currently blocking Read request and forces that request to
// return an error. This must be called (and resolve) before the MediaResource
// is deleted.
virtual RefPtr<GenericPromise> Close() {
return GenericPromise::CreateAndResolve(true, __func__);
}
// return an error.
virtual nsresult Close() { return NS_OK; }
// These methods are called off the main thread.
// Read up to aCount bytes from the stream. The read starts at

View File

@@ -308,7 +308,7 @@ class MediaStreamTrackSource : public nsISupports {
}
// Principal identifying who may access the contents of this source.
RefPtr<nsIPrincipal> mPrincipal;
nsCOMPtr<nsIPrincipal> mPrincipal;
// Currently registered sinks.
nsTArray<WeakPtr<Sink>> mSinks;

View File

@@ -6,7 +6,6 @@
#include "AudioSink.h"
#include "AudioConverter.h"
#include "AudioDeviceInfo.h"
#include "MediaQueue.h"
#include "VideoUtils.h"
#include "mozilla/CheckedInt.h"
@@ -35,11 +34,9 @@ using media::TimeUnit;
AudioSink::AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
const TimeUnit& aStartTime, const AudioInfo& aInfo,
AudioDeviceInfo* aAudioDevice)
const TimeUnit& aStartTime, const AudioInfo& aInfo)
: mStartTime(aStartTime),
mInfo(aInfo),
mAudioDevice(aAudioDevice),
mPlaying(true),
mMonitor("AudioSink"),
mWritten(0),
@@ -186,7 +183,7 @@ nsresult AudioSink::InitializeAudioStream(const PlaybackParams& aParams) {
// StaticPrefs::accessibility_monoaudio_enable() or
// StaticPrefs::media_forcestereo_enabled() is applied.
nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate,
mAudioDevice);
aParams.mSink);
if (NS_FAILED(rv)) {
mAudioStream->Shutdown();
mAudioStream = nullptr;

View File

@@ -23,20 +23,11 @@ namespace mozilla {
class AudioConverter;
class AudioSink : private AudioStream::DataSource {
public:
struct PlaybackParams {
PlaybackParams(double aVolume, double aPlaybackRate, bool aPreservesPitch)
: mVolume(aVolume),
mPlaybackRate(aPlaybackRate),
mPreservesPitch(aPreservesPitch) {}
double mVolume;
double mPlaybackRate;
bool mPreservesPitch;
};
using PlaybackParams = MediaSink::PlaybackParams;
public:
AudioSink(AbstractThread* aThread, MediaQueue<AudioData>& aAudioQueue,
const media::TimeUnit& aStartTime, const AudioInfo& aInfo,
AudioDeviceInfo* aAudioDevice);
const media::TimeUnit& aStartTime, const AudioInfo& aInfo);
~AudioSink();
@@ -68,8 +59,6 @@ class AudioSink : private AudioStream::DataSource {
void GetDebugInfo(dom::MediaSinkDebugInfo& aInfo);
const RefPtr<AudioDeviceInfo>& AudioDevice() { return mAudioDevice; }
private:
// Allocate and initialize mAudioStream. Returns NS_OK on success.
nsresult InitializeAudioStream(const PlaybackParams& aParams);
@@ -98,10 +87,6 @@ class AudioSink : private AudioStream::DataSource {
const AudioInfo mInfo;
// The output device this AudioSink is playing data to. The system's default
// device is used if this is null.
const RefPtr<AudioDeviceInfo> mAudioDevice;
// Used on the task queue of MDSM only.
bool mPlaying;

View File

@@ -21,6 +21,21 @@ void AudioSinkWrapper::Shutdown() {
mCreator = nullptr;
}
const MediaSink::PlaybackParams& AudioSinkWrapper::GetPlaybackParams() const {
AssertOwnerThread();
return mParams;
}
void AudioSinkWrapper::SetPlaybackParams(const PlaybackParams& aParams) {
AssertOwnerThread();
if (mAudioSink) {
mAudioSink->SetVolume(aParams.mVolume);
mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
}
mParams = aParams;
}
RefPtr<MediaSink::EndedPromise> AudioSinkWrapper::OnEnded(TrackType aType) {
AssertOwnerThread();
MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
@@ -139,11 +154,6 @@ void AudioSinkWrapper::SetPlaying(bool aPlaying) {
}
}
double AudioSinkWrapper::PlaybackRate() const {
AssertOwnerThread();
return mParams.mPlaybackRate;
}
nsresult AudioSinkWrapper::Start(const TimeUnit& aStartTime,
const MediaInfo& aInfo) {
AssertOwnerThread();
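SetPlaybackParams() above both forwards the values to the currently live AudioSink, if any, and caches them in mParams so that a sink created later starts from the same state. A minimal standalone sketch of that cache-and-forward shape (toy AudioSink; the real wrapper also owns the rate and pitch plumbing):

#include <iostream>
#include <memory>

struct PlaybackParams {
  double mVolume = 1.0;
  double mPlaybackRate = 1.0;
  bool mPreservesPitch = true;
};

struct AudioSink {
  void SetVolume(double aVolume) { std::cout << "volume " << aVolume << "\n"; }
  void SetPlaybackRate(double aRate) { std::cout << "rate " << aRate << "\n"; }
  void SetPreservesPitch(bool aPitch) { std::cout << "pitch " << aPitch << "\n"; }
};

class AudioSinkWrapper {
 public:
  void SetPlaybackParams(const PlaybackParams& aParams) {
    if (mAudioSink) {
      // Forward to the live sink...
      mAudioSink->SetVolume(aParams.mVolume);
      mAudioSink->SetPlaybackRate(aParams.mPlaybackRate);
      mAudioSink->SetPreservesPitch(aParams.mPreservesPitch);
    }
    mParams = aParams;  // ...and remember for the next sink we create.
  }
  const PlaybackParams& GetPlaybackParams() const { return mParams; }
  void CreateSink() { mAudioSink = std::make_unique<AudioSink>(); }

 private:
  std::unique_ptr<AudioSink> mAudioSink;
  PlaybackParams mParams;
};

int main() {
  AudioSinkWrapper wrapper;
  wrapper.SetPlaybackParams({0.5, 2.0, false});       // cached; no sink yet
  wrapper.CreateSink();
  wrapper.SetPlaybackParams(wrapper.GetPlaybackParams());  // now forwarded
}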

View File

@@ -24,8 +24,6 @@ class MediaQueue;
* A wrapper around AudioSink to provide the interface of MediaSink.
*/
class AudioSinkWrapper : public MediaSink {
using PlaybackParams = AudioSink::PlaybackParams;
// An AudioSink factory.
class Creator {
public:
@@ -48,18 +46,19 @@ class AudioSinkWrapper : public MediaSink {
template <typename Function>
AudioSinkWrapper(AbstractThread* aOwnerThread,
const MediaQueue<AudioData>& aAudioQueue,
const Function& aFunc, double aVolume, double aPlaybackRate,
bool aPreservesPitch)
const Function& aFunc)
: mOwnerThread(aOwnerThread),
mCreator(new CreatorImpl<Function>(aFunc)),
mIsStarted(false),
mParams(aVolume, aPlaybackRate, aPreservesPitch),
// Give an invalid value to facilitate debug if used before playback
// starts.
mPlayDuration(media::TimeUnit::Invalid()),
mAudioEnded(true),
mAudioQueue(aAudioQueue) {}
const PlaybackParams& GetPlaybackParams() const override;
void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
media::TimeUnit GetEndTime(TrackType aType) const override;
media::TimeUnit GetPosition(TimeStamp* aTimeStamp = nullptr) const override;
@@ -70,8 +69,6 @@ class AudioSinkWrapper : public MediaSink {
void SetPreservesPitch(bool aPreservesPitch) override;
void SetPlaying(bool aPlaying) override;
double PlaybackRate() const override;
nsresult Start(const media::TimeUnit& aStartTime,
const MediaInfo& aInfo) override;
void Stop() override;

View File

@@ -7,10 +7,10 @@
#include "DecodedStream.h"
#include "AudioSegment.h"
#include "MediaData.h"
#include "MediaDecoderStateMachine.h"
#include "MediaQueue.h"
#include "MediaTrackGraph.h"
#include "MediaTrackListener.h"
#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
@@ -54,32 +54,34 @@ class DecodedStreamGraphListener {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
public:
DecodedStreamGraphListener(
SourceMediaTrack* aAudioTrack,
SourceMediaTrack* aAudioStream,
MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
SourceMediaTrack* aVideoTrack,
MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
SourceMediaTrack* aVideoStream,
MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder,
AbstractThread* aMainThread)
: mAudioTrackListener(
aAudioTrack
? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioTrack)
aAudioStream
? MakeRefPtr<DecodedStreamTrackListener>(this, aAudioStream)
: nullptr),
mAudioEndedHolder(std::move(aAudioEndedHolder)),
mVideoTrackListener(
aVideoTrack
? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoTrack)
aVideoStream
? MakeRefPtr<DecodedStreamTrackListener>(this, aVideoStream)
: nullptr),
mVideoEndedHolder(std::move(aVideoEndedHolder)),
mAudioTrack(aAudioTrack),
mVideoTrack(aVideoTrack) {
mAudioStream(aAudioStream),
mVideoStream(aVideoStream),
mAbstractMainThread(aMainThread) {
MOZ_ASSERT(NS_IsMainThread());
if (mAudioTrackListener) {
mAudioTrack->AddListener(mAudioTrackListener);
mAudioStream->AddListener(mAudioTrackListener);
} else {
mAudioEnded = true;
mAudioEndedHolder.ResolveIfExists(true, __func__);
}
if (mVideoTrackListener) {
mVideoTrack->AddListener(mVideoTrackListener);
mVideoStream->AddListener(mVideoTrackListener);
} else {
mVideoEnded = true;
mVideoEndedHolder.ResolveIfExists(true, __func__);
@@ -87,30 +89,30 @@ class DecodedStreamGraphListener {
}
void NotifyOutput(SourceMediaTrack* aTrack, TrackTime aCurrentTrackTime) {
if (aTrack == mAudioTrack) {
if (aTrack == mAudioStream) {
if (aCurrentTrackTime >= mAudioEnd) {
mAudioTrack->End();
mAudioStream->End();
}
} else if (aTrack == mVideoTrack) {
} else if (aTrack == mVideoStream) {
if (aCurrentTrackTime >= mVideoEnd) {
mVideoTrack->End();
mVideoStream->End();
}
} else {
MOZ_CRASH("Unexpected source track");
}
if (aTrack != mAudioTrack && mAudioTrack && !mAudioEnded) {
if (aTrack != mAudioStream && mAudioStream && !mAudioEnded) {
// Only audio playout drives the clock forward, if present and live.
return;
}
MOZ_ASSERT_IF(aTrack == mAudioTrack, !mAudioEnded);
MOZ_ASSERT_IF(aTrack == mVideoTrack, !mVideoEnded);
MOZ_ASSERT_IF(aTrack == mAudioStream, !mAudioEnded);
MOZ_ASSERT_IF(aTrack == mVideoStream, !mVideoEnded);
mOnOutput.Notify(aTrack->TrackTimeToMicroseconds(aCurrentTrackTime));
}
void NotifyEnded(SourceMediaTrack* aTrack) {
if (aTrack == mAudioTrack) {
if (aTrack == mAudioStream) {
mAudioEnded = true;
} else if (aTrack == mVideoTrack) {
} else if (aTrack == mVideoStream) {
mVideoEnded = true;
} else {
MOZ_CRASH("Unexpected source track");
@@ -143,9 +145,9 @@ class DecodedStreamGraphListener {
* Callable from any thread.
*/
void EndTrackAt(SourceMediaTrack* aTrack, TrackTime aEnd) {
if (aTrack == mAudioTrack) {
if (aTrack == mAudioStream) {
mAudioEnd = aEnd;
} else if (aTrack == mVideoTrack) {
} else if (aTrack == mVideoStream) {
mVideoEnd = aEnd;
} else {
MOZ_CRASH("Unexpected source track");
@@ -154,9 +156,9 @@ class DecodedStreamGraphListener {
void DoNotifyTrackEnded(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
if (aTrack == mAudioTrack) {
if (aTrack == mAudioStream) {
mAudioEndedHolder.ResolveIfExists(true, __func__);
} else if (aTrack == mVideoTrack) {
} else if (aTrack == mVideoStream) {
mVideoEndedHolder.ResolveIfExists(true, __func__);
} else {
MOZ_CRASH("Unexpected source track");
@@ -166,16 +168,16 @@ class DecodedStreamGraphListener {
  void Forget() {
    MOZ_ASSERT(NS_IsMainThread());
-   if (mAudioTrackListener && !mAudioTrack->IsDestroyed()) {
-     mAudioTrack->End();
-     mAudioTrack->RemoveListener(mAudioTrackListener);
+   if (mAudioTrackListener && !mAudioStream->IsDestroyed()) {
+     mAudioStream->End();
+     mAudioStream->RemoveListener(mAudioTrackListener);
    }
    mAudioTrackListener = nullptr;
    mAudioEndedHolder.ResolveIfExists(false, __func__);
-   if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
-     mVideoTrack->End();
-     mVideoTrack->RemoveListener(mVideoTrackListener);
+   if (mVideoTrackListener && !mVideoStream->IsDestroyed()) {
+     mVideoStream->End();
+     mVideoStream->RemoveListener(mVideoTrackListener);
    }
    mVideoTrackListener = nullptr;
    mVideoEndedHolder.ResolveIfExists(false, __func__);
@@ -202,10 +204,11 @@ class DecodedStreamGraphListener {
bool mVideoEnded = false;
// Any thread.
- const RefPtr<SourceMediaTrack> mAudioTrack;
- const RefPtr<SourceMediaTrack> mVideoTrack;
+ const RefPtr<SourceMediaTrack> mAudioStream;
+ const RefPtr<SourceMediaTrack> mVideoStream;
  Atomic<TrackTime> mAudioEnd{TRACK_TIME_MAX};
  Atomic<TrackTime> mVideoEnd{TRACK_TIME_MAX};
+ const RefPtr<AbstractThread> mAbstractMainThread;
};
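
For illustration, a minimal standalone analogue (plain C++17 with std::promise, not Gecko's MozPromise) of the resolve-once holder pattern this listener relies on: whichever of DoNotifyTrackEnded() or Forget() runs first settles the promise, and the later call becomes a no-op.

#include <future>

class EndedHolder {
 public:
  std::future<bool> Ensure() { return mPromise.get_future(); }
  // Mirrors MozPromiseHolder::ResolveIfExists: settle once, ignore repeats.
  void ResolveIfExists(bool aValue) {
    if (!mSettled) {
      mSettled = true;
      mPromise.set_value(aValue);
    }
  }

 private:
  std::promise<bool> mPromise;
  bool mSettled = false;
};
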
DecodedStreamTrackListener::DecodedStreamTrackListener(
@@ -223,7 +226,7 @@ void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
/**
 * All MediaStream-related data is protected by the decoder's monitor. We have
- * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
+ * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
 * inputs for all output tracks created by OutputStreamManager after calls to
 * captureStream/UntilEnded. Seeking creates new source tracks, as does
 * replaying after the input has ended. In the latter case, the new sources are
@@ -232,11 +235,12 @@ void DecodedStreamTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
class DecodedStreamData final {
public:
  DecodedStreamData(
-     PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-     RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-     RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+     OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+     RefPtr<SourceMediaTrack> aAudioStream,
+     RefPtr<SourceMediaTrack> aVideoStream,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise);
+     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+     AbstractThread* aMainThread);
  ~DecodedStreamData();
  MediaEventSource<int64_t>& OnOutput();
  void Forget();
@@ -254,9 +258,9 @@ class DecodedStreamData final {
// Count of audio frames written to the track
int64_t mAudioFramesWritten;
// Count of video frames written to the track in the track's rate
- TrackTime mVideoTrackWritten;
+ TrackTime mVideoStreamWritten;
  // Count of audio frames written to the track in the track's rate
- TrackTime mAudioTrackWritten;
+ TrackTime mAudioStreamWritten;
  // mNextAudioTime is the end timestamp for the last packet sent to the track.
  // Therefore audio packets starting at or after this time need to be copied
  // to the output track.
@@ -279,66 +283,42 @@ class DecodedStreamData final {
bool mHaveSentFinishAudio;
bool mHaveSentFinishVideo;
- const RefPtr<SourceMediaTrack> mAudioTrack;
- const RefPtr<SourceMediaTrack> mVideoTrack;
- const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
- const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
- const RefPtr<MediaInputPort> mAudioPort;
- const RefPtr<MediaInputPort> mVideoPort;
+ const RefPtr<SourceMediaTrack> mAudioStream;
+ const RefPtr<SourceMediaTrack> mVideoStream;
  const RefPtr<DecodedStreamGraphListener> mListener;
+ const RefPtr<OutputStreamManager> mOutputStreamManager;
+ const RefPtr<AbstractThread> mAbstractMainThread;
};
DecodedStreamData::DecodedStreamData(
-     PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
-     RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
-     RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
+     OutputStreamManager* aOutputStreamManager, PlaybackInfoInit&& aInit,
+     RefPtr<SourceMediaTrack> aAudioStream,
+     RefPtr<SourceMediaTrack> aVideoStream,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
-     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise)
+     MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
+     AbstractThread* aMainThread)
    : mAudioFramesWritten(0),
-     mVideoTrackWritten(0),
-     mAudioTrackWritten(0),
+     mVideoStreamWritten(0),
+     mAudioStreamWritten(0),
      mNextAudioTime(aInit.mStartTime),
      mHaveSentFinishAudio(false),
      mHaveSentFinishVideo(false),
-     mAudioTrack(aInit.mInfo.HasAudio()
-                     ? aGraph->CreateSourceTrack(MediaSegment::AUDIO)
-                     : nullptr),
-     mVideoTrack(aInit.mInfo.HasVideo()
-                     ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
-                     : nullptr),
-     mAudioOutputTrack(std::move(aAudioOutputTrack)),
-     mVideoOutputTrack(std::move(aVideoOutputTrack)),
-     mAudioPort((mAudioOutputTrack && mAudioTrack)
-                    ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
-                    : nullptr),
-     mVideoPort((mVideoOutputTrack && mVideoTrack)
-                    ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
-                    : nullptr),
+     mAudioStream(std::move(aAudioStream)),
+     mVideoStream(std::move(aVideoStream)),
      // DecodedStreamGraphListener will resolve these promises.
      mListener(MakeRefPtr<DecodedStreamGraphListener>(
-         mAudioTrack, std::move(aAudioEndedPromise), mVideoTrack,
-         std::move(aVideoEndedPromise))) {
+         mAudioStream, std::move(aAudioEndedPromise), mVideoStream,
+         std::move(aVideoEndedPromise), aMainThread)),
+     mOutputStreamManager(aOutputStreamManager),
+     mAbstractMainThread(aMainThread) {
  MOZ_ASSERT(NS_IsMainThread());
- if (mAudioTrack) {
-   mAudioTrack->SetAppendDataSourceRate(aInit.mInfo.mAudio.mRate);
- }
+ MOZ_DIAGNOSTIC_ASSERT(
+     mOutputStreamManager->HasTracks(mAudioStream, mVideoStream),
+     "Tracks must be pre-created on main thread");
}
- DecodedStreamData::~DecodedStreamData() {
-   MOZ_ASSERT(NS_IsMainThread());
-   if (mAudioTrack) {
-     mAudioTrack->Destroy();
-   }
-   if (mVideoTrack) {
-     mVideoTrack->Destroy();
-   }
-   if (mAudioPort) {
-     mAudioPort->Destroy();
-   }
-   if (mVideoPort) {
-     mVideoPort->Destroy();
-   }
- }
+ DecodedStreamData::~DecodedStreamData() { MOZ_ASSERT(NS_IsMainThread()); }
MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
return mListener->OnOutput();
@@ -349,7 +329,7 @@ void DecodedStreamData::Forget() { mListener->Forget(); }
void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  aInfo.mInstance = NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
  aInfo.mAudioFramesWritten = mAudioFramesWritten;
- aInfo.mStreamAudioWritten = mAudioTrackWritten;
+ aInfo.mStreamAudioWritten = mAudioStreamWritten;
aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
aInfo.mLastVideoStartTime =
mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
@@ -361,29 +341,40 @@ void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
}

- DecodedStream::DecodedStream(
-     MediaDecoderStateMachine* aStateMachine,
-     nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
-     double aPlaybackRate, bool aPreservesPitch,
-     MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue)
-     : mOwnerThread(aStateMachine->OwnerThread()),
+ DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
+                              AbstractThread* aMainThread,
+                              MediaQueue<AudioData>& aAudioQueue,
+                              MediaQueue<VideoData>& aVideoQueue,
+                              OutputStreamManager* aOutputStreamManager)
+     : mOwnerThread(aOwnerThread),
+       mAbstractMainThread(aMainThread),
+       mOutputStreamManager(aOutputStreamManager),
        mWatchManager(this, mOwnerThread),
        mPlaying(false, "DecodedStream::mPlaying"),
-       mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
+       mPrincipalHandle(aOwnerThread, PRINCIPAL_HANDLE_NONE,
                         "DecodedStream::mPrincipalHandle (Mirror)"),
-       mOutputTracks(std::move(aOutputTracks)),
-       mVolume(aVolume),
-       mPlaybackRate(aPlaybackRate),
-       mPreservesPitch(aPreservesPitch),
        mAudioQueue(aAudioQueue),
        mVideoQueue(aVideoQueue) {
- mPrincipalHandle.Connect(aStateMachine->CanonicalOutputPrincipal());
+ mPrincipalHandle.Connect(mOutputStreamManager->CanonicalPrincipalHandle());
  mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
  PlayingChanged();  // Notify of the initial state
}

DecodedStream::~DecodedStream() {
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
+ NS_ProxyRelease("DecodedStream::mOutputStreamManager", mAbstractMainThread,
+                 do_AddRef(mOutputStreamManager));
}
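
The NS_ProxyRelease() call above exists because this destructor can run on the state-machine thread while OutputStreamManager is a main-thread object. A sketch of the same hand-off in isolation, assuming a Gecko build (nsProxyRelease.h); ReleaseOnMain is a hypothetical helper, not part of this patch:

void ReleaseOnMain(RefPtr<OutputStreamManager>&& aManager,
                   AbstractThread* aMainThread) {
  // Dispatches a runnable that drops the reference on aMainThread, so the
  // destructor runs on the thread that owns the manager's state.
  NS_ProxyRelease("OutputStreamManager release", aMainThread,
                  aManager.forget());
}
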
const MediaSink::PlaybackParams& DecodedStream::GetPlaybackParams() const {
AssertOwnerThread();
return mParams;
}
void DecodedStream::SetPlaybackParams(const PlaybackParams& aParams) {
AssertOwnerThread();
mParams = aParams;
}
RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
@@ -402,7 +393,6 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                              const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
- MOZ_DIAGNOSTIC_ASSERT(!mOutputTracks.IsEmpty());

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
@@ -414,55 +404,58 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
typedef MozPromiseHolder<MediaSink::EndedPromise> Promise;
public:
-   R(PlaybackInfoInit&& aInit,
-     nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-     Promise&& aAudioEndedPromise, Promise&& aVideoEndedPromise)
+   R(PlaybackInfoInit&& aInit, Promise&& aAudioEndedPromise,
+     Promise&& aVideoEndedPromise, OutputStreamManager* aManager,
+     AbstractThread* aMainThread)
        : Runnable("CreateDecodedStreamData"),
          mInit(std::move(aInit)),
-         mOutputTracks(std::move(aOutputTracks)),
          mAudioEndedPromise(std::move(aAudioEndedPromise)),
-         mVideoEndedPromise(std::move(aVideoEndedPromise)) {}
+         mVideoEndedPromise(std::move(aVideoEndedPromise)),
+         mOutputStreamManager(aManager),
+         mAbstractMainThread(aMainThread) {}
NS_IMETHOD Run() override {
MOZ_ASSERT(NS_IsMainThread());
-     RefPtr<ProcessedMediaTrack> audioOutputTrack;
-     RefPtr<ProcessedMediaTrack> videoOutputTrack;
-     for (const auto& track : mOutputTracks) {
-       if (track->mType == MediaSegment::AUDIO) {
-         MOZ_DIAGNOSTIC_ASSERT(
-             !audioOutputTrack,
-             "We only support capturing to one output track per kind");
-         audioOutputTrack = track;
-       } else if (track->mType == MediaSegment::VIDEO) {
-         MOZ_DIAGNOSTIC_ASSERT(
-             !videoOutputTrack,
-             "We only support capturing to one output track per kind");
-         videoOutputTrack = track;
-       } else {
-         MOZ_CRASH("Unknown media type");
-       }
-     }
-     if ((!audioOutputTrack && !videoOutputTrack) ||
-         (audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
-         (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
-       // No output tracks yet, or they're going away. Halt playback by not
-       // creating DecodedStreamData. MDSM will try again with a new
-       // DecodedStream sink when tracks are available.
+     // No need to create a source track when there are no output tracks.
+     // This happens when RemoveOutput() is called immediately after
+     // StartPlayback().
+     if (mOutputStreamManager->IsEmpty()) {
        // Resolve the promise to indicate the end of playback.
        mAudioEndedPromise.Resolve(true, __func__);
        mVideoEndedPromise.Resolve(true, __func__);
        return NS_OK;
      }
+     RefPtr<SourceMediaTrack> audioStream =
+         mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::AUDIO);
+     if (mInit.mInfo.HasAudio() && !audioStream) {
+       MOZ_DIAGNOSTIC_ASSERT(
+           !mOutputStreamManager->HasTrackType(MediaSegment::AUDIO));
+       audioStream = mOutputStreamManager->AddTrack(MediaSegment::AUDIO);
+     }
+     if (audioStream) {
+       audioStream->SetAppendDataSourceRate(mInit.mInfo.mAudio.mRate);
+     }
+     RefPtr<SourceMediaTrack> videoStream =
+         mOutputStreamManager->GetPrecreatedTrackOfType(MediaSegment::VIDEO);
+     if (mInit.mInfo.HasVideo() && !videoStream) {
+       MOZ_DIAGNOSTIC_ASSERT(
+           !mOutputStreamManager->HasTrackType(MediaSegment::VIDEO));
+       videoStream = mOutputStreamManager->AddTrack(MediaSegment::VIDEO);
+     }
      mData = MakeUnique<DecodedStreamData>(
-         std::move(mInit), mOutputTracks[0]->Graph(),
-         std::move(audioOutputTrack), std::move(videoOutputTrack),
-         std::move(mAudioEndedPromise), std::move(mVideoEndedPromise));
+         mOutputStreamManager, std::move(mInit), std::move(audioStream),
+         std::move(videoStream), std::move(mAudioEndedPromise),
+         std::move(mVideoEndedPromise), mAbstractMainThread);
return NS_OK;
}
UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }
private:
PlaybackInfoInit mInit;
-   const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
    Promise mAudioEndedPromise;
    Promise mVideoEndedPromise;
+   RefPtr<OutputStreamManager> mOutputStreamManager;
    UniquePtr<DecodedStreamData> mData;
+   const RefPtr<AbstractThread> mAbstractMainThread;
};
MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
@@ -470,9 +463,9 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime,
  MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
  mVideoEndedPromise = videoEndedHolder.Ensure(__func__);
  PlaybackInfoInit init{aStartTime, aInfo};
- nsCOMPtr<nsIRunnable> r = new R(
-     std::move(init), nsTArray<RefPtr<ProcessedMediaTrack>>(mOutputTracks),
-     std::move(audioEndedHolder), std::move(videoEndedHolder));
+ nsCOMPtr<nsIRunnable> r = new R(std::move(init), std::move(audioEndedHolder),
+                                 std::move(videoEndedHolder),
+                                 mOutputStreamManager, mAbstractMainThread);
SyncRunnable::DispatchToThread(
SystemGroup::EventTargetFor(TaskCategory::Other), r);
mData = static_cast<R*>(r.get())->ReleaseData();
@@ -525,9 +518,12 @@ void DecodedStream::DestroyData(UniquePtr<DecodedStreamData>&& aData) {
  mOutputListener.Disconnect();

- NS_DispatchToMainThread(
-     NS_NewRunnableFunction("DecodedStream::DestroyData",
-                            [data = std::move(aData)]() { data->Forget(); }));
+ NS_DispatchToMainThread(NS_NewRunnableFunction(
+     "DecodedStream::DestroyData",
+     [data = std::move(aData), manager = mOutputStreamManager]() {
+       data->Forget();
+       manager->RemoveTracks();
+     }));
}
void DecodedStream::SetPlaying(bool aPlaying) {
@@ -543,22 +539,17 @@ void DecodedStream::SetPlaying(bool aPlaying) {
void DecodedStream::SetVolume(double aVolume) {
  AssertOwnerThread();
- mVolume = aVolume;
+ mParams.mVolume = aVolume;
}

void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
  AssertOwnerThread();
- mPlaybackRate = aPlaybackRate;
+ mParams.mPlaybackRate = aPlaybackRate;
}

void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
  AssertOwnerThread();
- mPreservesPitch = aPreservesPitch;
- }
-
- double DecodedStream::PlaybackRate() const {
-   AssertOwnerThread();
-   return mPlaybackRate;
+ mParams.mPreservesPitch = aPreservesPitch;
}
static void SendStreamAudio(DecodedStreamData* aStream,
@@ -637,11 +628,12 @@ void DecodedStream::SendAudio(double aVolume,
  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio().
  if (output.GetDuration() > 0) {
-   mData->mAudioTrackWritten += mData->mAudioTrack->AppendData(&output);
+   mData->mAudioStreamWritten += mData->mAudioStream->AppendData(&output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
-   mData->mListener->EndTrackAt(mData->mAudioTrack, mData->mAudioTrackWritten);
+   mData->mListener->EndTrackAt(mData->mAudioStream,
+                                mData->mAudioStreamWritten);
mData->mHaveSentFinishAudio = true;
}
}
@@ -652,9 +644,9 @@ void DecodedStreamData::WriteVideoToSegment(
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle) {
  RefPtr<layers::Image> image = aImage;
  auto end =
-     mVideoTrack->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
+     mVideoStream->MicrosecondsToTrackTimeRoundDown(aEnd.ToMicroseconds());
  auto start =
-     mVideoTrack->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
+     mVideoStream->MicrosecondsToTrackTimeRoundDown(aStart.ToMicroseconds());
aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
aTimeStamp);
// Extend this so we get accurate durations for all frames.
@@ -700,7 +692,7 @@ void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
  // for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
- mData->mVideoTrack->AppendData(&resetter);
+ mData->mVideoStream->AppendData(&resetter);
// Consumer buffers have been reset. We now set the next time to the start
// time of the current frame, so that it can be displayed again on resuming.
@@ -780,7 +772,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
-                       mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
+                       mData->mVideoStream->TrackTimeToMicroseconds(1) + 1));
mData->mLastVideoImage = v->mImage;
mData->mLastVideoImageDisplaySize = v->mDisplay;
mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
@@ -796,7 +788,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
  }

  if (output.GetDuration() > 0) {
-   mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
+   mData->mVideoStreamWritten += mData->mVideoStream->AppendData(&output);
}
if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
@@ -818,7 +810,7 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
    // We round the nr of microseconds up, because WriteVideoToSegment
    // will round the conversion from microseconds to TrackTime down.
    auto deviation = TimeUnit::FromMicroseconds(
-       mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1);
+       mData->mVideoStream->TrackTimeToMicroseconds(1) + 1);
auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
mData->WriteVideoToSegment(
mData->mLastVideoImage, start, start + deviation,
@@ -829,9 +821,11 @@ void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
    if (forceBlack) {
      endSegment.ReplaceWithDisabled();
    }
-   mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
+   mData->mVideoStreamWritten +=
+       mData->mVideoStream->AppendData(&endSegment);
  }

- mData->mListener->EndTrackAt(mData->mVideoTrack, mData->mVideoTrackWritten);
+ mData->mListener->EndTrackAt(mData->mVideoStream,
+                              mData->mVideoStreamWritten);
mData->mHaveSentFinishVideo = true;
}
}
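
The deviation computed in SendVideo() above rounds one track tick up to a whole number of microseconds. A standalone worked example of why the extra microsecond is needed, assuming a 48 kHz track rate (any rate behaves the same way):

#include <cassert>
#include <cstdint>

int64_t TrackTimeToMicrosecondsFloor(int64_t aTicks, int64_t aRate) {
  return aTicks * 1000000 / aRate;  // integer division rounds down
}

int main() {
  const int64_t rate = 48000;
  // One tick is 20.83us; the floor conversion yields 20.
  int64_t oneTickUs = TrackTimeToMicrosecondsFloor(1, rate);
  // The reverse conversion also rounds down, so 20us would map back to
  // 0 ticks; adding 1us guarantees the duration covers at least 1 tick.
  int64_t deviationUs = oneTickUs + 1;
  assert(deviationUs * rate / 1000000 >= 1);
  return 0;
}
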
@@ -848,7 +842,7 @@ void DecodedStream::SendData() {
    return;
  }

- SendAudio(mVolume, mPrincipalHandle);
+ SendAudio(mParams.mVolume, mPrincipalHandle);
SendVideo(mPrincipalHandle);
}
@@ -902,6 +896,10 @@ void DecodedStream::PlayingChanged() {
    // On seek or pause we discard future frames.
    ResetVideo(mPrincipalHandle);
  }
+ mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
+     "OutputStreamManager::SetPlaying", mOutputStreamManager,
+     &OutputStreamManager::SetPlaying, mPlaying));
}
void DecodedStream::ConnectListener() {


@@ -22,9 +22,9 @@
namespace mozilla {

class DecodedStreamData;
- class MediaDecoderStateMachine;
class AudioData;
class VideoData;
+ class OutputStreamManager;
struct PlaybackInfoInit;
class ProcessedMediaTrack;
class TimeStamp;
@@ -33,12 +33,17 @@ template <class T>
class MediaQueue;

class DecodedStream : public MediaSink {
+ using MediaSink::PlaybackParams;

 public:
- DecodedStream(MediaDecoderStateMachine* aStateMachine,
-               nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
-               double aVolume, double aPlaybackRate, bool aPreservesPitch,
+ DecodedStream(AbstractThread* aOwnerThread, AbstractThread* aMainThread,
                MediaQueue<AudioData>& aAudioQueue,
-               MediaQueue<VideoData>& aVideoQueue);
+               MediaQueue<VideoData>& aVideoQueue,
+               OutputStreamManager* aOutputStreamManager);

  // MediaSink functions.
+ const PlaybackParams& GetPlaybackParams() const override;
+ void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
media::TimeUnit GetEndTime(TrackType aType) const override;
@@ -53,8 +58,6 @@ class DecodedStream : public MediaSink {
  void SetPreservesPitch(bool aPreservesPitch) override;
  void SetPlaying(bool aPlaying) override;
- double PlaybackRate() const override;

  nsresult Start(const media::TimeUnit& aStartTime,
                 const MediaInfo& aInfo) override;
  void Stop() override;
@@ -85,6 +88,14 @@ class DecodedStream : public MediaSink {
  const RefPtr<AbstractThread> mOwnerThread;
  const RefPtr<AbstractThread> mAbstractMainThread;

+ /*
+  * Main thread only members.
+  */
+ // Data about MediaStreams that are being fed by the decoder.
+ const RefPtr<OutputStreamManager> mOutputStreamManager;
+
  /*
   * Worker thread only members.
   */
@@ -95,11 +106,8 @@ class DecodedStream : public MediaSink {
  Watchable<bool> mPlaying;
  Mirror<PrincipalHandle> mPrincipalHandle;

- const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
- double mVolume;
- double mPlaybackRate;
- bool mPreservesPitch;
+ PlaybackParams mParams;

  media::NullableTimeUnit mStartTime;
media::TimeUnit mLastOutputTime;
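
A hypothetical call site for the restored constructor, to show how the pieces are wired together; every name except DecodedStream's own parameters is an illustrative placeholder:

already_AddRefed<DecodedStream> CreateStreamSink(
    AbstractThread* aOwnerThread, AbstractThread* aMainThread,
    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue,
    OutputStreamManager* aManager) {
  RefPtr<DecodedStream> sink = new DecodedStream(
      aOwnerThread, aMainThread, aAudioQueue, aVideoQueue, aManager);
  return sink.forget();
}
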


@@ -7,6 +7,7 @@
#ifndef MediaSink_h_
#define MediaSink_h_
#include "AudioDeviceInfo.h"
#include "MediaInfo.h"
#include "mozilla/MozPromise.h"
#include "mozilla/RefPtr.h"
@@ -38,6 +39,23 @@ class MediaSink {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaSink);
  typedef mozilla::TrackInfo::TrackType TrackType;

+ struct PlaybackParams {
+   PlaybackParams()
+       : mVolume(1.0), mPlaybackRate(1.0), mPreservesPitch(true) {}
+   double mVolume;
+   double mPlaybackRate;
+   bool mPreservesPitch;
+   RefPtr<AudioDeviceInfo> mSink;
+ };
+
+ // Return the playback parameters of this sink.
+ // Can be called in any state.
+ virtual const PlaybackParams& GetPlaybackParams() const = 0;
+
+ // Set the playback parameters of this sink.
+ // Can be called in any state.
+ virtual void SetPlaybackParams(const PlaybackParams& aParams) = 0;
// EndedPromise needs to be a non-exclusive promise as it is shared between
// both the AudioSink and VideoSink.
typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> EndedPromise;
@@ -82,10 +100,6 @@ class MediaSink {
  // Pause/resume the playback. Only work after playback starts.
  virtual void SetPlaying(bool aPlaying) = 0;

- // Get the playback rate.
- // Can be called in any state.
- virtual double PlaybackRate() const = 0;
// Single frame rendering operation may need to be done before playback
// started (1st frame) or right after seek completed or playback stopped.
// Do nothing if this sink has no video track. Can be called in any state.
@@ -108,10 +122,6 @@ class MediaSink {
  // Can be called in any state.
  virtual bool IsPlaying() const = 0;

- // The audio output device this MediaSink is playing audio data to. The
- // default device is used if this returns null.
- virtual const AudioDeviceInfo* AudioDevice() { return nullptr; }
// Called on the state machine thread to shut down the sink. All resources
// allocated by this sink should be released.
// Must be called after playback stopped.
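
A short usage sketch for the restored PlaybackParams accessors; `sink` is a placeholder for any concrete MediaSink:

MediaSink::PlaybackParams params = sink->GetPlaybackParams();
// Defaults are volume 1.0, rate 1.0, pitch preservation on, null device.
params.mPlaybackRate = 2.0;
sink->SetPlaybackParams(params);  // pushes all four fields in one call
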


@@ -0,0 +1,357 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "OutputStreamManager.h"
#include "DOMMediaStream.h"
#include "../MediaTrackGraph.h"
#include "mozilla/dom/MediaStreamTrack.h"
#include "mozilla/dom/AudioStreamTrack.h"
#include "mozilla/dom/VideoStreamTrack.h"
#include "nsContentUtils.h"
namespace mozilla {
#define LOG(level, msg, ...) \
MOZ_LOG(gMediaDecoderLog, level, (msg, ##__VA_ARGS__))
class DecodedStreamTrackSource : public dom::MediaStreamTrackSource {
public:
NS_DECL_ISUPPORTS_INHERITED
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DecodedStreamTrackSource,
dom::MediaStreamTrackSource)
explicit DecodedStreamTrackSource(SourceMediaTrack* aSourceStream,
nsIPrincipal* aPrincipal)
: dom::MediaStreamTrackSource(aPrincipal, nsString()),
mTrack(aSourceStream->Graph()->CreateForwardedInputTrack(
aSourceStream->mType)),
mPort(mTrack->AllocateInputPort(aSourceStream)) {
MOZ_ASSERT(NS_IsMainThread());
}
dom::MediaSourceEnum GetMediaSource() const override {
return dom::MediaSourceEnum::Other;
}
void Stop() override {
MOZ_ASSERT(NS_IsMainThread());
// We don't notify the source that a track was stopped since it will keep
// producing tracks until the element ends. The decoder also needs the
// tracks it created to be live at the source since the decoder's clock is
// based on MediaStreams during capture. We do however, disconnect this
// track's underlying track.
if (!mTrack->IsDestroyed()) {
mTrack->Destroy();
mPort->Destroy();
}
}
void Disable() override {}
void Enable() override {}
void SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
mPrincipal = aPrincipal;
PrincipalChanged();
}
void ForceEnded() { OverrideEnded(); }
const RefPtr<ProcessedMediaTrack> mTrack;
const RefPtr<MediaInputPort> mPort;
protected:
virtual ~DecodedStreamTrackSource() {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mTrack->IsDestroyed());
}
};
NS_IMPL_ADDREF_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
NS_IMPL_RELEASE_INHERITED(DecodedStreamTrackSource, dom::MediaStreamTrackSource)
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DecodedStreamTrackSource)
NS_INTERFACE_MAP_END_INHERITING(dom::MediaStreamTrackSource)
NS_IMPL_CYCLE_COLLECTION_INHERITED(DecodedStreamTrackSource,
dom::MediaStreamTrackSource)
OutputStreamData::OutputStreamData(OutputStreamManager* aManager,
AbstractThread* aAbstractMainThread,
DOMMediaStream* aDOMStream)
: mManager(aManager),
mAbstractMainThread(aAbstractMainThread),
mDOMStream(aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
}
OutputStreamData::~OutputStreamData() = default;
void OutputStreamData::AddTrack(SourceMediaTrack* aTrack,
MediaSegment::Type aType,
nsIPrincipal* aPrincipal, bool aAsyncAddTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
LOG(LogLevel::Debug,
"Adding output %s track sourced from track %p to MediaStream %p%s",
aType == MediaSegment::AUDIO ? "audio" : "video", aTrack,
mDOMStream.get(), aAsyncAddTrack ? " (async)" : "");
auto source = MakeRefPtr<DecodedStreamTrackSource>(aTrack, aPrincipal);
RefPtr<dom::MediaStreamTrack> track;
if (aType == MediaSegment::AUDIO) {
track = new dom::AudioStreamTrack(mDOMStream->GetParentObject(),
source->mTrack, source);
} else {
MOZ_ASSERT(aType == MediaSegment::VIDEO);
track = new dom::VideoStreamTrack(mDOMStream->GetParentObject(),
source->mTrack, source);
}
mTracks.AppendElement(track.get());
if (aAsyncAddTrack) {
GetMainThreadEventTarget()->Dispatch(
NewRunnableMethod<RefPtr<dom::MediaStreamTrack>>(
"DOMMediaStream::AddTrackInternal", mDOMStream.get(),
&DOMMediaStream::AddTrackInternal, track));
} else {
mDOMStream->AddTrackInternal(track);
}
}
void OutputStreamData::RemoveTrack(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
LOG(LogLevel::Debug,
"Removing output track sourced by track %p from MediaStream %p", aTrack,
mDOMStream.get());
for (const auto& t : nsTArray<WeakPtr<dom::MediaStreamTrack>>(mTracks)) {
mTracks.RemoveElement(t);
if (!t || t->Ended()) {
continue;
}
DecodedStreamTrackSource& source =
static_cast<DecodedStreamTrackSource&>(t->GetSource());
GetMainThreadEventTarget()->Dispatch(
NewRunnableMethod("DecodedStreamTrackSource::ForceEnded", &source,
&DecodedStreamTrackSource::ForceEnded));
}
}
void OutputStreamData::SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_DIAGNOSTIC_ASSERT(mDOMStream);
for (const WeakPtr<dom::MediaStreamTrack>& track : mTracks) {
if (!track || track->Ended()) {
continue;
}
DecodedStreamTrackSource& source =
static_cast<DecodedStreamTrackSource&>(track->GetSource());
source.SetPrincipal(aPrincipal);
}
}
OutputStreamManager::OutputStreamManager(SharedDummyTrack* aDummyStream,
nsIPrincipal* aPrincipal,
AbstractThread* aAbstractMainThread)
: mAbstractMainThread(aAbstractMainThread),
mDummyStream(aDummyStream),
mPrincipalHandle(
aAbstractMainThread,
aPrincipal ? MakePrincipalHandle(aPrincipal) : PRINCIPAL_HANDLE_NONE,
"OutputStreamManager::mPrincipalHandle (Canonical)") {
MOZ_ASSERT(NS_IsMainThread());
}
void OutputStreamManager::Add(DOMMediaStream* aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Adding MediaStream %p", aDOMStream);
OutputStreamData* p = mStreams
.AppendElement(new OutputStreamData(
this, mAbstractMainThread, aDOMStream))
->get();
for (const auto& lt : mLiveTracks) {
p->AddTrack(lt->mSourceTrack, lt->mType, mPrincipalHandle.Ref(), false);
}
}
void OutputStreamManager::Remove(DOMMediaStream* aDOMStream) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Removing MediaStream %p", aDOMStream);
AutoRemoveDestroyedStreams();
mStreams.ApplyIf(
aDOMStream, 0, StreamComparator(),
[&](const UniquePtr<OutputStreamData>& aData) {
for (const auto& lt : mLiveTracks) {
aData->RemoveTrack(lt->mSourceTrack);
}
},
[]() { MOZ_ASSERT_UNREACHABLE("Didn't exist"); });
DebugOnly<bool> rv = mStreams.RemoveElement(aDOMStream, StreamComparator());
MOZ_ASSERT(rv);
}
bool OutputStreamManager::HasTrackType(MediaSegment::Type aType) {
MOZ_ASSERT(NS_IsMainThread());
return mLiveTracks.Contains(aType, TrackTypeComparator());
}
bool OutputStreamManager::HasTracks(SourceMediaTrack* aAudioStream,
SourceMediaTrack* aVideoStream) {
MOZ_ASSERT(NS_IsMainThread());
size_t nrExpectedTracks = 0;
bool asExpected = true;
if (aAudioStream) {
Unused << ++nrExpectedTracks;
asExpected = asExpected && mLiveTracks.Contains(
MakePair(aAudioStream, MediaSegment::AUDIO),
TrackComparator());
}
if (aVideoStream) {
Unused << ++nrExpectedTracks;
asExpected = asExpected && mLiveTracks.Contains(
MakePair(aVideoStream, MediaSegment::VIDEO),
TrackComparator());
}
asExpected = asExpected && mLiveTracks.Length() == nrExpectedTracks;
return asExpected;
}
SourceMediaTrack* OutputStreamManager::GetPrecreatedTrackOfType(
MediaSegment::Type aType) const {
auto i = mLiveTracks.IndexOf(aType, 0, PrecreatedTrackTypeComparator());
return i == nsTArray<UniquePtr<LiveTrack>>::NoIndex
? nullptr
: mLiveTracks[i]->mSourceTrack.get();
}
size_t OutputStreamManager::NumberOfTracks() {
MOZ_ASSERT(NS_IsMainThread());
return mLiveTracks.Length();
}
already_AddRefed<SourceMediaTrack> OutputStreamManager::AddTrack(
MediaSegment::Type aType) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(!HasTrackType(aType),
"Cannot have two tracks of the same type at the same time");
RefPtr<SourceMediaTrack> track =
mDummyStream->mTrack->Graph()->CreateSourceTrack(aType);
if (!mPlaying) {
track->Suspend();
}
LOG(LogLevel::Info, "Adding %s track sourced by track %p",
aType == MediaSegment::AUDIO ? "audio" : "video", track.get());
mLiveTracks.AppendElement(MakeUnique<LiveTrack>(track, aType));
AutoRemoveDestroyedStreams();
for (const auto& data : mStreams) {
data->AddTrack(track, aType, mPrincipalHandle.Ref(), true);
}
return track.forget();
}
OutputStreamManager::LiveTrack::LiveTrack(SourceMediaTrack* aSourceTrack,
MediaSegment::Type aType)
: mSourceTrack(aSourceTrack), mType(aType) {}
OutputStreamManager::LiveTrack::~LiveTrack() { mSourceTrack->Destroy(); }
void OutputStreamManager::AutoRemoveDestroyedStreams() {
MOZ_ASSERT(NS_IsMainThread());
for (size_t i = mStreams.Length(); i > 0; --i) {
const auto& data = mStreams[i - 1];
if (!data->mDOMStream) {
// If the mDOMStream WeakPtr is now null, mDOMStream has been destructed.
mStreams.RemoveElementAt(i - 1);
}
}
}
void OutputStreamManager::RemoveTrack(SourceMediaTrack* aTrack) {
MOZ_ASSERT(NS_IsMainThread());
LOG(LogLevel::Info, "Removing track with source track %p", aTrack);
DebugOnly<bool> rv =
mLiveTracks.RemoveElement(aTrack, TrackStreamComparator());
MOZ_ASSERT(rv);
AutoRemoveDestroyedStreams();
for (const auto& data : mStreams) {
data->RemoveTrack(aTrack);
}
}
void OutputStreamManager::RemoveTracks() {
MOZ_ASSERT(NS_IsMainThread());
for (size_t i = mLiveTracks.Length(); i > 0; --i) {
RemoveTrack(mLiveTracks[i - 1]->mSourceTrack);
}
}
void OutputStreamManager::Disconnect() {
MOZ_ASSERT(NS_IsMainThread());
RemoveTracks();
MOZ_ASSERT(mLiveTracks.IsEmpty());
AutoRemoveDestroyedStreams();
nsTArray<RefPtr<DOMMediaStream>> domStreams(mStreams.Length());
for (const auto& data : mStreams) {
domStreams.AppendElement(data->mDOMStream);
}
for (auto& domStream : domStreams) {
Remove(domStream);
}
MOZ_ASSERT(mStreams.IsEmpty());
}
AbstractCanonical<PrincipalHandle>*
OutputStreamManager::CanonicalPrincipalHandle() {
return &mPrincipalHandle;
}
void OutputStreamManager::SetPrincipal(nsIPrincipal* aPrincipal) {
MOZ_ASSERT(NS_IsMainThread());
nsCOMPtr<nsIPrincipal> principal = GetPrincipalFromHandle(mPrincipalHandle);
if (nsContentUtils::CombineResourcePrincipals(&principal, aPrincipal)) {
AutoRemoveDestroyedStreams();
for (const UniquePtr<OutputStreamData>& data : mStreams) {
data->SetPrincipal(principal);
}
mPrincipalHandle = MakePrincipalHandle(principal);
}
}
void OutputStreamManager::SetPlaying(bool aPlaying) {
MOZ_ASSERT(NS_IsMainThread());
if (mPlaying == aPlaying) {
return;
}
mPlaying = aPlaying;
for (auto& lt : mLiveTracks) {
if (mPlaying) {
lt->mSourceTrack->Resume();
lt->mEverPlayed = true;
} else {
lt->mSourceTrack->Suspend();
}
}
}
OutputStreamManager::~OutputStreamManager() = default;
#undef LOG
} // namespace mozilla
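
Taken together, the main-thread lifecycle this file implements looks roughly like the following sketch; the graph, principal, and stream variables are illustrative placeholders, not names from this patch:

RefPtr<OutputStreamManager> manager = new OutputStreamManager(
    dummyTrack, principal, AbstractThread::MainThread());
manager->Add(domStream);  // future tracks are mirrored into this stream
RefPtr<SourceMediaTrack> audio = manager->AddTrack(MediaSegment::AUDIO);
manager->SetPlaying(true);   // resumes the suspended source tracks
// ... the decoder appends to `audio` while playback runs ...
manager->Disconnect();       // ends all tracks and drops all streams
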


@@ -0,0 +1,161 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef OutputStreamManager_h
#define OutputStreamManager_h
#include "mozilla/RefPtr.h"
#include "mozilla/StateMirroring.h"
#include "mozilla/WeakPtr.h"
#include "nsTArray.h"
namespace mozilla {
class DOMMediaStream;
class MediaInputPort;
class OutputStreamManager;
class ProcessedMediaTrack;
class SourceMediaTrack;
namespace dom {
class MediaStreamTrack;
}
class OutputStreamData {
public:
OutputStreamData(OutputStreamManager* aManager,
AbstractThread* aAbstractMainThread,
DOMMediaStream* aDOMStream);
OutputStreamData(const OutputStreamData& aOther) = delete;
OutputStreamData(OutputStreamData&& aOther) = delete;
~OutputStreamData();
// Creates and adds a MediaStreamTrack to mDOMStream so that we can feed data
// to it. For a true aAsyncAddTrack we will dispatch a task to add the
// created track to mDOMStream, as is required by spec for the "addtrack"
// event.
void AddTrack(SourceMediaTrack* aTrack, MediaSegment::Type aType,
nsIPrincipal* aPrincipal, bool aAsyncAddTrack);
// Ends any MediaStreamTracks sourced from aTrack.
void RemoveTrack(SourceMediaTrack* aTrack);
void SetPrincipal(nsIPrincipal* aPrincipal);
const RefPtr<OutputStreamManager> mManager;
const RefPtr<AbstractThread> mAbstractMainThread;
// The DOMMediaStream we add tracks to and represent.
const WeakPtr<DOMMediaStream> mDOMStream;
private:
// Tracks that have been added and not yet removed.
nsTArray<WeakPtr<dom::MediaStreamTrack>> mTracks;
};
class OutputStreamManager {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OutputStreamManager);
public:
OutputStreamManager(SharedDummyTrack* aDummyStream, nsIPrincipal* aPrincipal,
AbstractThread* aAbstractMainThread);
// Add the output stream to the collection.
void Add(DOMMediaStream* aDOMStream);
// Remove the output stream from the collection.
void Remove(DOMMediaStream* aDOMStream);
// Returns true if there's a live track of the given type.
bool HasTrackType(MediaSegment::Type aType);
// Returns true if the given tracks are sourcing all currently live tracks.
// Use nullptr to make it ignored for that type.
bool HasTracks(SourceMediaTrack* aAudioStream,
SourceMediaTrack* aVideoStream);
// Gets the underlying track for the given type if it has never been played,
// or nullptr if there is none.
SourceMediaTrack* GetPrecreatedTrackOfType(MediaSegment::Type aType) const;
// Returns the number of live tracks.
size_t NumberOfTracks();
// Add a track sourced to all output tracks and return the MediaTrack that
// sources it.
already_AddRefed<SourceMediaTrack> AddTrack(MediaSegment::Type aType);
// Remove all currently live tracks.
void RemoveTracks();
// Remove all currently live tracks and all output streams.
void Disconnect();
// The principal handle for the underlying decoder.
AbstractCanonical<PrincipalHandle>* CanonicalPrincipalHandle();
// Called when the underlying decoder's principal has changed.
void SetPrincipal(nsIPrincipal* aPrincipal);
// Called by DecodedStream when its playing state changes. While not playing
// we suspend mSourceTrack.
void SetPlaying(bool aPlaying);
// Return true if the collection of output streams is empty.
bool IsEmpty() const {
MOZ_ASSERT(NS_IsMainThread());
return mStreams.IsEmpty();
}
const RefPtr<AbstractThread> mAbstractMainThread;
private:
~OutputStreamManager();
class LiveTrack {
public:
LiveTrack(SourceMediaTrack* aSourceTrack, MediaSegment::Type aType);
~LiveTrack();
const RefPtr<SourceMediaTrack> mSourceTrack;
const MediaSegment::Type mType;
bool mEverPlayed = false;
};
struct StreamComparator {
static bool Equals(const UniquePtr<OutputStreamData>& aData,
DOMMediaStream* aStream) {
return aData->mDOMStream == aStream;
}
};
struct TrackStreamComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
SourceMediaTrack* aTrack) {
return aLiveTrack->mSourceTrack == aTrack;
}
};
struct TrackTypeComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
MediaSegment::Type aType) {
return aLiveTrack->mType == aType;
}
};
struct PrecreatedTrackTypeComparator {
static bool Equals(const UniquePtr<LiveTrack>& aLiveTrack,
MediaSegment::Type aType) {
return !aLiveTrack->mEverPlayed && aLiveTrack->mType == aType;
}
};
struct TrackComparator {
static bool Equals(
const UniquePtr<LiveTrack>& aLiveTrack,
const Pair<SourceMediaTrack*, MediaSegment::Type>& aOther) {
return aLiveTrack->mSourceTrack == aOther.first() &&
aLiveTrack->mType == aOther.second();
}
};
// Goes through mStreams and removes any entries that have been destroyed.
void AutoRemoveDestroyedStreams();
// Remove tracks sourced from aTrack from all output tracks.
void RemoveTrack(SourceMediaTrack* aTrack);
const RefPtr<SharedDummyTrack> mDummyStream;
nsTArray<UniquePtr<OutputStreamData>> mStreams;
nsTArray<UniquePtr<LiveTrack>> mLiveTracks;
Canonical<PrincipalHandle> mPrincipalHandle;
bool mPlaying = false;
};
} // namespace mozilla
#endif // OutputStreamManager_h
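
The comparator structs above follow nsTArray's duck-typed search idiom: any object with a static Equals(element, key) can relate the element type to a differently typed key. A standalone illustration, assuming Gecko's nsTArray and nsCString:

struct LengthComparator {
  static bool Equals(const nsCString& aElement, size_t aLength) {
    return aElement.Length() == aLength;
  }
};

bool HasStringOfLength(const nsTArray<nsCString>& aStrings, size_t aLength) {
  // Contains/IndexOf/RemoveElement all accept the same comparator shape.
  return aStrings.Contains(aLength, LengthComparator());
}
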


@@ -156,6 +156,18 @@ VideoSink::~VideoSink() {
#endif
}

+ const MediaSink::PlaybackParams& VideoSink::GetPlaybackParams() const {
+   AssertOwnerThread();
+   return mAudioSink->GetPlaybackParams();
+ }
+
+ void VideoSink::SetPlaybackParams(const PlaybackParams& aParams) {
+   AssertOwnerThread();
+   mAudioSink->SetPlaybackParams(aParams);
+ }
RefPtr<VideoSink::EndedPromise> VideoSink::OnEnded(TrackType aType) {
AssertOwnerThread();
MOZ_ASSERT(mAudioSink->IsStarted(), "Must be called after playback starts.");
@@ -211,12 +223,6 @@ void VideoSink::SetPreservesPitch(bool aPreservesPitch) {
  mAudioSink->SetPreservesPitch(aPreservesPitch);
}

- double VideoSink::PlaybackRate() const {
-   AssertOwnerThread();
-   return mAudioSink->PlaybackRate();
- }
void VideoSink::EnsureHighResTimersOnOnlyIfPlaying() {
#ifdef XP_WIN
const bool needed = IsPlaying();
@@ -434,8 +440,8 @@ void VideoSink::TryUpdateRenderedVideoFrames() {
    // If we send this future frame to the compositor now, it will be rendered
    // immediately and break A/V sync. Instead, we schedule a timer to send it
    // later.
-   int64_t delta =
-       (v->mTime - clockTime).ToMicroseconds() / mAudioSink->PlaybackRate();
+   int64_t delta = (v->mTime - clockTime).ToMicroseconds() /
+                   mAudioSink->GetPlaybackParams().mPlaybackRate;
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
RefPtr<VideoSink> self = this;
mUpdateScheduler.Ensure(
@@ -475,7 +481,7 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
  AutoTArray<ImageContainer::NonOwningImage, 16> images;
  TimeStamp lastFrameTime;
- double playbackRate = mAudioSink->PlaybackRate();
+ MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
for (uint32_t i = 0; i < frames.Length(); ++i) {
VideoData* frame = frames[i];
bool wasSent = frame->IsSentToCompositor();
@@ -493,8 +499,8 @@ void VideoSink::RenderVideoFrames(int32_t aMaxFrames, int64_t aClockTime,
    MOZ_ASSERT(!aClockTimeStamp.IsNull());
    int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
-   TimeStamp t =
-       aClockTimeStamp + TimeDuration::FromMicroseconds(delta / playbackRate);
+   TimeStamp t = aClockTimeStamp +
+                 TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
// Timestamps out of order; drop the new frame. In theory we should
// probably replace the previous frame with the new frame if the
@@ -607,8 +613,9 @@ void VideoSink::UpdateRenderedVideoFrames() {
    int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
    int64_t delta = std::max(nextFrameTime - clockTime.ToMicroseconds(),
                             MIN_UPDATE_INTERVAL_US);
-   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
-                                    delta / mAudioSink->PlaybackRate());
+   TimeStamp target =
+       nowTime + TimeDuration::FromMicroseconds(
+                     delta / mAudioSink->GetPlaybackParams().mPlaybackRate);
RefPtr<VideoSink> self = this;
mUpdateScheduler.Ensure(
@@ -640,7 +647,7 @@ void VideoSink::MaybeResolveEndPromise() {
        "end promise. clockTime=%" PRId64 ", endTime=%" PRId64,
        clockTime.ToMicroseconds(), mVideoFrameEndTime.ToMicroseconds());
    int64_t delta = (mVideoFrameEndTime - clockTime).ToMicroseconds() /
-                   mAudioSink->PlaybackRate();
+                   mAudioSink->GetPlaybackParams().mPlaybackRate;
TimeStamp target = nowTime + TimeDuration::FromMicroseconds(delta);
auto resolveEndPromise = [self = RefPtr<VideoSink>(this)]() {
self->mEndPromiseHolder.ResolveIfExists(true, __func__);
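
All four call sites above share the same arithmetic: the media-time distance to an event, divided by the playback rate, gives the wall-clock delay. In isolation:

#include <cstdint>

// At 2x rate, a frame 40000us ahead in media time is due in 20000us.
int64_t WallClockDelayUs(int64_t aFrameTimeUs, int64_t aClockTimeUs,
                         double aPlaybackRate) {
  return static_cast<int64_t>(
      (aFrameTimeUs - aClockTimeUs) / aPlaybackRate);
}
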


@@ -32,6 +32,10 @@ class VideoSink : public MediaSink {
            MediaQueue<VideoData>& aVideoQueue, VideoFrameContainer* aContainer,
            FrameStatistics& aFrameStats, uint32_t aVQueueSentToCompositerSize);

+ const PlaybackParams& GetPlaybackParams() const override;
+ void SetPlaybackParams(const PlaybackParams& aParams) override;
RefPtr<EndedPromise> OnEnded(TrackType aType) override;
TimeUnit GetEndTime(TrackType aType) const override;
@@ -48,8 +52,6 @@ class VideoSink : public MediaSink {
  void SetPlaying(bool aPlaying) override;
- double PlaybackRate() const override;

  void Redraw(const VideoInfo& aInfo) override;

  nsresult Start(const TimeUnit& aStartTime, const MediaInfo& aInfo) override;


@@ -8,6 +8,7 @@ UNIFIED_SOURCES += [
    'AudioSink.cpp',
    'AudioSinkWrapper.cpp',
    'DecodedStream.cpp',
+   'OutputStreamManager.cpp',
'VideoSink.cpp',
]


@@ -24,11 +24,11 @@ mozilla::LogModule* GetSourceBufferResourceLog() {
namespace mozilla {

- RefPtr<GenericPromise> SourceBufferResource::Close() {
+ nsresult SourceBufferResource::Close() {
  MOZ_ASSERT(OnThread());
  SBR_DEBUG("Close");
  mClosed = true;
- return GenericPromise::CreateAndResolve(true, __func__);
+ return NS_OK;
}
nsresult SourceBufferResource::ReadAt(int64_t aOffset, char* aBuffer,


@@ -36,7 +36,7 @@ class SourceBufferResource final
      public DecoderDoctorLifeLogger<SourceBufferResource> {
 public:
  SourceBufferResource();
- RefPtr<GenericPromise> Close() override;
+ nsresult Close() override;
nsresult ReadAt(int64_t aOffset, char* aBuffer, uint32_t aCount,
uint32_t* aBytes) override;
// Memory-based and no locks, caching discouraged.
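
For contrast, hypothetical call sites for the two Close() shapes this hunk swaps between; `resource` and `target` are placeholders:

// Promise-returning form (removed here): completion observed asynchronously.
//   resource->Close()->Then(target, __func__,
//                           []() { /* resolved */ },
//                           []() { /* rejected */ });
// Synchronous form (restored here): result available immediately.
//   nsresult rv = resource->Close();
//   NS_ENSURE_SUCCESS(rv, rv);
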


@@ -10,19 +10,19 @@
<pre id="test">
<script class="testbody" type="text/javascript">

- const manager = new MediaTestManager;
+ var manager = new MediaTestManager;

function startTest(test, token) {
- const elemType = getMajorMimeType(test.type);
- const element = document.createElement(elemType);
+ var elemType = getMajorMimeType(test.type);
+ var element = document.createElement(elemType);

- let audioOnchange = 0;
- let audioOnaddtrack = 0;
- let audioOnremovetrack = 0;
- let videoOnchange = 0;
- let videoOnaddtrack = 0;
- let videoOnremovetrack = 0;
- let isPlaying = false;
+ var audioOnchange = 0;
+ var audioOnaddtrack = 0;
+ var audioOnremovetrack = 0;
+ var videoOnchange = 0;
+ var videoOnaddtrack = 0;
+ var videoOnremovetrack = 0;
+ var isPlaying = false;
isnot(element.audioTracks, undefined,
'HTMLMediaElement::AudioTracks() property should be available.');
@@ -53,43 +53,26 @@ function startTest(test, token) {
videoOnchange++;
}
- function checkTrackNotRemoved() {
-   is(audioOnremovetrack, 0, 'Should have no calls of onremovetrack on audioTracks.');
-   is(videoOnremovetrack, 0, 'Should have no calls of onremovetrack on videoTracks.');
-   if (isPlaying) {
-     is(element.audioTracks.length, test.hasAudio ? 1 : 0,
-        'Expected length of audioTracks.');
-     is(element.videoTracks.length, test.hasVideo ? 1 : 0,
-        'Expected length of videoTracks.');
-   }
- }
-
  function checkTrackRemoved() {
-   is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
-   is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
    if (isPlaying) {
-     is(audioOnremovetrack, test.hasAudio ? 1 : 0,
-        'Expected calls of onremovetrack on audioTracks.');
-     is(videoOnremovetrack, test.hasVideo ? 1 : 0,
-        'Expected calls of onremovetrack on videoTracks.');
+     if (test.hasAudio) {
+       is(audioOnremovetrack, 1, 'Calls of onremovetrack on audioTracks should be 1.');
+       is(element.audioTracks.length, 0, 'The length of audioTracks should be 0.');
+     }
+     if (test.hasVideo) {
+       is(videoOnremovetrack, 1, 'Calls of onremovetrack on videoTracks should be 1.');
+       is(element.videoTracks.length, 0, 'The length of videoTracks should be 0.');
+     }
    }
  }

  function onended() {
    ok(true, 'Event ended is expected to be fired on element.');
-   checkTrackNotRemoved();
+   checkTrackRemoved();
    element.onended = null;
    element.onplaying = null;
    element.onpause = null;
-   element.src = "";
-   is(element.audioTracks.length, 0, 'audioTracks have been forgotten');
-   is(element.videoTracks.length, 0, 'videoTracks have been forgotten');
-   is(audioOnremovetrack, 0, 'No audio removetrack events yet');
-   is(videoOnremovetrack, 0, 'No video removetrack events yet');
-   setTimeout(() => {
-     checkTrackRemoved();
-     manager.finished(element.token);
-   }, 100);
+   manager.finished(element.token);
}
function checkTrackAdded() {


@@ -10,29 +10,28 @@
<pre id="test">
<script class="testbody" type="text/javascript">

- const manager = new MediaTestManager;
+ var manager = new MediaTestManager;
function startTest(test, token) {
// Scenario to test:
// 1. Audio tracks and video tracks should be added to the track list when
- //     metadata has loaded, and all tracks should remain even after we seek to
- //     the end.
- //  2. No tracks should be added back to the list if we replay from the end,
- //     and no tracks should be removed from the list after we seek to the end.
- //  3. After seek to the middle from end of playback, all tracks should remain
- //     in the list if we play from here, and no tracks should be removed from
- //     the list after we seek to the end.
- //  4. Unsetting the media element's src attribute should remove all tracks.
+ //     playing, and all tracks should be removed from the list after we seek
+ //     to the end.
+ //  2. All tracks should be added back to the list if we replay from the end,
+ //     and all tracks should be removed from the list after we seek to the end.
+ //  3. After seek to the middle from end of playback, all tracks should be
+ //     added back to the list if we play from here, and all tracks should be
+ //     removed from the list after we seek to the end.

- const elemType = getMajorMimeType(test.type);
- const element = document.createElement(elemType);
+ var elemType = getMajorMimeType(test.type);
+ var element = document.createElement(elemType);

- let audioOnaddtrack = 0;
- let audioOnremovetrack = 0;
- let videoOnaddtrack = 0;
- let videoOnremovetrack = 0;
- let isPlaying = false;
- let steps = 0;
+ var audioOnaddtrack = 0;
+ var audioOnremovetrack = 0;
+ var videoOnaddtrack = 0;
+ var videoOnremovetrack = 0;
+ var isPlaying = false;
+ var steps = 0;
element.audioTracks.onaddtrack = function(e) {
audioOnaddtrack++;
@@ -50,23 +49,16 @@ function startTest(test, token) {
videoOnremovetrack++;
}
- function testExpectedAddtrack(expectedCalls) {
+ function testTrackEventCalls(expectedCalls) {
    if (test.hasAudio) {
      is(audioOnaddtrack, expectedCalls,
         'Calls of onaddtrack on audioTracks should be '+expectedCalls+' times.');
-   }
-   if (test.hasVideo) {
-     is(videoOnaddtrack, expectedCalls,
-        'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
-   }
- }
-
- function testExpectedRemovetrack(expectedCalls) {
-   if (test.hasAudio) {
      is(audioOnremovetrack, expectedCalls,
         'Calls of onremovetrack on audioTracks should be '+expectedCalls+' times.');
    }
    if (test.hasVideo) {
+     is(videoOnaddtrack, expectedCalls,
+        'Calls of onaddtrack on videoTracks should be '+expectedCalls+' times.');
      is(videoOnremovetrack, expectedCalls,
         'Calls of onremovetrack on videoTracks should be '+expectedCalls+' times.');
    }
@@ -84,29 +76,21 @@ function startTest(test, token) {
if (isPlaying) {
switch(steps) {
case 1:
-         testExpectedAddtrack(1);
-         testExpectedRemovetrack(0);
+         testTrackEventCalls(1);
          element.onplaying = onplaying;
          element.play();
          steps++;
          break;
        case 2:
-         testExpectedAddtrack(1);
-         testExpectedRemovetrack(0);
+         testTrackEventCalls(2);
          element.currentTime = element.duration * 0.5;
          element.onplaying = onplaying;
          element.play();
          steps++;
          break;
        case 3:
-         testExpectedAddtrack(1);
-         testExpectedRemovetrack(0);
-         element.src = "";
-         setTimeout(() => {
-           testExpectedAddtrack(1);
-           testExpectedRemovetrack(1);
-           finishTesting();
-         }, 0);
+         testTrackEventCalls(3);
+         finishTesting();
break;
}
} else {


@@ -1,39 +1,35 @@
<!DOCTYPE HTML>
<html>
<head>
- <title>Test that reloading and seeking in a media element that's being captured behaves as expected</title>
+ <title>Test that reloading and seeking in a media element that's being captured doesn't crash</title>
  <script src="/tests/SimpleTest/SimpleTest.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
- <script src="manifest.js"></script>
+ <script type="text/javascript" src="manifest.js"></script>
</head>
<body>
<video id="v"></video>
<video id="vout"></video>
<video id="vout_untilended"></video>
<pre id="test">
- <script>
- const v = document.getElementById('v');
- const vout = document.getElementById('vout');
- const vout_untilended = document.getElementById('vout_untilended');
+ <script class="testbody" type="text/javascript">
+ SimpleTest.waitForExplicitFinish();
+ var v = document.getElementById('v');
+ var vout = document.getElementById('vout');
+ var vout_untilended = document.getElementById('vout_untilended');
function dumpEvent(event) {
- const video = event.target;
- info(
-   `${video.name}:${video.id} GOT EVENT ${event.type} ` +
-   `currentTime=${video.currentTime} paused=${video.paused} ` +
-   `ended=${video.ended} readyState=${video.readyState}`
- );
+ var video = event.target;
+ info(video.name + " GOT EVENT " + event.type +
+      " currentTime=" + video.currentTime +
+      " paused=" + video.paused +
+      " ended=" + video.ended +
+      " readyState=" + video.readyState);
}

- function unexpected(event) {
-   ok(false, `${event.type} event received on ${event.target.id} unexpectedly`);
- };
-
- const events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
- for (const e of events) {
-   v.addEventListener(e, dumpEvent);
-   vout.addEventListener(e, dumpEvent);
-   vout_untilended.addEventListener(e, dumpEvent);
+ var events = ["timeupdate", "seeking", "seeked", "ended", "playing", "pause"];
+ for (var i = 0; i < events.length; ++i) {
+   v.addEventListener(events[i], dumpEvent);
}
function isWithinEps(a, b, msg) {
@@ -46,91 +42,92 @@ function isGreaterThanOrEqualEps(a, b, msg) {
"Got " + a + ", expected at least " + b + "; " + msg);
}
- async function startTest(test) {
-   const seekTime = test.duration/2;
+ function startTest(test) {
+   var seekTime = test.duration/2;
+
+   function endedAfterReplay() {
+     isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at third 'ended' event");
+     isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration*2,
+                             "checking vout.currentTime after seeking, playing through and reloading");
+     SimpleTest.finish();
+   };
+
+   function endedAfterSeek() {
+     isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at second 'ended' event");
+     isGreaterThanOrEqualEps(vout.currentTime, (test.duration - seekTime) + test.duration,
+                             "checking vout.currentTime after seeking and playing through again");
+     v.removeEventListener("ended", endedAfterSeek);
+     v.addEventListener("ended", endedAfterReplay);
+     v.src = test.name + "?1";
+     v.play();
+   };
+
+   function seeked() {
+     isGreaterThanOrEqualEps(v.currentTime, seekTime, "Finished seeking");
+     isGreaterThanOrEqualEps(vout.currentTime, test.duration,
+                             "checking vout.currentTime has not changed after seeking");
+     v.removeEventListener("seeked", seeked);
+     function dontPlayAgain() {
+       ok(false, "vout_untilended should not play again");
+     }
+     vout_untilended.addEventListener("playing", dontPlayAgain);
+     vout_untilended.addEventListener("ended", dontPlayAgain);
+     v.addEventListener("ended", endedAfterSeek);
+     v.play();
+   };
+
+   function ended() {
+     // Don't compare current time until both v and vout_untilended are ended,
+     // otherwise, current time could be smaller than the duration.
+     if (!v.ended || !vout_untilended.ended) {
+       return;
+     }
+     isGreaterThanOrEqualEps(vout.currentTime, test.duration, "checking vout.currentTime at first 'ended' event");
+     isGreaterThanOrEqualEps(v.currentTime, test.duration, "checking v.currentTime at first 'ended' event");
+     is(vout.ended, false, "checking vout has not ended");
+     is(vout_untilended.ended, true, "checking vout_untilended has actually ended");
+     v.removeEventListener("ended", ended);
+     vout_untilended.removeEventListener("ended", ended);
+     v.pause();
+     v.currentTime = seekTime;
+     v.addEventListener("seeked", seeked);
+   };
+   v.addEventListener("ended", ended);
+   vout_untilended.addEventListener("ended", ended);
+
+   function checkNoEnded() {
+     ok(false, "ended event received unexpectedly");
+   };
+   vout.addEventListener("ended", checkNoEnded);

    v.src = test.name;
    v.name = test.name;
    vout.name = test.name;
    vout_untilended.name = test.name;
-   v.preload = "metadata";
-   await new Promise(r => v.onloadedmetadata = r);
-   vout.srcObject = v.mozCaptureStream();
-   vout.play();
+   function loadedmetadata() {
+     vout.srcObject = v.mozCaptureStream();
+     vout.play();
-   vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
-   vout_untilended.play();
+     vout_untilended.srcObject = v.mozCaptureStreamUntilEnded();
+     vout_untilended.play();
-   v.play();
+     v.play();
+   };
-   await new Promise(r => v.onended = r);
-   isGreaterThanOrEqualEps(v.currentTime, test.duration,
-                           "checking v.currentTime at first 'ended' event");
-   await Promise.all([
-     new Promise(r => vout.onended = r),
-     new Promise(r => vout_untilended.onended = r),
-   ]);
-   isGreaterThanOrEqualEps(vout.currentTime, test.duration,
-                           "checking vout.currentTime at first 'ended' event");
-   ok(vout.ended, "checking vout has actually ended");
-   ok(vout_untilended.ended, "checking vout_untilended has actually ended");
-   vout_untilended.srcObject.onaddtrack = unexpected;
-   vout_untilended.onplaying = unexpected;
-   vout_untilended.onended = unexpected;
-   const voutPreSeekCurrentTime = vout.currentTime;
-   v.currentTime = seekTime;
-   await new Promise(r => v.onseeked = r);
-   is(v.currentTime, seekTime, "Finished seeking");
-   is(vout.currentTime, voutPreSeekCurrentTime,
-      "checking vout.currentTime has not changed after seeking");
-   v.play();
-   vout.play();
-   await new Promise(r => v.onended = r);
-   isGreaterThanOrEqualEps(v.currentTime, test.duration,
-                           "checking v.currentTime at second 'ended' event");
-   await new Promise(r => vout.onended = r);
-   isGreaterThanOrEqualEps(vout.currentTime,
-                           (test.duration - seekTime) + test.duration,
-                           "checking vout.currentTime after seeking and playing through again");
-   v.src = test.name + "?1";
-   v.play();
-   vout.play();
-   await new Promise(r => v.onended = r);
-   isGreaterThanOrEqualEps(v.currentTime, test.duration,
-                           "checking v.currentTime at third 'ended' event");
-   await new Promise(r => vout.onended = r);
-   isGreaterThanOrEqualEps(vout.currentTime,
-                           (test.duration - seekTime) + test.duration*2,
-                           "checking vout.currentTime after seeking, playing through and reloading");
+   v.addEventListener("loadedmetadata", loadedmetadata, {once: true});
  }

- (async () => {
-   SimpleTest.waitForExplicitFinish();
-   try {
-     const testVideo = getPlayableVideo(gSmallTests);
-     if (testVideo) {
-       await startTest(testVideo);
-     } else {
-       todo(false, "No playable video");
-     }
-   } catch(e) {
-     ok(false, `Error: ${e}`);
-   } finally {
-     SimpleTest.finish();
-   }
- })();
+ var testVideo = getPlayableVideo(gSmallTests);
+ if (testVideo) {
+   startTest(testVideo);
+ } else {
+   todo(false, "No playable video");
+ }
</script>
</pre>
</body>


@@ -9,7 +9,6 @@
#include "AudioDestinationNode.h"
#include "nsIScriptError.h"
#include "AudioNodeTrack.h"
- #include "MediaStreamTrack.h"
namespace mozilla {
namespace dom {


@@ -76,8 +76,8 @@ void MediaStreamAudioSourceNode::Init(DOMMediaStream* aMediaStream,
  mInputStream->AddConsumerToKeepAlive(ToSupports(this));

  mInputStream->RegisterTrackListener(this);
- if (mInputStream->Audible()) {
-   NotifyAudible();
+ if (mInputStream->Active()) {
+   NotifyActive();
  }
  AttachToRightTrack(mInputStream, aRv);
}
@@ -119,7 +119,6 @@ void MediaStreamAudioSourceNode::AttachToTrack(
  mInputPort = mInputTrack->ForwardTrackContentsTo(outputTrack);
  PrincipalChanged(mInputTrack);  // trigger enabling/disabling of the connector
  mInputTrack->AddPrincipalChangeObserver(this);
- MarkActive();
}
void MediaStreamAudioSourceNode::DetachFromTrack() {
@@ -166,6 +165,7 @@ void MediaStreamAudioSourceNode::AttachToRightTrack(
    if (!track->Ended()) {
      AttachToTrack(track, aRv);
+     MarkActive();
    }
    return;
  }
@@ -202,7 +202,7 @@ void MediaStreamAudioSourceNode::NotifyTrackRemoved(
  }
}

- void MediaStreamAudioSourceNode::NotifyAudible() {
+ void MediaStreamAudioSourceNode::NotifyActive() {
MOZ_ASSERT(mInputStream);
Context()->StartBlockedAudioContextIfAllowed();
}


@@ -91,7 +91,7 @@ class MediaStreamAudioSourceNode
  // From DOMMediaStream::TrackListener.
  void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override;
  void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override;
- void NotifyAudible() override;
+ void NotifyActive() override;
// From PrincipalChangeObserver<MediaStreamTrack>.
void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override;


@@ -0,0 +1,8 @@
[mediaElementAudioSourceToScriptProcessorTest.html]
disabled:
if (os == "mac") and (version == "OS X 10.14"): new platform
if (os == "android") and debug: https://bugzilla.mozilla.org/show_bug.cgi?id=1546756
[All data processed correctly]
expected:
if processor == "aarch64": ["PASS", "FAIL"]