/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_MEDIASTREAMGRAPHIMPL_H_
#define MOZILLA_MEDIASTREAMGRAPHIMPL_H_

#include "MediaStreamGraph.h"

#include "nsDataHashtable.h"
#include "mozilla/Monitor.h"
#include "mozilla/TimeStamp.h"
#include "nsIMemoryReporter.h"
#include "nsIThread.h"
#include "nsIRunnable.h"
#include "nsIAsyncShutdown.h"
#include "Latency.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/WeakPtr.h"
#include "GraphDriver.h"
#include "AudioMixer.h"

namespace mozilla {

template <typename T>
class LinkedList;
#ifdef MOZ_WEBRTC
class AudioOutputObserver;
#endif

/**
 * A per-stream update message passed from the media graph thread to the
 * main thread.
 */
struct StreamUpdate
{
  RefPtr<MediaStream> mStream;
  StreamTime mNextMainThreadCurrentTime;
  bool mNextMainThreadFinished;
};

/**
 * This represents a message run on the graph thread to modify stream or graph
 * state. These are passed from the main thread to the graph thread through
 * AppendMessage(), or scheduled on the graph thread with
 * RunMessageAfterProcessing(). A ControlMessage always has a weak reference
 * to a particular affected stream.
 */
class ControlMessage
{
public:
  explicit ControlMessage(MediaStream* aStream) : mStream(aStream)
  {
    MOZ_COUNT_CTOR(ControlMessage);
  }
  // All these run on the graph thread
  virtual ~ControlMessage()
  {
    MOZ_COUNT_DTOR(ControlMessage);
  }
  // Do the action of this message on the MediaStreamGraph thread. Any actions
  // affecting graph processing should take effect at mProcessedTime.
  // All stream data for times < mProcessedTime has already been computed.
  virtual void Run() = 0;
  // RunDuringShutdown() is only relevant to messages generated on the main
  // thread (for AppendMessage()).
  // When we're shutting down the application, most messages are ignored but
  // some cleanup messages should still be processed (on the main thread).
  // This must not add new control messages to the graph.
  virtual void RunDuringShutdown() {}
  MediaStream* GetStream() { return mStream; }

protected:
  // We do not hold a reference to mStream. The graph will be holding
  // a reference to the stream until the Destroy message is processed. The
  // last message referencing a stream is the Destroy message for that stream.
  MediaStream* mStream;
};
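
// Example (illustrative sketch, not part of this header): a typical
// ControlMessage subclass is created on the main thread and handed to
// MediaStreamGraphImpl::AppendMessage(); its Run() override then executes on
// the graph thread. The SetMutedMessage name and its payload are hypothetical.
//
//   class SetMutedMessage : public ControlMessage {
//   public:
//     SetMutedMessage(MediaStream* aStream, bool aMuted)
//       : ControlMessage(aStream), mMuted(aMuted) {}
//     void Run() override { /* apply mMuted to mStream on the graph thread */ }
//     bool mMuted;
//   };
//
//   graph->AppendMessage(MakeUnique<SetMutedMessage>(stream, true));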

class MessageBlock
{
public:
  nsTArray<UniquePtr<ControlMessage>> mMessages;
};

/**
 * The implementation of a media stream graph. This class is private to this
 * file. It's not in the anonymous namespace because MediaStream needs to
 * be able to friend it.
 *
 * There can be multiple MediaStreamGraphs per process: one per AudioChannel.
 * Additionally, each OfflineAudioContext object creates its own
 * MediaStreamGraph object too.
 */
class MediaStreamGraphImpl : public MediaStreamGraph,
                             public nsIMemoryReporter
{
public:
  NS_DECL_THREADSAFE_ISUPPORTS
  NS_DECL_NSIMEMORYREPORTER

  /**
   * Use aGraphDriverRequested with SYSTEM_THREAD_DRIVER or AUDIO_THREAD_DRIVER
   * to create a MediaStreamGraph which provides support for real-time audio
   * and/or video. Set it to OFFLINE_THREAD_DRIVER in order to create a
   * non-realtime instance which just churns through its inputs and produces
   * output. Those objects currently only support audio, and are used to
   * implement OfflineAudioContext. They do not support MediaStream inputs.
   */
  explicit MediaStreamGraphImpl(GraphDriverType aGraphDriverRequested,
                                TrackRate aSampleRate,
                                dom::AudioChannel aChannel);

  /**
   * Unregisters memory reporting and deletes this instance. This should be
   * called instead of calling the destructor directly.
   */
  void Destroy();

  // Main thread only.
  /**
   * This runs every time we need to sync state from the media graph thread
   * to the main thread while the main thread is not in the middle
   * of a script. It runs during a "stable state" (per HTML5) or during
   * an event posted to the main thread.
   * The boolean affects which boolean controlling runnable dispatch is
   * cleared.
   */
  void RunInStableState(bool aSourceIsMSG);
  /**
   * Ensure a runnable to run RunInStableState is posted to the appshell to
   * run at the next stable state (per HTML5).
   * See EnsureStableStateEventPosted.
   */
  void EnsureRunInStableState();
  /**
   * Called to apply a StreamUpdate to its stream.
   */
  void ApplyStreamUpdate(StreamUpdate* aUpdate);
  /**
   * Append a ControlMessage to the message queue. This queue is drained
   * during RunInStableState; the messages will run on the graph thread.
   */
  void AppendMessage(UniquePtr<ControlMessage> aMessage);

  // Shutdown helpers.

  static already_AddRefed<nsIAsyncShutdownClient>
  GetShutdownBarrier()
  {
    nsCOMPtr<nsIAsyncShutdownService> svc = services::GetAsyncShutdown();
    MOZ_RELEASE_ASSERT(svc);

    nsCOMPtr<nsIAsyncShutdownClient> barrier;
    nsresult rv = svc->GetProfileBeforeChange(getter_AddRefs(barrier));
    if (!barrier) {
      // We are probably in a content process.
      rv = svc->GetContentChildShutdown(getter_AddRefs(barrier));
    }
    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
    MOZ_RELEASE_ASSERT(barrier);
    return barrier.forget();
  }

  class ShutdownTicket final
  {
  public:
    explicit ShutdownTicket(nsIAsyncShutdownBlocker* aBlocker) : mBlocker(aBlocker) {}
    NS_INLINE_DECL_REFCOUNTING(ShutdownTicket)
  private:
    ~ShutdownTicket()
    {
      nsCOMPtr<nsIAsyncShutdownClient> barrier = GetShutdownBarrier();
      barrier->RemoveBlocker(mBlocker);
    }

    nsCOMPtr<nsIAsyncShutdownBlocker> mBlocker;
  };
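
  // Illustrative sketch (not from this header): a ShutdownTicket keeps an
  // already-registered nsIAsyncShutdownBlocker alive and removes it from the
  // shutdown barrier when the last reference to the ticket is dropped. The
  // |blocker| variable below is assumed to exist.
  //
  //   RefPtr<ShutdownTicket> ticket = new ShutdownTicket(blocker);
  //   graph->ForceShutDown(ticket);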

  /**
   * Make this MediaStreamGraph enter forced-shutdown state. This state
   * will be noticed by the media graph thread, which will shut down all
   * streams and other state controlled by the media graph thread.
   * This is called during application shutdown.
   */
  void ForceShutDown(ShutdownTicket* aShutdownTicket);

  /**
   * Called before the thread runs.
   */
  void Init();
  // The following methods run on the graph thread (or possibly the main
  // thread if mLifecycleState > LIFECYCLE_RUNNING)
  void AssertOnGraphThreadOrNotRunning() const
  {
    // either we're on the right thread (and calling CurrentDriver() is safe),
    // or we're going to assert anyway, so don't cross-check CurrentDriver
#ifdef DEBUG
    // if all the safety checks fail, assert we own the monitor
    if (!mDriver->OnThread()) {
      if (!(mDetectedNotRunning &&
            mLifecycleState > LIFECYCLE_RUNNING &&
            NS_IsMainThread())) {
        mMonitor.AssertCurrentThreadOwns();
      }
    }
#endif
  }

  void MaybeProduceMemoryReport();

  /**
   * Returns true if this MediaStreamGraph should keep running
   */
  bool UpdateMainThreadState();

  /**
   * Returns true if this MediaStreamGraph should keep running
   */
  bool OneIteration(GraphTime aStateEnd);

  bool Running() const
  {
    mMonitor.AssertCurrentThreadOwns();
    return mLifecycleState == LIFECYCLE_RUNNING;
  }

  /* This is the end of the current iteration, that is, the current time of
   * the graph. */
  GraphTime IterationEnd() const;

  /**
   * Ensure there is an event posted to the main thread to run
   * RunInStableState. mMonitor must be held.
   * See EnsureRunInStableState
   */
  void EnsureStableStateEventPosted();
  /**
   * Generate messages to the main thread to update it for all state changes.
   * mMonitor must be held.
   */
  void PrepareUpdatesToMainThreadState(bool aFinalUpdate);
  /**
   * Returns false if there is any stream that has finished but not yet
   * finished playing out.
   */
  bool AllFinishedStreamsNotified();
  /**
   * If we are rendering in non-realtime mode, we don't want to send messages
   * to the main thread at each iteration for performance reasons. We instead
   * notify the main thread at the same rate.
   */
  bool ShouldUpdateMainThread();
  // The following methods are the various stages of RunThread processing.
  /**
   * Advance all stream state to mStateComputedTime.
   */
  void UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime);
  /**
   * Process chunks for all streams and raise events for properties that have
   * changed, such as principalId.
   */
  void ProcessChunkMetadata(GraphTime aPrevCurrentTime);
  /**
   * Process chunks for the given stream and interval, and raise events for
   * properties that have changed, such as principalId.
   */
  template<typename C, typename Chunk>
  void ProcessChunkMetadataForInterval(MediaStream* aStream,
                                       TrackID aTrackID,
                                       C& aSegment,
                                       StreamTime aStart,
                                       StreamTime aEnd);
  /**
   * Process graph messages in mFrontMessageQueue.
   */
  void RunMessagesInQueue();
  /**
   * Update stream processing order and recompute stream blocking until
   * aEndBlockingDecisions.
   */
  void UpdateGraph(GraphTime aEndBlockingDecisions);

  void SwapMessageQueues()
  {
    MOZ_ASSERT(CurrentDriver()->OnThread());
    MOZ_ASSERT(mFrontMessageQueue.IsEmpty());
    mMonitor.AssertCurrentThreadOwns();
    mFrontMessageQueue.SwapElements(mBackMessageQueue);
  }
  /**
   * Do all the processing and play the audio and video, from
   * mProcessedTime to mStateComputedTime.
   */
  void Process();
  /**
   * Extract any state updates pending in aStream, and apply them.
   */
  void ExtractPendingInput(SourceMediaStream* aStream,
                           GraphTime aDesiredUpToTime,
                           bool* aEnsureNextIteration);

  /**
   * For use during ProcessedMediaStream::ProcessInput() or
   * MediaStreamListener callbacks, when graph state cannot be changed.
   * Schedules |aMessage| to run after processing, at a time when graph state
   * can be changed. Graph thread.
   */
  void RunMessageAfterProcessing(UniquePtr<ControlMessage> aMessage);

  /**
   * Called when a suspend/resume/close operation has been completed, on the
   * graph thread.
   */
  void AudioContextOperationCompleted(MediaStream* aStream,
                                      void* aPromise,
                                      dom::AudioContextOperation aOperation);

  /**
   * Apply an AudioContext operation (suspend/resume/close), on the graph
   * thread.
   */
  void ApplyAudioContextOperationImpl(MediaStream* aDestinationStream,
                                      const nsTArray<MediaStream*>& aStreams,
                                      dom::AudioContextOperation aOperation,
                                      void* aPromise);

  /**
   * Increment suspend count on aStream and move it to mSuspendedStreams if
   * necessary.
   */
  void IncrementSuspendCount(MediaStream* aStream);
  /**
   * Decrement suspend count on aStream and move it back to mStreams if
   * necessary.
   */
  void DecrementSuspendCount(MediaStream* aStream);

  /*
   * Move streams from mStreams to mSuspendedStreams if suspending/closing an
   * AudioContext, or the inverse when resuming an AudioContext.
   */
  void SuspendOrResumeStreams(dom::AudioContextOperation aAudioContextOperation,
                              const nsTArray<MediaStream*>& aStreamSet);

  /**
   * Determine if we have any audio tracks, or are about to add any audio
   * tracks. Also checks if we'll need the AEC running (i.e. microphone input
   * tracks).
   */
  bool AudioTrackPresent(bool& aNeedsAEC);

  /**
   * Sort mStreams so that every stream not in a cycle is after any streams
   * it depends on, and every stream in a cycle is marked as being in a cycle.
   * Also sets mIsConsumed on every stream.
   */
  void UpdateStreamOrder();

  /**
   * Returns the smallest value of t such that t is a multiple of
   * WEBAUDIO_BLOCK_SIZE and t > aTime.
   */
  GraphTime RoundUpToNextAudioBlock(GraphTime aTime);
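  // For example (assuming the usual 128-frame WEBAUDIO_BLOCK_SIZE):
  // RoundUpToNextAudioBlock(1) == 128, and because the result must be
  // strictly greater than aTime, RoundUpToNextAudioBlock(128) == 256.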
  /**
   * Produce data for all streams >= aStreamIndex for the current time
   * interval. Advances block by block, each iteration producing data for all
   * streams for a single block.
   * This is called whenever we have an AudioNodeStream in the graph.
   */
  void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
                                         TrackRate aSampleRate);
  /**
   * If aStream will underrun between aTime and aEndBlockingDecisions, returns
   * the time at which the underrun will start. Otherwise return
   * aEndBlockingDecisions.
   */
  GraphTime WillUnderrun(MediaStream* aStream, GraphTime aEndBlockingDecisions);

  /**
   * Given a graph time aTime, convert it to a stream time taking into
   * account the time during which aStream is scheduled to be blocked.
   */
  StreamTime GraphTimeToStreamTimeWithBlocking(MediaStream* aStream, GraphTime aTime);

  /**
   * Call NotifyHaveCurrentData on aStream's listeners.
   */
  void NotifyHasCurrentData(MediaStream* aStream);
  /**
   * If aStream needs an audio stream but doesn't have one, create it.
   * If aStream doesn't need an audio stream but has one, destroy it.
   */
  void CreateOrDestroyAudioStreams(MediaStream* aStream);
  /**
   * Queue audio (mix of stream audio and silence for blocked intervals)
   * to the audio output stream. Returns the number of frames played.
   */
  StreamTime PlayAudio(MediaStream* aStream);
  /**
   * Set the correct current video frame for stream aStream.
   */
  void PlayVideo(MediaStream* aStream);

  void OpenAudioInputImpl(int aID,
                          AudioDataListener *aListener);
  virtual nsresult OpenAudioInput(int aID,
                                  AudioDataListener *aListener) override;
  void CloseAudioInputImpl(AudioDataListener *aListener);
  virtual void CloseAudioInput(AudioDataListener *aListener) override;

  /**
   * No more data will be forthcoming for aStream. The stream will end
   * at the current buffer end point. The StreamTracks's tracks must be
   * explicitly set to finished by the caller.
   */
  void FinishStream(MediaStream* aStream);
  /**
   * Compute how much stream data we would like to buffer for aStream.
   */
  StreamTime GetDesiredBufferEnd(MediaStream* aStream);
  /**
   * Returns true when there are no active streams.
   */
  bool IsEmpty() const
  {
    return mStreams.IsEmpty() && mSuspendedStreams.IsEmpty() && mPortCount == 0;
  }

  /**
   * Add aStream to the graph and initialize its graph-specific state.
   */
  void AddStreamGraphThread(MediaStream* aStream);
  /**
   * Remove aStream from the graph. Ensures that pending messages about the
   * stream back to the main thread are flushed.
   */
  void RemoveStreamGraphThread(MediaStream* aStream);
  /**
   * Remove aPort from the graph and release it.
   */
  void DestroyPort(MediaInputPort* aPort);
  /**
   * Mark the media stream order as dirty.
   */
  void SetStreamOrderDirty()
  {
    mStreamOrderDirty = true;
  }

  // Always stereo for now.
  uint32_t AudioChannelCount() const { return 2; }

  double MediaTimeToSeconds(GraphTime aTime) const
  {
    NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
                 "Bad time");
    return static_cast<double>(aTime)/GraphRate();
  }

  GraphTime SecondsToMediaTime(double aS) const
  {
    NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX/TRACK_RATE_MAX,
                 "Bad seconds");
    return GraphRate() * aS;
  }

  GraphTime MillisecondsToMediaTime(int32_t aMS) const
  {
    return RateConvertTicksRoundDown(GraphRate(), 1000, aMS);
  }
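  // Worked example (assuming a 48 kHz graph rate): MillisecondsToMediaTime(10)
  // is 10 * 48000 / 1000 = 480 ticks, and MediaTimeToSeconds(480) converts
  // back to 480 / 48000 = 0.01 seconds.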

  /**
   * Signal to the graph that the thread has paused indefinitely, or resumed.
   */
  void PausedIndefinitly();
  void ResumedFromPaused();

  /**
   * Not safe to call off the MediaStreamGraph thread unless monitor is held!
   */
  GraphDriver* CurrentDriver() const
  {
    AssertOnGraphThreadOrNotRunning();
    return mDriver;
  }

  bool RemoveMixerCallback(MixerCallbackReceiver* aReceiver)
  {
    return mMixer.RemoveCallback(aReceiver);
  }

  /**
   * Effectively set the new driver, while we are switching.
   * It is only safe to call this at the very end of an iteration, when there
   * has been a SwitchAtNextIteration call during the iteration. The driver
   * should return and pass the control to the new driver shortly after.
   * We can also switch from Revive() (on MainThread), in which case the
   * monitor is held.
   */
  void SetCurrentDriver(GraphDriver* aDriver)
  {
    AssertOnGraphThreadOrNotRunning();
    mDriver = aDriver;
  }

  Monitor& GetMonitor()
  {
    return mMonitor;
  }

  void EnsureNextIteration()
  {
    mNeedAnotherIteration = true; // atomic
    if (mGraphDriverAsleep) { // atomic
      MonitorAutoLock mon(mMonitor);
      CurrentDriver()->WakeUp(); // Might not be the same driver; might have woken already
    }
  }

  void EnsureNextIterationLocked()
  {
    mNeedAnotherIteration = true; // atomic
    if (mGraphDriverAsleep) { // atomic
      CurrentDriver()->WakeUp(); // Might not be the same driver; might have woken already
    }
  }

  // Capture Stream API. This allows getting a mixed-down output for a window.
  void RegisterCaptureStreamForWindow(uint64_t aWindowId,
                                      ProcessedMediaStream* aCaptureStream);
  void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
  already_AddRefed<MediaInputPort>
  ConnectToCaptureStream(uint64_t aWindowId, MediaStream* aMediaStream);

  class StreamSet {
  public:
    class iterator {
    public:
      explicit iterator(MediaStreamGraphImpl& aGraph)
        : mGraph(&aGraph), mArrayNum(-1), mArrayIndex(0)
      {
        ++(*this);
      }
      iterator() : mGraph(nullptr), mArrayNum(2), mArrayIndex(0) {}
      MediaStream* operator*()
      {
        return Array()->ElementAt(mArrayIndex);
      }
      iterator operator++()
      {
        ++mArrayIndex;
        while (mArrayNum < 2 &&
               (mArrayNum < 0 || mArrayIndex >= Array()->Length())) {
          ++mArrayNum;
          mArrayIndex = 0;
        }
        return *this;
      }
      bool operator==(const iterator& aOther) const
      {
        return mArrayNum == aOther.mArrayNum && mArrayIndex == aOther.mArrayIndex;
      }
      bool operator!=(const iterator& aOther) const
      {
        return !(*this == aOther);
      }
    private:
      nsTArray<MediaStream*>* Array()
      {
        return mArrayNum == 0 ? &mGraph->mStreams : &mGraph->mSuspendedStreams;
      }
      MediaStreamGraphImpl* mGraph;
      int mArrayNum;
      uint32_t mArrayIndex;
    };

    explicit StreamSet(MediaStreamGraphImpl& aGraph) : mGraph(aGraph) {}
    iterator begin() { return iterator(mGraph); }
    iterator end() { return iterator(); }
  private:
    MediaStreamGraphImpl& mGraph;
  };
  StreamSet AllStreams() { return StreamSet(*this); }
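  // Illustrative usage sketch: StreamSet exposes begin()/end(), so AllStreams()
  // can drive a range-based for loop over both mStreams and mSuspendedStreams:
  //
  //   for (MediaStream* stream : AllStreams()) {
  //     // visit every stream in the graph, suspended or not
  //   }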

  // Data members
  //
  /**
   * Graphs own owning references to their driver, until shutdown. When a
   * driver switch occurs, the previous driver is either deleted, or its
   * ownership is passed to an event that will take care of the asynchronous
   * cleanup, as audio streams can take some time to shut down.
   */
  RefPtr<GraphDriver> mDriver;

  // The following state is managed on the graph thread only, unless
  // mLifecycleState > LIFECYCLE_RUNNING in which case the graph thread
  // is not running and this state can be used from the main thread.

  /**
   * The graph keeps a reference to each stream.
   * References are maintained manually to simplify reordering without
   * unnecessary thread-safe refcount changes.
   */
  nsTArray<MediaStream*> mStreams;
Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
- http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange
- In a few words, the behavior we want:
- A closed context cannot have new nodes created, but it can still do
decodeAudioData, create buffers, and so on.
- OfflineAudioContexts don't support those methods; their transitions happen
at startRendering and at the end of processing. onstatechange is used to
make this observable.
- (regular) AudioContexts support those methods. The promises and
onstatechange should be resolved/called only when the operation has actually
completed on the rendering thread. Once a context has been closed, it
cannot transition back to "running". An AudioContext switches to "running"
when the audio callback starts running, which lets authors measure how long
the audio stack takes to start up.
- MediaStreams that feed into or come out of a suspended graph should,
respectively, not buffer at the graph input and output silence.
- A suspended context should not be doing much work on the CPU, and we should
try to pause audio streams if we can (this behaviour is the main reason we
need this in the first place: saving battery on mobile, and CPU on all
platforms).
- Now, the implementation:
- AudioNodeStreams are now tagged with a context id, so that all the streams
of a given AudioContext can be operated on from the graph thread without
having to take a lock every time the AudioContext is touched. This happens
in the AudioNodeStream ctor. IDs are of course constant for the lifetime of
the node.
- When an AudioContext goes into suspended mode, the streams for this
AudioContext are moved out of the mStreams array into a second array,
mSuspendedStreams. Streams in mSuspendedStreams are not ordered, and are not
processed.
- The MSG will automatically switch to a SystemClockDriver when it finds
that there are no more AudioNodeStreams/Streams with an audio track. This is
how pausing the audio subsystem and saving battery works. Subsequently, when
the MSG finds that there are only streams in mSuspendedStreams, it will go
to sleep (block on a monitor), so we save CPU, but it does not shut itself
down. This is mostly not new behaviour (it is what the MSG has done since
the refactoring), but it is important to note.
- Promises are gripped (addref-ed) on the main thread, and then shepherded
down other threads and to the GraphDriver if needed (sometimes we can
resolve them right away). They move between threads as void* to prevent
calling methods on them, as they are not thread safe. The driver then
executes the operation, and when it's done (initializing and closing audio
streams can take some time), we send the promise back to the main thread and
resolve it, casting back to Promise* after asserting we're back on the main
thread. This way, we can resolve a promise on the main thread once an
operation has completed (suspending an audio stream, starting it again on
resume(), etc.) without having to do bookkeeping to match suspend calls to
their results. A sketch of this round-trip follows this list.
- The stream destruction logic now takes into account that a stream can be
destroyed while not being in mStreams.
- A graph can now switch GraphDriver twice or more per iteration, for
example if an author calls suspend()/resume()/suspend() in the same script.
- Some operations have to be performed on suspended streams, so we now use a
double for-loop over mSuspendedStreams and mStreams in some places in
MediaStreamGraph.cpp (see the sketch after the mSuspendedStreams declaration
below).
- A tricky part was making sure everything worked at AudioContext
boundaries. TrackUnionStreams that have one of their input streams suspended
append null ticks instead.
- The graph ordering algorithm had to be altered to not include suspended
streams.
- There are some edge cases (adding a stream to a suspended graph, calling
suspend/resume when a graph has just been close()d).
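
A minimal sketch of that promise round-trip (the helper names below are
invented for illustration; in the real code the opaque pointer travels
inside ControlMessages and GraphDriver state rather than through standalone
functions):

  // Main thread: grip the promise and erase its type, so that no Promise
  // methods can be called while it is off the main thread.
  void* GripPromise(mozilla::dom::Promise* aPromise)
  {
    NS_ADDREF(aPromise);                  // keep it alive during the trip
    return static_cast<void*>(aPromise);
  }

  // Back on the main thread, once the driver has finished the operation
  // (e.g. an audio stream has actually been suspended or restarted).
  void ResolveGrippedPromise(void* aOpaquePromise)
  {
    MOZ_ASSERT(NS_IsMainThread());
    mozilla::dom::Promise* promise =
      static_cast<mozilla::dom::Promise*>(aOpaquePromise);
    promise->MaybeResolve(JS::UndefinedHandleValue);
    NS_RELEASE(promise);                  // drop the grip taken above
  }
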
  /**
   * This stores MediaStreams that are part of suspended AudioContexts.
   * mStreams and mSuspendedStreams are disjoint sets: a stream is either
   * suspended or not suspended. Suspended streams are not ordered in
   * UpdateStreamOrder, and are therefore not doing any processing.
   */
  nsTArray<MediaStream*> mSuspendedStreams;
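
  // Illustrative sketch only (not a member of this class): the "double
  // for-loop" mentioned in the notes above, visiting the ordered streams
  // first and then the unordered suspended ones. Assumes mStreams, declared
  // earlier in this class, is also an nsTArray<MediaStream*>.
  //
  //   for (uint32_t phase = 0; phase < 2; ++phase) {
  //     const nsTArray<MediaStream*>& streams =
  //       (phase == 0) ? mStreams : mSuspendedStreams;
  //     for (uint32_t i = 0; i < streams.Length(); ++i) {
  //       MediaStream* stream = streams[i];
  //       // ... bookkeeping that must also cover suspended streams ...
  //     }
  //   }
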
  /**
   * Suspended AudioContext IDs
   */
  nsTHashtable<nsUint64HashKey> mSuspendedContexts;

  /**
   * Streams from mFirstCycleBreaker to the end of mStreams produce output
   * before they receive input. They correspond to DelayNodes that are in
   * cycles.
   */
  uint32_t mFirstCycleBreaker;
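
  // Illustrative sketch only: how a consumer might walk just the
  // cycle-breaking streams, given the invariant documented above (again
  // assuming mStreams is an nsTArray<MediaStream*>).
  //
  //   for (uint32_t i = mFirstCycleBreaker; i < mStreams.Length(); ++i) {
  //     MediaStream* breaker = mStreams[i];
  //     // ... these streams produce output before receiving input ...
  //   }
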
  /**
   * Blocking decisions have been computed up to this time.
   * Between each iteration, this is the same as mProcessedTime.
   */
  GraphTime mStateComputedTime = 0;

  /**
   * All stream contents have been computed up to this time.
   * The next batch of updates from the main thread will be processed
   * at this time. This is behind mStateComputedTime during processing.
   */
  GraphTime mProcessedTime = 0;

  /**
   * Time at which we last updated the main thread with the graph state.
   */
  TimeStamp mLastMainThreadUpdate;

  /**
   * Number of active MediaInputPorts
   */
  int32_t mPortCount;

  /**
   * Devices to use for cubeb input & output, and booleans to control
   * whether we want input and/or output at all.
   */
  bool mInputWanted;
  int mInputDeviceID;
  bool mOutputWanted;
  int mOutputDeviceID;

  // Maps AudioDataListeners to a usecount of streams using the listener
  // so we can know when it's no longer in use.
  nsDataHashtable<nsPtrHashKey<AudioDataListener>, uint32_t> mInputDeviceUsers;
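
  // Illustrative sketch only: the usecount bookkeeping this hashtable is
  // meant for. OpenListenerSketch()/CloseListenerSketch() are invented
  // names; the real logic lives in the audio-input open/close paths.
  //
  //   void OpenListenerSketch(AudioDataListener* aListener)
  //   {
  //     uint32_t count = 0;
  //     mInputDeviceUsers.Get(aListener, &count);      // 0 if not present
  //     mInputDeviceUsers.Put(aListener, count + 1);
  //   }
  //
  //   void CloseListenerSketch(AudioDataListener* aListener)
  //   {
  //     uint32_t count = 0;
  //     mInputDeviceUsers.Get(aListener, &count);
  //     if (count <= 1) {
  //       mInputDeviceUsers.Remove(aListener);          // last user is gone
  //     } else {
  //       mInputDeviceUsers.Put(aListener, count - 1);
  //     }
  //   }
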
  // True if the graph needs another iteration after the current iteration.
  Atomic<bool> mNeedAnotherIteration;
  // GraphDriver may need a WakeUp() if something changes.
  Atomic<bool> mGraphDriverAsleep;
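
  // Illustrative sketch only of how these two flags are typically used
  // together; this is roughly what an "ensure another iteration" helper
  // would do, not a verbatim copy of the real method.
  //
  //   void EnsureNextIterationSketch()
  //   {
  //     mNeedAnotherIteration = true;    // atomic store
  //     if (mGraphDriverAsleep) {
  //       // Only bother waking the driver if it is actually blocked.
  //       MonitorAutoLock lock(mMonitor);
  //       CurrentDriver()->WakeUp();
  //     }
  //   }
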
  // mMonitor guards the data below.
  // MediaStreamGraph normally does its work without holding mMonitor, so it
  // is not safe to just grab mMonitor from some thread and start monkeying
  // with the graph. Instead, communicate with the graph thread using the
  // provided mechanisms such as the ControlMessage queue.
  Monitor mMonitor;

  // Data guarded by mMonitor (must always be accessed with mMonitor held,
  // regardless of the value of mLifecycleState).

  /**
   * State to copy to the main thread.
   */
  nsTArray<StreamUpdate> mStreamUpdates;
  /**
   * Runnables to run after the next update to main thread state.
   */
  nsTArray<nsCOMPtr<nsIRunnable> > mUpdateRunnables;
  /**
   * A list of batches of messages to process. Each batch is processed
   * as an atomic unit.
   */
  /*
   * Message queue processed by the MSG thread during an iteration.
   * Accessed on the graph thread only.
   */
  nsTArray<MessageBlock> mFrontMessageQueue;
  /*
   * Message queue in which the main thread appends messages.
   * Access guarded by mMonitor.
   */
  nsTArray<MessageBlock> mBackMessageQueue;

  /* True if there will be messages to process if we swap the message queues. */
  bool MessagesQueued() const
  {
    mMonitor.AssertCurrentThreadOwns();
    return !mBackMessageQueue.IsEmpty();
  }
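
  // Illustrative sketch only: how the graph thread might move main-thread
  // message batches into the front queue at the start of an iteration.
  // SwapMessageQueuesSketch() is an invented name; the real swap happens in
  // the iteration/driver code, under mMonitor, with the front queue already
  // emptied by the previous iteration.
  //
  //   void SwapMessageQueuesSketch()
  //   {
  //     MonitorAutoLock lock(mMonitor);
  //     if (MessagesQueued()) {
  //       // Hand every pending batch to the graph thread in O(1);
  //       // mBackMessageQueue is left empty for the main thread to refill.
  //       mFrontMessageQueue.SwapElements(mBackMessageQueue);
  //     }
  //   }
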
  /**
   * This enum specifies where this graph is in its lifecycle. This is used
   * to control shutdown.
   * Shutdown is tricky because it can happen in two different ways:
   * 1) Shutdown due to inactivity. RunThread() detects that it has no
   * pending messages and no streams, and exits. The next RunInStableState()
   * checks if there are new pending messages from the main thread (true only
   * if new stream creation raced with shutdown); if there are, it revives
   * RunThread(), otherwise it commits to shutting down the graph. New stream
   * creation after this point will create a new graph. An async event is
   * dispatched to Shutdown() the graph's threads and then delete the graph
   * object.
   * 2) Forced shutdown at application shutdown, or completion of a
   * non-realtime graph. A flag is set, RunThread() detects the flag and
   * exits, the next RunInStableState() detects the flag, and dispatches the
   * async event to Shutdown() the graph's threads. However the graph object
   * is not deleted. New messages for the graph are processed synchronously
   * on the main thread if necessary. When the last stream is destroyed, the
   * graph object is deleted.
   *
   * This should be kept in sync with the LifecycleState_str array in
   * MediaStreamGraph.cpp
   */
  enum LifecycleState
  {
    // The graph thread hasn't started yet.
    LIFECYCLE_THREAD_NOT_STARTED,
    // RunThread() is running normally.
    LIFECYCLE_RUNNING,
    // In the following states, the graph thread is not running so
    // all "graph thread only" state in this class can be used safely
    // on the main thread.
    // RunThread() has exited and we're waiting for the next
    // RunInStableState(), at which point we can clean up the main-thread
    // side of the graph.
    LIFECYCLE_WAITING_FOR_MAIN_THREAD_CLEANUP,
    // RunInStableState() posted a ShutdownRunnable, and we're waiting for it
    // to shut down the graph thread(s).
    LIFECYCLE_WAITING_FOR_THREAD_SHUTDOWN,
    // Graph threads have shut down but we're waiting for remaining streams
    // to be destroyed. Only happens during application shutdown and on
    // completed non-realtime graphs, since normally we'd only shut down a
    // realtime graph when it has no streams.
    LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION
  };
  LifecycleState mLifecycleState;
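
  // Illustrative sketch only: a predicate built on the state machine above.
  // MainThreadCanAccessGraphStateSketch() is an invented name; real callers
  // compare mLifecycleState directly, always with mMonitor held as the
  // comments above require.
  //
  //   bool MainThreadCanAccessGraphStateSketch()
  //   {
  //     mMonitor.AssertCurrentThreadOwns();
  //     // Per the enum comments, anything past LIFECYCLE_RUNNING means the
  //     // graph thread is no longer running, so "graph thread only" state
  //     // may be touched from the main thread.
  //     return mLifecycleState > LIFECYCLE_RUNNING;
  //   }
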
  /**
   * The graph should stop processing at or after this time.
   */
  GraphTime mEndTime;

  /**
   * True when we need to do a forced shutdown during application shutdown.
   */
  bool mForceShutDown;

  /**
   * Drop this reference during shutdown to unblock shutdown.
   */
  RefPtr<ShutdownTicket> mForceShutdownTicket;

  /**
   * True when we have posted an event to the main thread to run
   * RunInStableState() and the event hasn't run yet.
   */
  bool mPostedRunInStableStateEvent;

  // Main thread only

  /**
   * Messages posted by the current event loop task. These are forwarded to
   * the media graph thread during RunInStableState. We can't forward them
   * immediately because we want all messages between stable states to be
   * processed as an atomic batch.
   */
  nsTArray<UniquePtr<ControlMessage>> mCurrentTaskMessageQueue;
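
  // Illustrative sketch only: the main-thread half of message delivery that
  // the comment above describes. AppendMessageSketch() is an invented name,
  // and it assumes a helper along the lines of EnsureRunInStableState() that
  // posts the stable-state runnable; the real AppendMessage() also handles
  // the shutdown cases documented with ControlMessage::RunDuringShutdown().
  //
  //   void AppendMessageSketch(UniquePtr<ControlMessage> aMessage)
  //   {
  //     MOZ_ASSERT(NS_IsMainThread());
  //     // Queue locally; the whole batch is handed to the graph thread as
  //     // one MessageBlock during the next RunInStableState().
  //     mCurrentTaskMessageQueue.AppendElement(Move(aMessage));
  //     EnsureRunInStableState();
  //   }
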
  /**
   * True when RunInStableState has determined that mLifecycleState is >
   * LIFECYCLE_RUNNING. Since only the main thread can reset mLifecycleState
   * to LIFECYCLE_RUNNING, this can be relied on to not change unexpectedly.
   */
  bool mDetectedNotRunning;
  /**
   * True when a stable state runner has been posted to the appshell to run
   * RunInStableState at the next stable state.
   */
  bool mPostedRunInStableState;
  /**
   * True when processing real-time audio/video. False when processing
   * non-realtime audio.
   */
  bool mRealtime;
  /**
   * True when a non-realtime MediaStreamGraph has started to process input.
   * This value is only accessed on the main thread.
   */
  bool mNonRealtimeProcessing;
  /**
   * True when a change has happened which requires us to recompute the
   * stream blocking order.
   */
  bool mStreamOrderDirty;
  /**
   * Hold a ref to the latency logger.
   */
  RefPtr<AsyncLatencyLogger> mLatencyLog;
  AudioMixer mMixer;
#ifdef MOZ_WEBRTC
  RefPtr<AudioOutputObserver> mFarendObserverRef;
#endif

  dom::AudioChannel AudioChannel() const { return mAudioChannel; }

private:
  virtual ~MediaStreamGraphImpl();

  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  /**
   * Used to signal that a memory report has been requested.
   */
  Monitor mMemoryReportMonitor;
  /**
   * This class uses manual memory management, and all pointers to it are raw
   * pointers. However, in order for it to implement nsIMemoryReporter, it
   * needs to implement nsISupports and so be ref-counted. So it maintains a
   * single RefPtr to itself, giving it a ref-count of 1 during its entire
   * lifetime, and Destroy() nulls this self-reference in order to trigger
   * self-deletion.
   */
  RefPtr<MediaStreamGraphImpl> mSelfRef;
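
  // Illustrative sketch only of the ownership pattern described above.
  // DestroySketch() is an invented name standing in for the real Destroy().
  //
  //   void DestroySketch()
  //   {
  //     MOZ_ASSERT(NS_IsMainThread());
  //     // Keep a stack reference so members stay valid for the rest of this
  //     // call, then break the intentional self-cycle; once the stack
  //     // reference goes out of scope the object can delete itself.
  //     RefPtr<MediaStreamGraphImpl> self = this;
  //     mSelfRef = nullptr;
  //   }
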
  /**
   * Used to pass memory report information across threads.
   */
  nsTArray<AudioNodeSizes> mAudioStreamSizes;

  struct WindowAndStream
  {
    uint64_t mWindowId;
    RefPtr<ProcessedMediaStream> mCaptureStreamSink;
  };
  /**
   * Stream for window audio capture.
   */
  nsTArray<WindowAndStream> mWindowCaptureStreams;
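
  // Illustrative sketch only: looking up the capture stream for a given
  // window id in this (small) array. FindCaptureStreamSketch() is an
  // invented name.
  //
  //   ProcessedMediaStream* FindCaptureStreamSketch(uint64_t aWindowId)
  //   {
  //     for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); ++i) {
  //       if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
  //         return mWindowCaptureStreams[i].mCaptureStreamSink;
  //       }
  //     }
  //     return nullptr;   // no capture requested for this window
  //   }
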
  /**
   * Indicates that the MSG thread should gather data for a memory report.
   */
  bool mNeedsMemoryReport;

#ifdef DEBUG
  /**
   * Used to assert when AppendMessage() runs ControlMessages synchronously.
   */
  bool mCanRunMessagesSynchronously;
#endif

  dom::AudioChannel mAudioChannel;
};

} // namespace mozilla

#endif /* MOZILLA_MEDIASTREAMGRAPHIMPL_H_ */