Backed out changeset 7fc52c48e6e3 (bug 1094764) for mulet m-3 perma failure in /test_dataChannel_basicAudioVideo.html

This commit is contained in:
Carsten "Tomcat" Book 2015-04-09 13:44:27 +02:00
parent aa270b640c
commit 31c4421faf
20 changed files with 191 additions and 1528 deletions

View File

@ -13057,8 +13057,7 @@ nsGlobalWindow::SuspendTimeouts(uint32_t aIncrease,
// Suspend all of the AudioContexts for this window
for (uint32_t i = 0; i < mAudioContexts.Length(); ++i) {
ErrorResult dummy;
nsRefPtr<Promise> d = mAudioContexts[i]->Suspend(dummy);
mAudioContexts[i]->Suspend();
}
}
@ -13118,8 +13117,7 @@ nsGlobalWindow::ResumeTimeouts(bool aThawChildren)
// Resume all of the AudioContexts for this window
for (uint32_t i = 0; i < mAudioContexts.Length(); ++i) {
ErrorResult dummy;
nsRefPtr<Promise> d = mAudioContexts[i]->Resume(dummy);
mAudioContexts[i]->Resume();
}
// Thaw all of the workers for this window.
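In each loop above, the paired lines are the backout's before/after: the removed patch called the promise-returning Suspend/Resume and deliberately discarded the result. A minimal sketch of that fire-and-forget call shape, reusing the names from the hunks (illustration only, assuming the in-tree types):

ErrorResult dummy;  // required by the promise-returning signature, then ignored
nsRefPtr<Promise> discarded = mAudioContexts[i]->Suspend(dummy);
// 'discarded' dies at end of scope: nsGlobalWindow has no caller to report
// suspension back to, so the promise is intentionally dropped.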

View File

@ -23,7 +23,7 @@ extern PRLogModuleInfo* gMediaStreamGraphLog;
#ifdef ENABLE_LIFECYCLE_LOG
#ifdef ANDROID
#include "android/log.h"
#define LIFECYCLE_LOG(...) __android_log_print(ANDROID_LOG_INFO, "Gecko - MSG" , __VA_ARGS__); printf(__VA_ARGS__);printf("\n");
#define LIFECYCLE_LOG(args...) __android_log_print(ANDROID_LOG_INFO, "Gecko - MSG" , ## __VA_ARGS__); printf(__VA_ARGS__);printf("\n");
#else
#define LIFECYCLE_LOG(...) printf(__VA_ARGS__);printf("\n");
#endif
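The two LIFECYCLE_LOG lines above differ in variadic-macro style: the plain `...`/`__VA_ARGS__` form is standard C99/C++11, while the restored Android line combines GNU extensions (`args...` named variadics and comma-swallowing `## __VA_ARGS__`) that only GCC-compatible compilers accept. A standalone sketch of both forms (not the tree's macros):

#include <cstdio>

// Standard form: the format string and arguments are all __VA_ARGS__.
#define LOG_STD(...) printf(__VA_ARGS__); printf("\n");
// GNU named-variadic form, usable only on GCC/Clang.
#define LOG_GNU(args...) printf(args); printf("\n");

int main() {
  LOG_STD("Switching to new driver: %p", (void*)0);
  LOG_GNU("AsyncCubebOperation::%s", "INIT");
  return 0;
}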
@ -95,6 +95,9 @@ void GraphDriver::SwitchAtNextIteration(GraphDriver* aNextDriver)
LIFECYCLE_LOG("Switching to new driver: %p (%s)",
aNextDriver, aNextDriver->AsAudioCallbackDriver() ?
"AudioCallbackDriver" : "SystemClockDriver");
// Sometimes we switch twice to a new driver per iteration; this is probably
// a bug.
MOZ_ASSERT(!mNextDriver || mNextDriver->AsAudioCallbackDriver());
mNextDriver = aNextDriver;
}
@ -142,7 +145,7 @@ public:
LIFECYCLE_LOG("Releasing audio driver off main thread.");
nsRefPtr<AsyncCubebTask> releaseEvent =
new AsyncCubebTask(mDriver->AsAudioCallbackDriver(),
AsyncCubebOperation::SHUTDOWN);
AsyncCubebTask::SHUTDOWN);
mDriver = nullptr;
releaseEvent->Dispatch();
} else {
@ -160,7 +163,7 @@ void GraphDriver::Shutdown()
if (AsAudioCallbackDriver()) {
LIFECYCLE_LOG("Releasing audio driver off main thread (GraphDriver::Shutdown).\n");
nsRefPtr<AsyncCubebTask> releaseEvent =
new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebOperation::SHUTDOWN);
new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebTask::SHUTDOWN);
releaseEvent->Dispatch();
} else {
Stop();
@ -201,7 +204,7 @@ public:
// because the osx audio stack is currently switching output device.
if (!mDriver->mPreviousDriver->AsAudioCallbackDriver()->IsSwitchingDevice()) {
nsRefPtr<AsyncCubebTask> releaseEvent =
new AsyncCubebTask(mDriver->mPreviousDriver->AsAudioCallbackDriver(), AsyncCubebOperation::SHUTDOWN);
new AsyncCubebTask(mDriver->mPreviousDriver->AsAudioCallbackDriver(), AsyncCubebTask::SHUTDOWN);
mDriver->mPreviousDriver = nullptr;
releaseEvent->Dispatch();
}
@ -502,21 +505,36 @@ AsyncCubebTask::Run()
MOZ_ASSERT(mDriver);
switch(mOperation) {
case AsyncCubebOperation::INIT: {
case AsyncCubebOperation::INIT:
LIFECYCLE_LOG("AsyncCubebOperation::INIT\n");
mDriver->Init();
mDriver->CompleteAudioContextOperations(mOperation);
break;
}
case AsyncCubebOperation::SHUTDOWN: {
case AsyncCubebOperation::SHUTDOWN:
LIFECYCLE_LOG("AsyncCubebOperation::SHUTDOWN\n");
mDriver->Stop();
mDriver->CompleteAudioContextOperations(mOperation);
mDriver = nullptr;
mShutdownGrip = nullptr;
break;
case AsyncCubebOperation::SLEEP: {
{
LIFECYCLE_LOG("AsyncCubebOperation::SLEEP\n");
MonitorAutoLock mon(mDriver->mGraphImpl->GetMonitor());
// We might just have been awoken
if (mDriver->mGraphImpl->mNeedAnotherIteration) {
mDriver->mPauseRequested = false;
mDriver->mWaitState = AudioCallbackDriver::WAITSTATE_RUNNING;
mDriver->mGraphImpl->mGraphDriverAsleep = false; // atomic
break;
}
mDriver->Stop();
mDriver->mGraphImpl->mGraphDriverAsleep = true; // atomic
mDriver->mWaitState = AudioCallbackDriver::WAITSTATE_WAITING_INDEFINITELY;
mDriver->mPauseRequested = false;
mDriver->mGraphImpl->GetMonitor().Wait(PR_INTERVAL_NO_TIMEOUT);
}
STREAM_LOG(PR_LOG_DEBUG, ("Restarting audio stream from sleep."));
mDriver->StartStream();
break;
}
default:
MOZ_CRASH("Operation not implemented.");
@ -528,16 +546,6 @@ AsyncCubebTask::Run()
return NS_OK;
}
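The SLEEP branch above is a check-then-wait handshake on the graph monitor: after taking the lock, re-check mNeedAnotherIteration (a wake-up may have raced in), otherwise stop the stream, flag the driver asleep, and block until signalled. A distilled, self-contained sketch of the same shape, with the Mozilla monitor swapped for std primitives:

#include <condition_variable>
#include <mutex>

std::mutex graphMonitor;            // stands in for mGraphImpl->GetMonitor()
std::condition_variable wakeSignal;
bool needAnotherIteration = false;  // set by whoever needs the graph running
bool graphDriverAsleep = false;     // mirrors the mGraphDriverAsleep atomic

void SleepUntilNeeded() {
  std::unique_lock<std::mutex> lock(graphMonitor);
  if (needAnotherIteration) {
    return;                         // we might just have been awoken: skip the sleep
  }
  graphDriverAsleep = true;         // Stop() would happen here in the real driver
  wakeSignal.wait(lock, [] { return needAnotherIteration; });
  graphDriverAsleep = false;
  // The caller restarts the audio stream, as Run() does after the Wait().
}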
StreamAndPromiseForOperation::StreamAndPromiseForOperation(MediaStream* aStream,
void* aPromise,
dom::AudioContextOperation aOperation)
: mStream(aStream)
, mPromise(aPromise)
, mOperation(aOperation)
{
// MOZ_ASSERT(aPromise);
}
AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl, dom::AudioChannel aChannel)
: GraphDriver(aGraphImpl)
, mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS)
@ -553,9 +561,7 @@ AudioCallbackDriver::AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl, dom::
}
AudioCallbackDriver::~AudioCallbackDriver()
{
MOZ_ASSERT(mPromisesForOperation.IsEmpty());
}
{}
void
AudioCallbackDriver::Init()
@ -645,18 +651,12 @@ AudioCallbackDriver::Start()
if (NS_IsMainThread()) {
STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
nsRefPtr<AsyncCubebTask> initEvent =
new AsyncCubebTask(this, AsyncCubebOperation::INIT);
new AsyncCubebTask(this, AsyncCubebTask::INIT);
initEvent->Dispatch();
} else {
STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from the previous driver's thread", mGraphImpl));
Init();
// Check if we need to resolve promises because the driver just got switched
// because of a resuming AudioContext
if (!mPromisesForOperation.IsEmpty()) {
CompleteAudioContextOperations(AsyncCubebOperation::INIT);
}
if (mPreviousDriver) {
nsCOMPtr<nsIRunnable> event =
new MediaStreamGraphShutdownThreadRunnable(mPreviousDriver);
@ -704,7 +704,7 @@ AudioCallbackDriver::Revive()
} else {
STREAM_LOG(PR_LOG_DEBUG, ("Starting audio threads for MediaStreamGraph %p from a new thread.", mGraphImpl));
nsRefPtr<AsyncCubebTask> initEvent =
new AsyncCubebTask(this, AsyncCubebOperation::INIT);
new AsyncCubebTask(this, AsyncCubebTask::INIT);
initEvent->Dispatch();
}
}
@ -729,6 +729,20 @@ AudioCallbackDriver::GetCurrentTime()
void AudioCallbackDriver::WaitForNextIteration()
{
#if 0
mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
// We can't block on the monitor in the audio callback, so we kick off a new
// thread that will pause the audio stream, and restart it when unblocked.
// We don't want to sleep when we haven't started the driver yet.
if (!mGraphImpl->mNeedAnotherIteration && mAudioStream && mGraphImpl->Running()) {
STREAM_LOG(PR_LOG_DEBUG+1, ("AudioCallbackDriver going to sleep"));
mPauseRequested = true;
nsRefPtr<AsyncCubebTask> sleepEvent =
new AsyncCubebTask(this, AsyncCubebTask::SLEEP);
sleepEvent->Dispatch();
}
#endif
}
void
@ -1060,47 +1074,5 @@ AudioCallbackDriver::IsStarted() {
return mStarted;
}
void
AudioCallbackDriver::EnqueueStreamAndPromiseForOperation(MediaStream* aStream,
void* aPromise,
dom::AudioContextOperation aOperation)
{
MonitorAutoLock mon(mGraphImpl->GetMonitor());
mPromisesForOperation.AppendElement(StreamAndPromiseForOperation(aStream,
aPromise,
aOperation));
}
void AudioCallbackDriver::CompleteAudioContextOperations(AsyncCubebOperation aOperation)
{
nsAutoTArray<StreamAndPromiseForOperation, 1> array;
// We can't lock for the whole function because AudioContextOperationCompleted
// will grab the monitor
{
MonitorAutoLock mon(GraphImpl()->GetMonitor());
array.SwapElements(mPromisesForOperation);
}
for (int32_t i = array.Length() - 1; i >= 0; i--) {
StreamAndPromiseForOperation& s = array[i];
if ((aOperation == AsyncCubebOperation::INIT &&
s.mOperation == AudioContextOperation::Resume) ||
(aOperation == AsyncCubebOperation::SHUTDOWN &&
s.mOperation != AudioContextOperation::Resume)) {
GraphImpl()->AudioContextOperationCompleted(s.mStream,
s.mPromise,
s.mOperation);
array.RemoveElementAt(i);
}
}
if (!array.IsEmpty()) {
MonitorAutoLock mon(GraphImpl()->GetMonitor());
mPromisesForOperation.AppendElements(array);
}
}
} // namespace mozilla

View File

@ -13,7 +13,6 @@
#include "AudioSegment.h"
#include "SelfRef.h"
#include "mozilla/Atomics.h"
#include "AudioContext.h"
struct cubeb_stream;
@ -322,21 +321,6 @@ private:
GraphTime mSlice;
};
struct StreamAndPromiseForOperation
{
StreamAndPromiseForOperation(MediaStream* aStream,
void* aPromise,
dom::AudioContextOperation aOperation);
nsRefPtr<MediaStream> mStream;
void* mPromise;
dom::AudioContextOperation mOperation;
};
enum AsyncCubebOperation {
INIT,
SHUTDOWN
};
/**
* This is a graph driver that is based on callback functions called by the
* audio api. This ensures minimal audio latency, because it means there is no
@ -408,12 +392,6 @@ public:
return this;
}
/* Enqueue a promise that is going to be resolved when a specific operation
* occurs on the cubeb stream. */
void EnqueueStreamAndPromiseForOperation(MediaStream* aStream,
void* aPromise,
dom::AudioContextOperation aOperation);
bool IsSwitchingDevice() {
#ifdef XP_MACOSX
return mSelfReference;
@ -436,8 +414,6 @@ public:
/* Tell the driver whether this process is using a microphone or not. This is
* thread safe. */
void SetMicrophoneActive(bool aActive);
void CompleteAudioContextOperations(AsyncCubebOperation aOperation);
private:
/**
* On certain MacBookPro, the microphone is located near the left speaker.
@ -495,7 +471,6 @@ private:
/* Thread for off-main-thread initialization and
* shutdown of the audio stream. */
nsCOMPtr<nsIThread> mInitShutdownThread;
nsAutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
dom::AudioChannel mAudioChannel;
Atomic<bool> mInCallback;
/* A thread has been created to be able to pause and restart the audio thread,
@ -523,6 +498,12 @@ private:
class AsyncCubebTask : public nsRunnable
{
public:
enum AsyncCubebOperation {
INIT,
SHUTDOWN,
SLEEP
};
AsyncCubebTask(AudioCallbackDriver* aDriver, AsyncCubebOperation aOperation);
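Because the restored enum is unscoped but nested in AsyncCubebTask (and regains the SLEEP value), its enumerators are reached through the class name; that is why every call site in GraphDriver.cpp flips from AsyncCubebOperation::X back to AsyncCubebTask::X. A minimal sketch of the scoping rule:

class Task {
public:
  enum Operation { INIT, SHUTDOWN, SLEEP };  // unscoped, but class-scoped names
  explicit Task(Operation aOp) : mOp(aOp) {}
private:
  Operation mOp;
};

Task t(Task::INIT);  // qualified through the class, like AsyncCubebTask::INIT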

View File

@ -24,7 +24,6 @@
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "mozilla/dom/AudioContextBinding.h"
#include <algorithm>
#include "DOMMediaStream.h"
#include "GeckoProfiler.h"
@ -103,31 +102,12 @@ MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
SetStreamOrderDirty();
}
static const GraphTime START_TIME_DELAYED = -1;
void
MediaStreamGraphImpl::AddStream(MediaStream* aStream)
{
// Check if we're adding a stream to a suspended context, in which case we
// add it to mSuspendedStreams, and delay setting mBufferStartTime
bool contextSuspended = false;
if (aStream->AsAudioNodeStream()) {
for (uint32_t i = 0; i < mSuspendedStreams.Length(); i++) {
if (aStream->AudioContextId() == mSuspendedStreams[i]->AudioContextId()) {
contextSuspended = true;
}
}
}
if (contextSuspended) {
aStream->mBufferStartTime = START_TIME_DELAYED;
mSuspendedStreams.AppendElement(aStream);
STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
} else {
aStream->mBufferStartTime = IterationEnd();
mStreams.AppendElement(aStream);
STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph", aStream));
}
aStream->mBufferStartTime = IterationEnd();
mStreams.AppendElement(aStream);
STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph", aStream));
SetStreamOrderDirty();
}
@ -151,8 +131,6 @@ MediaStreamGraphImpl::RemoveStream(MediaStream* aStream)
SetStreamOrderDirty();
mStreams.RemoveElement(aStream);
mSuspendedStreams.RemoveElement(aStream);
NS_RELEASE(aStream); // probably destroying it
STREAM_LOG(PR_LOG_DEBUG, ("Removing media stream %p from the graph", aStream));
@ -402,64 +380,49 @@ MediaStreamGraphImpl::UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime, Gr
{
nsTArray<MediaStream*> streamsReadyToFinish;
nsAutoTArray<bool,800> streamHasOutput;
nsTArray<MediaStream*>* runningAndSuspendedPair[2];
runningAndSuspendedPair[0] = &mStreams;
runningAndSuspendedPair[1] = &mSuspendedStreams;
streamHasOutput.SetLength(mStreams.Length());
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
for (uint32_t array = 0; array < 2; array++) {
for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
MediaStream* stream = (*runningAndSuspendedPair[array])[i];
// Calculate blocked time and fire Blocked/Unblocked events
GraphTime blockedTime = 0;
GraphTime t = aPrevCurrentTime;
// include |nextCurrentTime| to ensure NotifyBlockingChanged() is called
// before NotifyEvent(this, EVENT_FINISHED) when |nextCurrentTime ==
// stream end time|
while (t <= aNextCurrentTime) {
GraphTime end;
bool blocked = stream->mBlocked.GetAt(t, &end);
if (blocked) {
blockedTime += std::min(end, aNextCurrentTime) - t;
}
if (blocked != stream->mNotifiedBlocked) {
for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
MediaStreamListener* l = stream->mListeners[j];
l->NotifyBlockingChanged(this, blocked
? MediaStreamListener::BLOCKED
: MediaStreamListener::UNBLOCKED);
}
stream->mNotifiedBlocked = blocked;
}
t = end;
// Calculate blocked time and fire Blocked/Unblocked events
GraphTime blockedTime = 0;
GraphTime t = aPrevCurrentTime;
// include |nextCurrentTime| to ensure NotifyBlockingChanged() is called
// before NotifyEvent(this, EVENT_FINISHED) when |nextCurrentTime == stream end time|
while (t <= aNextCurrentTime) {
GraphTime end;
bool blocked = stream->mBlocked.GetAt(t, &end);
if (blocked) {
blockedTime += std::min(end, aNextCurrentTime) - t;
}
stream->AdvanceTimeVaryingValuesToCurrentTime(aNextCurrentTime,
blockedTime);
// Advance mBlocked last so that implementations of
// AdvanceTimeVaryingValuesToCurrentTime can rely on the value of
// mBlocked.
stream->mBlocked.AdvanceCurrentTime(aNextCurrentTime);
if (runningAndSuspendedPair[array] == &mStreams) {
streamHasOutput[i] = blockedTime < aNextCurrentTime - aPrevCurrentTime;
// Make this an assertion when bug 957832 is fixed.
NS_WARN_IF_FALSE(
!streamHasOutput[i] || !stream->mNotifiedFinished,
"Shouldn't have already notified of finish *and* have output!");
if (stream->mFinished && !stream->mNotifiedFinished) {
streamsReadyToFinish.AppendElement(stream);
if (blocked != stream->mNotifiedBlocked) {
for (uint32_t j = 0; j < stream->mListeners.Length(); ++j) {
MediaStreamListener* l = stream->mListeners[j];
l->NotifyBlockingChanged(this,
blocked ? MediaStreamListener::BLOCKED : MediaStreamListener::UNBLOCKED);
}
stream->mNotifiedBlocked = blocked;
}
STREAM_LOG(PR_LOG_DEBUG + 1,
("MediaStream %p bufferStartTime=%f blockedTime=%f", stream,
MediaTimeToSeconds(stream->mBufferStartTime),
MediaTimeToSeconds(blockedTime)));
t = end;
}
stream->AdvanceTimeVaryingValuesToCurrentTime(aNextCurrentTime, blockedTime);
// Advance mBlocked last so that implementations of
// AdvanceTimeVaryingValuesToCurrentTime can rely on the value of mBlocked.
stream->mBlocked.AdvanceCurrentTime(aNextCurrentTime);
streamHasOutput[i] = blockedTime < aNextCurrentTime - aPrevCurrentTime;
// Make this an assertion when bug 957832 is fixed.
NS_WARN_IF_FALSE(!streamHasOutput[i] || !stream->mNotifiedFinished,
"Shouldn't have already notified of finish *and* have output!");
if (stream->mFinished && !stream->mNotifiedFinished) {
streamsReadyToFinish.AppendElement(stream);
}
STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p bufferStartTime=%f blockedTime=%f",
stream, MediaTimeToSeconds(stream->mBufferStartTime),
MediaTimeToSeconds(blockedTime)));
}
@ -557,21 +520,6 @@ MediaStreamGraphImpl::MarkConsumed(MediaStream* aStream)
}
}
bool
MediaStreamGraphImpl::StreamSuspended(MediaStream* aStream)
{
// Only AudioNodeStreams can be suspended, so we can shortcut here.
return aStream->AsAudioNodeStream() &&
mSuspendedStreams.IndexOf(aStream) != mSuspendedStreams.NoIndex;
}
namespace {
// Value of mCycleMarker for unvisited streams in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered streams in muted cycles.
const uint32_t IN_MUTED_CYCLE = 1;
}
void
MediaStreamGraphImpl::UpdateStreamOrder()
{
@ -579,6 +527,11 @@ MediaStreamGraphImpl::UpdateStreamOrder()
bool shouldAEC = false;
#endif
bool audioTrackPresent = false;
// Value of mCycleMarker for unvisited streams in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered streams in muted cycles.
const uint32_t IN_MUTED_CYCLE = 1;
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
stream->mIsConsumed = false;
@ -694,17 +647,10 @@ MediaStreamGraphImpl::UpdateStreamOrder()
// Not-visited input streams should be processed first.
// SourceMediaStreams have already been ordered.
for (uint32_t i = inputs.Length(); i--; ) {
if (StreamSuspended(inputs[i]->mSource)) {
continue;
}
auto input = inputs[i]->mSource->AsProcessedStream();
if (input && input->mCycleMarker == NOT_VISITED) {
// It can be that this stream has an input which is from a suspended
// AudioContext.
if (input->isInList()) {
input->remove();
dfsStack.insertFront(input);
}
input->remove();
dfsStack.insertFront(input);
}
}
continue;
@ -720,9 +666,6 @@ MediaStreamGraphImpl::UpdateStreamOrder()
// unless it is part of the cycle.
uint32_t cycleStackMarker = 0;
for (uint32_t i = inputs.Length(); i--; ) {
if (StreamSuspended(inputs[i]->mSource)) {
continue;
}
auto input = inputs[i]->mSource->AsProcessedStream();
if (input) {
cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
@ -818,37 +761,30 @@ MediaStreamGraphImpl::RecomputeBlocking(GraphTime aEndBlockingDecisions)
STREAM_LOG(PR_LOG_DEBUG+1, ("Media graph %p computing blocking for time %f",
this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime())));
nsTArray<MediaStream*>* runningAndSuspendedPair[2];
runningAndSuspendedPair[0] = &mStreams;
runningAndSuspendedPair[1] = &mSuspendedStreams;
for (uint32_t array = 0; array < 2; array++) {
for (uint32_t i = 0; i < (*runningAndSuspendedPair[array]).Length(); ++i) {
MediaStream* stream = (*runningAndSuspendedPair[array])[i];
if (!stream->mInBlockingSet) {
// Compute a partition of the streams containing 'stream' such that we can
// compute the blocking status of each subset independently.
nsAutoTArray<MediaStream*, 10> streamSet;
AddBlockingRelatedStreamsToSet(&streamSet, stream);
GraphTime end;
for (GraphTime t = CurrentDriver()->StateComputedTime();
t < aEndBlockingDecisions; t = end) {
end = GRAPH_TIME_MAX;
RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
}
}
}
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
if (!stream->mInBlockingSet) {
// Compute a partition of the streams containing 'stream' such that we can
// compute the blocking status of each subset independently.
nsAutoTArray<MediaStream*,10> streamSet;
AddBlockingRelatedStreamsToSet(&streamSet, stream);
GraphTime end;
stream->mBlocked.GetAt(IterationEnd(), &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
for (GraphTime t = CurrentDriver()->StateComputedTime();
t < aEndBlockingDecisions; t = end) {
end = GRAPH_TIME_MAX;
RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
}
}
}
GraphTime end;
stream->mBlocked.GetAt(IterationEnd(), &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
}
}
STREAM_LOG(PR_LOG_DEBUG+1, ("Media graph %p computed blocking for interval %f to %f",
this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime()),
@ -1062,6 +998,14 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
// sample. One sample may be played twice, but this should not happen
// again during an unblocked sequence of track samples.
StreamTime offset = GraphTimeToStreamTime(aStream, aFrom);
if (audioOutput.mLastTickWritten &&
audioOutput.mLastTickWritten != offset) {
// If there is a global underrun of the MSG, this property won't hold, and
// we reset the sample count tracking.
if (offset - audioOutput.mLastTickWritten == 1) {
offset = audioOutput.mLastTickWritten;
}
}
// We don't update aStream->mBufferStartTime here to account for time spent
// blocked. Instead, we'll update it in UpdateCurrentTimeForStreams after
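The added guard keeps the written-sample count contiguous across iterations: an offset exactly one tick past the last written tick is treated as rounding slop and rewound (one sample plays twice, none are skipped), while any larger gap is taken to be a global MSG underrun and the tracking simply restarts at the new offset. A self-contained sketch with hypothetical tick values:

#include <cstdint>

int64_t lastTickWritten = 480;  // hypothetical: last tick written to the output
int64_t offset = 481;           // hypothetical: offset recomputed this iteration

void FixupOffset() {
  if (lastTickWritten && lastTickWritten != offset) {
    if (offset - lastTickWritten == 1) {
      offset = lastTickWritten;  // rewind: tick 480 is written twice, none skipped
    }
    // else, e.g. offset == 960: global underrun; keep the new offset and let
    // the sample-count tracking restart from there.
  }
}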
@ -1093,13 +1037,11 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
} else {
StreamTime endTicksNeeded = offset + toWrite;
StreamTime endTicksAvailable = audio->GetDuration();
STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld samples for %f to %f (samples %ld to %ld)\n",
aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
offset, endTicksNeeded));
if (endTicksNeeded <= endTicksAvailable) {
STREAM_LOG(PR_LOG_DEBUG + 1,
("MediaStream %p writing %ld samples for %f to %f "
"(samples %ld to %ld)\n",
aStream, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
output.AppendSlice(*audio, offset, endTicksNeeded);
ticksWritten += toWrite;
offset = endTicksNeeded;
@ -1110,22 +1052,12 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
if (endTicksNeeded > endTicksAvailable &&
offset < endTicksAvailable) {
output.AppendSlice(*audio, offset, endTicksAvailable);
STREAM_LOG(PR_LOG_DEBUG + 1,
("MediaStream %p writing %ld samples for %f to %f "
"(samples %ld to %ld)\n",
aStream, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
uint32_t available = endTicksAvailable - offset;
ticksWritten += available;
toWrite -= available;
offset = endTicksAvailable;
}
output.AppendNullData(toWrite);
STREAM_LOG(PR_LOG_DEBUG + 1,
("MediaStream %p writing %ld padding slsamples for %f to "
"%f (samples %ld to %ld)\n",
aStream, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
ticksWritten += toWrite;
}
output.ApplyVolume(volume);
@ -1857,7 +1789,7 @@ MediaStreamGraphImpl::EnsureStableStateEventPosted()
void
MediaStreamGraphImpl::AppendMessage(ControlMessage* aMessage)
{
MOZ_ASSERT(NS_IsMainThread(), "main thread only");
NS_ASSERTION(NS_IsMainThread(), "main thread only");
NS_ASSERTION(!aMessage->GetStream() ||
!aMessage->GetStream()->IsDestroyed(),
"Stream already destroyed");
@ -2216,46 +2148,6 @@ MediaStream::ChangeExplicitBlockerCount(int32_t aDelta)
GraphImpl()->AppendMessage(new Message(this, aDelta));
}
void
MediaStream::BlockStreamIfNeeded()
{
class Message : public ControlMessage {
public:
explicit Message(MediaStream* aStream) : ControlMessage(aStream)
{ }
virtual void Run()
{
mStream->BlockStreamIfNeededImpl(
mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
}
};
if (mMainThreadDestroyed) {
return;
}
GraphImpl()->AppendMessage(new Message(this));
}
void
MediaStream::UnblockStreamIfNeeded()
{
class Message : public ControlMessage {
public:
explicit Message(MediaStream* aStream) : ControlMessage(aStream)
{ }
virtual void Run()
{
mStream->UnblockStreamIfNeededImpl(
mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
}
};
if (mMainThreadDestroyed) {
return;
}
GraphImpl()->AppendMessage(new Message(this));
}
void
MediaStream::AddListenerImpl(already_AddRefed<MediaStreamListener> aListener)
{
@ -3139,8 +3031,7 @@ MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, T
if (!aSampleRate) {
aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
}
AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(
aEngine, aSampleRate, aEngine->NodeMainThread()->Context()->Id());
AudioNodeExternalInputStream* stream = new AudioNodeExternalInputStream(aEngine, aSampleRate);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
@ -3157,12 +3048,7 @@ MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
if (!aSampleRate) {
aSampleRate = aEngine->NodeMainThread()->Context()->SampleRate();
}
// MediaRecorders use an AudioNodeStream, but no AudioNode
AudioNode* node = aEngine->NodeMainThread();
dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() :
NO_AUDIO_CONTEXT;
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate,
contextIdForStream);
AudioNodeStream* stream = new AudioNodeStream(aEngine, aKind, aSampleRate);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
@ -3175,273 +3061,6 @@ MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine,
return stream;
}
class GraphStartedRunnable final : public nsRunnable
{
public:
GraphStartedRunnable(AudioNodeStream* aStream, MediaStreamGraph* aGraph)
: mStream(aStream)
, mGraph(aGraph)
{ }
NS_IMETHOD Run() {
mGraph->NotifyWhenGraphStarted(mStream);
return NS_OK;
}
private:
nsRefPtr<AudioNodeStream> mStream;
MediaStreamGraph* mGraph;
};
void
MediaStreamGraph::NotifyWhenGraphStarted(AudioNodeStream* aStream)
{
class GraphStartedNotificationControlMessage : public ControlMessage
{
public:
explicit GraphStartedNotificationControlMessage(AudioNodeStream* aStream)
: ControlMessage(aStream)
{
}
virtual void Run()
{
// This runs on the graph thread, so when this runs, and the current
// driver is an AudioCallbackDriver, we know the audio hardware is
// started. If not, we are going to switch soon; keep reposting this
// ControlMessage.
MediaStreamGraphImpl* graphImpl = mStream->GraphImpl();
if (graphImpl->CurrentDriver()->AsAudioCallbackDriver()) {
nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
mStream->AsAudioNodeStream(), nullptr, AudioContextState::Running);
NS_DispatchToMainThread(event);
} else {
nsCOMPtr<nsIRunnable> event = new GraphStartedRunnable(
mStream->AsAudioNodeStream(), mStream->Graph());
NS_DispatchToMainThread(event);
}
}
virtual void RunDuringShutdown()
{
MOZ_ASSERT(false, "We should be reviving the graph?");
}
};
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->AppendMessage(new GraphStartedNotificationControlMessage(aStream));
}
void
MediaStreamGraphImpl::ResetVisitedStreamState()
{
// Reset the visited/consumed/blocked state of the streams.
nsTArray<MediaStream*>* runningAndSuspendedPair[2];
runningAndSuspendedPair[0] = &mStreams;
runningAndSuspendedPair[1] = &mSuspendedStreams;
for (uint32_t array = 0; array < 2; array++) {
for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
ProcessedMediaStream* ps =
(*runningAndSuspendedPair[array])[i]->AsProcessedStream();
if (ps) {
ps->mCycleMarker = NOT_VISITED;
ps->mIsConsumed = false;
ps->mInBlockingSet = false;
}
}
}
}
void
MediaStreamGraphImpl::StreamSetForAudioContext(dom::AudioContext::AudioContextId aAudioContextId,
mozilla::LinkedList<MediaStream>& aStreamSet)
{
nsTArray<MediaStream*>* runningAndSuspendedPair[2];
runningAndSuspendedPair[0] = &mStreams;
runningAndSuspendedPair[1] = &mSuspendedStreams;
for (uint32_t array = 0; array < 2; array++) {
for (uint32_t i = 0; i < runningAndSuspendedPair[array]->Length(); ++i) {
MediaStream* stream = (*runningAndSuspendedPair[array])[i];
if (aAudioContextId == stream->AudioContextId()) {
aStreamSet.insertFront(stream);
}
}
}
}
void
MediaStreamGraphImpl::MoveStreams(AudioContextOperation aAudioContextOperation,
mozilla::LinkedList<MediaStream>& aStreamSet)
{
// For our purpose, Suspend and Close are equivalent: we want to remove the
// streams from the set of streams that are going to be processed.
nsTArray<MediaStream*>& from =
aAudioContextOperation == AudioContextOperation::Resume ? mSuspendedStreams
: mStreams;
nsTArray<MediaStream*>& to =
aAudioContextOperation == AudioContextOperation::Resume ? mStreams
: mSuspendedStreams;
MediaStream* stream;
while ((stream = aStreamSet.getFirst())) {
// It is possible not to find the stream here, if there have been two
// suspend/resume/close calls in a row.
auto i = from.IndexOf(stream);
if (i != from.NoIndex) {
from.RemoveElementAt(i);
to.AppendElement(stream);
}
// If streams got added during a period where an AudioContext was suspended,
// set their buffer start time to the appropriate value now:
if (aAudioContextOperation == AudioContextOperation::Resume &&
stream->mBufferStartTime == START_TIME_DELAYED) {
stream->mBufferStartTime = IterationEnd();
}
stream->remove();
}
STREAM_LOG(PR_LOG_DEBUG, ("Moving streams between suspended and running"
"state: mStreams: %d, mSuspendedStreams: %d\n", mStreams.Length(),
mSuspendedStreams.Length()));
#ifdef DEBUG
// The intersection of the two arrays should be null.
for (uint32_t i = 0; i < mStreams.Length(); i++) {
for (uint32_t j = 0; j < mSuspendedStreams.Length(); j++) {
MOZ_ASSERT(
mStreams[i] != mSuspendedStreams[j],
"The suspended stream set and running stream set are not disjoint.");
}
}
#endif
}
void
MediaStreamGraphImpl::AudioContextOperationCompleted(MediaStream* aStream,
void* aPromise,
AudioContextOperation aOperation)
{
// This can be called from the thread created to do cubeb operations, or the
// MSG thread. The pointers passed back here are refcounted, so are still
// alive.
MonitorAutoLock lock(mMonitor);
AudioContextState state;
switch (aOperation) {
case Suspend: state = AudioContextState::Suspended; break;
case Resume: state = AudioContextState::Running; break;
case Close: state = AudioContextState::Closed; break;
default: MOZ_CRASH("Not handled.");
}
nsCOMPtr<nsIRunnable> event = new dom::StateChangeTask(
aStream->AsAudioNodeStream(), aPromise, state);
NS_DispatchToMainThread(event);
}
void
MediaStreamGraphImpl::ApplyAudioContextOperationImpl(AudioNodeStream* aStream,
AudioContextOperation aOperation,
void* aPromise)
{
MOZ_ASSERT(CurrentDriver()->OnThread());
mozilla::LinkedList<MediaStream> streamSet;
SetStreamOrderDirty();
ResetVisitedStreamState();
StreamSetForAudioContext(aStream->AudioContextId(), streamSet);
MoveStreams(aOperation, streamSet);
MOZ_ASSERT(!streamSet.getFirst(),
"Streams should be removed from the list after having been moved.");
// If we have suspended the last AudioContext, and we don't have other
// streams that have audio, this graph will automatically switch to a
// SystemClockDriver, because it can't find a MediaStream that has an audio
// track. When resuming, force switching to an AudioCallbackDriver. It would
// have happened at the next iteration anyway, but doing this now saves
// some time.
if (aOperation == AudioContextOperation::Resume) {
if (!CurrentDriver()->AsAudioCallbackDriver()) {
AudioCallbackDriver* driver = new AudioCallbackDriver(this);
driver->EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
mMixer.AddCallback(driver);
CurrentDriver()->SwitchAtNextIteration(driver);
} else {
// We are resuming a context, but we are already using an
// AudioCallbackDriver, so we can resolve the promise now.
AudioContextOperationCompleted(aStream, aPromise, aOperation);
}
}
// Close, suspend: check if we are going to switch to a
// SystemClockDriver, and pass the promise to the AudioCallbackDriver
// if that's the case, so it can notify the content.
// This is the same logic as in UpdateStreamOrder, but it's simpler to have it
// here as well so we don't have to store the Promise(s) on the Graph.
if (aOperation != AudioContextOperation::Resume) {
bool audioTrackPresent = false;
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
if (stream->AsAudioNodeStream()) {
audioTrackPresent = true;
}
for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer(), MediaSegment::AUDIO);
!tracks.IsEnded(); tracks.Next()) {
audioTrackPresent = true;
}
}
if (!audioTrackPresent && CurrentDriver()->AsAudioCallbackDriver()) {
CurrentDriver()->AsAudioCallbackDriver()->
EnqueueStreamAndPromiseForOperation(aStream, aPromise, aOperation);
SystemClockDriver* driver = new SystemClockDriver(this);
CurrentDriver()->SwitchAtNextIteration(driver);
} else {
// We are closing or suspending an AudioContext, but something else is
// using the audio stream, so we can resolve the promise now.
AudioContextOperationCompleted(aStream, aPromise, aOperation);
}
}
}
void
MediaStreamGraph::ApplyAudioContextOperation(AudioNodeStream* aNodeStream,
AudioContextOperation aOperation,
void* aPromise)
{
class AudioContextOperationControlMessage : public ControlMessage
{
public:
AudioContextOperationControlMessage(AudioNodeStream* aStream,
AudioContextOperation aOperation,
void* aPromise)
: ControlMessage(aStream)
, mAudioContextOperation(aOperation)
, mPromise(aPromise)
{
}
virtual void Run()
{
mStream->GraphImpl()->ApplyAudioContextOperationImpl(
mStream->AsAudioNodeStream(), mAudioContextOperation, mPromise);
}
virtual void RunDuringShutdown()
{
MOZ_ASSERT(false, "We should be reviving the graph?");
}
private:
AudioContextOperation mAudioContextOperation;
void* mPromise;
};
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->AppendMessage(
new AudioContextOperationControlMessage(aNodeStream, aOperation, aPromise));
}
bool
MediaStreamGraph::IsNonRealtime() const
{

View File

@ -22,7 +22,6 @@
#include <speex/speex_resampler.h>
#include "mozilla/dom/AudioChannelBinding.h"
#include "DOMMediaStream.h"
#include "AudioContext.h"
class nsIRunnable;
@ -319,7 +318,6 @@ public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream)
explicit MediaStream(DOMMediaStream* aWrapper);
virtual dom::AudioContext::AudioContextId AudioContextId() const { return 0; }
protected:
// Protected destructor, to discourage deletion outside of Release():
@ -366,8 +364,6 @@ public:
// Explicitly block. Useful for example if a media element is pausing
// and we need to stop its stream emitting its buffered data.
virtual void ChangeExplicitBlockerCount(int32_t aDelta);
void BlockStreamIfNeeded();
void UnblockStreamIfNeeded();
// Events will be dispatched by calling methods of aListener.
virtual void AddListener(MediaStreamListener* aListener);
virtual void RemoveListener(MediaStreamListener* aListener);
@ -469,22 +465,6 @@ public:
{
mExplicitBlockerCount.SetAtAndAfter(aTime, mExplicitBlockerCount.GetAt(aTime) + aDelta);
}
void BlockStreamIfNeededImpl(GraphTime aTime)
{
bool blocked = mExplicitBlockerCount.GetAt(aTime) > 0;
if (blocked) {
return;
}
ChangeExplicitBlockerCountImpl(aTime, 1);
}
void UnblockStreamIfNeededImpl(GraphTime aTime)
{
bool blocked = mExplicitBlockerCount.GetAt(aTime) > 0;
if (!blocked) {
return;
}
ChangeExplicitBlockerCountImpl(aTime, -1);
}
void AddListenerImpl(already_AddRefed<MediaStreamListener> aListener);
void RemoveListenerImpl(MediaStreamListener* aListener);
void RemoveAllListenersImpl();
@ -1247,21 +1227,6 @@ public:
CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine,
TrackRate aSampleRate = 0);
/* From the main thread, ask the MSG to send back an event when the graph
* thread is running, and audio is being processed. */
void NotifyWhenGraphStarted(AudioNodeStream* aNodeStream);
/* From the main thread, suspend, resume or close an AudioContext.
* aNodeStream is the stream of the DestinationNode of the AudioContext.
*
* This can possibly pause the graph thread, releasing system resources, if
* all streams have been suspended/closed.
*
* When the operation is complete, aPromise is resolved.
*/
void ApplyAudioContextOperation(AudioNodeStream* aNodeStream,
dom::AudioContextOperation aState,
void * aPromise);
bool IsNonRealtime() const;
/**
* Start processing non-realtime for a specific number of ticks.

View File

@ -248,49 +248,6 @@ public:
* Mark aStream and all its inputs (recursively) as consumed.
*/
static void MarkConsumed(MediaStream* aStream);
/**
* Given the Id of an AudioContext, return the set of all MediaStreams that
* are part of this context.
*/
void StreamSetForAudioContext(dom::AudioContext::AudioContextId aAudioContextId,
mozilla::LinkedList<MediaStream>& aStreamSet);
/**
* Called when a suspend/resume/close operation has been completed, on the
* graph thread.
*/
void AudioContextOperationCompleted(MediaStream* aStream,
void* aPromise,
dom::AudioContextOperation aOperation);
/**
* Apply an AudioContext operation (suspend/resume/close) on the graph
* thread.
*/
void ApplyAudioContextOperationImpl(AudioNodeStream* aStream,
dom::AudioContextOperation aOperation,
void* aPromise);
/*
* Move streams from mStreams to mSuspendedStreams when suspending/closing an
* AudioContext, or the inverse when resuming an AudioContext.
*/
void MoveStreams(dom::AudioContextOperation aAudioContextOperation,
mozilla::LinkedList<MediaStream>& aStreamSet);
/*
* Reset some state about the streams before suspending them, or resuming
* them.
*/
void ResetVisitedStreamState();
/*
* True if a stream is suspended, that is, not in mStreams but in
* mSuspendedStreams.
*/
bool StreamSuspended(MediaStream* aStream);
/**
* Sort mStreams so that every stream not in a cycle is after any streams
* it depends on, and every stream in a cycle is marked as being in a cycle.
@ -411,10 +368,7 @@ public:
/**
* Returns true when there are no active streams.
*/
bool IsEmpty()
{
return mStreams.IsEmpty() && mSuspendedStreams.IsEmpty() && mPortCount == 0;
}
bool IsEmpty() { return mStreams.IsEmpty() && mPortCount == 0; }
// For use by control messages, on graph thread only.
/**
@ -533,13 +487,6 @@ public:
* unnecessary thread-safe refcount changes.
*/
nsTArray<MediaStream*> mStreams;
/**
* This stores MediaStreams that are part of suspended AudioContexts.
* mStreams and mSuspendedStreams are disjoint sets: a stream is either suspended
* or not suspended. Suspended streams are not ordered in UpdateStreamOrder,
* and are therefore not doing any processing.
*/
nsTArray<MediaStream*> mSuspendedStreams;
/**
* Streams from mFirstCycleBreaker to the end of mStreams produce output
* before they receive input. They correspond to DelayNodes that are in

View File

@ -24,10 +24,10 @@
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "webaudio/MediaStreamAudioDestinationNode.h"
#include <algorithm>
#include "DOMMediaStream.h"
#include "GeckoProfiler.h"
#include "mozilla/unused.h"
#ifdef MOZ_WEBRTC
#include "AudioOutputObserver.h"
#endif
@ -275,16 +275,12 @@ TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
} else if (InMutedCycle()) {
segment->AppendNullData(ticks);
} else {
if (GraphImpl()->StreamSuspended(source)) {
segment->AppendNullData(aTo - aFrom);
} else {
MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
"Samples missing");
StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
segment->AppendSlice(*aInputTrack->GetSegment(),
std::min(inputTrackEndPoint, inputStart),
std::min(inputTrackEndPoint, inputEnd));
}
MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
"Samples missing");
StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
segment->AppendSlice(*aInputTrack->GetSegment(),
std::min(inputTrackEndPoint, inputStart),
std::min(inputTrackEndPoint, inputEnd));
}
ApplyTrackDisabling(outputTrack->GetID(), segment);
for (uint32_t j = 0; j < mListeners.Length(); ++j) {

View File

@ -9,8 +9,8 @@
#include "nsPIDOMWindow.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/HTMLMediaElement.h"
#include "mozilla/dom/AudioContextBinding.h"
#include "mozilla/dom/HTMLMediaElement.h"
#include "mozilla/dom/OfflineAudioContextBinding.h"
#include "mozilla/dom/OwningNonNull.h"
#include "MediaStreamGraph.h"
@ -42,10 +42,6 @@
namespace mozilla {
namespace dom {
// 0 is a special value that MediaStreams use to denote they are not part of an
// AudioContext.
static dom::AudioContext::AudioContextId gAudioContextId = 1;
NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
@ -89,15 +85,12 @@ AudioContext::AudioContext(nsPIDOMWindow* aWindow,
uint32_t aLength,
float aSampleRate)
: DOMEventTargetHelper(aWindow)
, mId(gAudioContextId++)
, mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate))
, mAudioContextState(AudioContextState::Suspended)
, mNumberOfChannels(aNumberOfChannels)
, mNodeCount(0)
, mIsOffline(aIsOffline)
, mIsStarted(!aIsOffline)
, mIsShutDown(false)
, mCloseCalled(false)
{
aWindow->AddAudioContext(this);
@ -204,22 +197,9 @@ AudioContext::Constructor(const GlobalObject& aGlobal,
return object.forget();
}
bool AudioContext::CheckClosed(ErrorResult& aRv)
{
if (mAudioContextState == AudioContextState::Closed) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return true;
}
return false;
}
already_AddRefed<AudioBufferSourceNode>
AudioContext::CreateBufferSource(ErrorResult& aRv)
AudioContext::CreateBufferSource()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<AudioBufferSourceNode> bufferNode =
new AudioBufferSourceNode(this);
return bufferNode.forget();
@ -267,10 +247,6 @@ AudioContext::CreateMediaStreamDestination(ErrorResult& aRv)
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<MediaStreamAudioDestinationNode> node =
new MediaStreamAudioDestinationNode(this);
return node.forget();
@ -290,10 +266,6 @@ AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<ScriptProcessorNode> scriptProcessor =
new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels,
aNumberOfOutputChannels);
@ -301,23 +273,15 @@ AudioContext::CreateScriptProcessor(uint32_t aBufferSize,
}
already_AddRefed<AnalyserNode>
AudioContext::CreateAnalyser(ErrorResult& aRv)
AudioContext::CreateAnalyser()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<AnalyserNode> analyserNode = new AnalyserNode(this);
return analyserNode.forget();
}
already_AddRefed<StereoPannerNode>
AudioContext::CreateStereoPanner(ErrorResult& aRv)
AudioContext::CreateStereoPanner()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<StereoPannerNode> stereoPannerNode = new StereoPannerNode(this);
return stereoPannerNode.forget();
}
@ -336,11 +300,6 @@ AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement,
return nullptr;
}
#endif
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<DOMMediaStream> stream = aMediaElement.MozCaptureStream(aRv,
mDestination->Stream()->Graph());
if (aRv.Failed()) {
@ -359,34 +318,21 @@ AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream,
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<MediaStreamAudioSourceNode> mediaStreamAudioSourceNode =
new MediaStreamAudioSourceNode(this, &aMediaStream);
return mediaStreamAudioSourceNode.forget();
}
already_AddRefed<GainNode>
AudioContext::CreateGain(ErrorResult& aRv)
AudioContext::CreateGain()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<GainNode> gainNode = new GainNode(this);
return gainNode.forget();
}
already_AddRefed<WaveShaperNode>
AudioContext::CreateWaveShaper(ErrorResult& aRv)
AudioContext::CreateWaveShaper()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<WaveShaperNode> waveShaperNode = new WaveShaperNode(this);
return waveShaperNode.forget();
}
@ -394,38 +340,25 @@ AudioContext::CreateWaveShaper(ErrorResult& aRv)
already_AddRefed<DelayNode>
AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv)
{
if (CheckClosed(aRv)) {
return nullptr;
}
if (aMaxDelayTime > 0. && aMaxDelayTime < 180.) {
nsRefPtr<DelayNode> delayNode = new DelayNode(this, aMaxDelayTime);
return delayNode.forget();
}
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;
}
already_AddRefed<PannerNode>
AudioContext::CreatePanner(ErrorResult& aRv)
AudioContext::CreatePanner()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<PannerNode> pannerNode = new PannerNode(this);
mPannerNodes.PutEntry(pannerNode);
return pannerNode.forget();
}
already_AddRefed<ConvolverNode>
AudioContext::CreateConvolver(ErrorResult& aRv)
AudioContext::CreateConvolver()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<ConvolverNode> convolverNode = new ConvolverNode(this);
return convolverNode.forget();
}
@ -439,10 +372,6 @@ AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv)
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<ChannelSplitterNode> splitterNode =
new ChannelSplitterNode(this, aNumberOfOutputs);
return splitterNode.forget();
@ -457,46 +386,30 @@ AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv)
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<ChannelMergerNode> mergerNode =
new ChannelMergerNode(this, aNumberOfInputs);
return mergerNode.forget();
}
already_AddRefed<DynamicsCompressorNode>
AudioContext::CreateDynamicsCompressor(ErrorResult& aRv)
AudioContext::CreateDynamicsCompressor()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<DynamicsCompressorNode> compressorNode =
new DynamicsCompressorNode(this);
return compressorNode.forget();
}
already_AddRefed<BiquadFilterNode>
AudioContext::CreateBiquadFilter(ErrorResult& aRv)
AudioContext::CreateBiquadFilter()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<BiquadFilterNode> filterNode =
new BiquadFilterNode(this);
return filterNode.forget();
}
already_AddRefed<OscillatorNode>
AudioContext::CreateOscillator(ErrorResult& aRv)
AudioContext::CreateOscillator()
{
if (CheckClosed(aRv)) {
return nullptr;
}
nsRefPtr<OscillatorNode> oscillatorNode =
new OscillatorNode(this);
return oscillatorNode.forget();
@ -684,239 +597,22 @@ AudioContext::Shutdown()
}
}
AudioContextState AudioContext::State() const
void
AudioContext::Suspend()
{
return mAudioContextState;
}
StateChangeTask::StateChangeTask(AudioContext* aAudioContext,
void* aPromise,
AudioContextState aNewState)
: mAudioContext(aAudioContext)
, mPromise(aPromise)
, mAudioNodeStream(nullptr)
, mNewState(aNewState)
{
MOZ_ASSERT(NS_IsMainThread(),
"This constructor should be used from the main thread.");
}
StateChangeTask::StateChangeTask(AudioNodeStream* aStream,
void* aPromise,
AudioContextState aNewState)
: mAudioContext(nullptr)
, mPromise(aPromise)
, mAudioNodeStream(aStream)
, mNewState(aNewState)
{
MOZ_ASSERT(!NS_IsMainThread(),
"This constructor should be used from the graph thread.");
}
NS_IMETHODIMP
StateChangeTask::Run()
{
MOZ_ASSERT(NS_IsMainThread());
if (!mAudioContext && !mAudioNodeStream) {
return NS_OK;
MediaStream* ds = DestinationStream();
if (ds) {
ds->ChangeExplicitBlockerCount(1);
}
if (mAudioNodeStream) {
AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread();
if (!node) {
return NS_OK;
}
mAudioContext = node->Context();
if (!mAudioContext) {
return NS_OK;
}
}
mAudioContext->OnStateChanged(mPromise, mNewState);
// We can't call Release() on the AudioContext on the MSG thread, so we
// unref it here, on the main thread.
mAudioContext = nullptr;
return NS_OK;
}
/* This runnable allows us to fire the "statechange" event */
class OnStateChangeTask final : public nsRunnable
{
public:
explicit OnStateChangeTask(AudioContext* aAudioContext)
: mAudioContext(aAudioContext)
{}
NS_IMETHODIMP
Run() override
{
nsCOMPtr<nsPIDOMWindow> parent = do_QueryInterface(mAudioContext->GetParentObject());
if (!parent) {
return NS_ERROR_FAILURE;
}
nsIDocument* doc = parent->GetExtantDoc();
if (!doc) {
return NS_ERROR_FAILURE;
}
return nsContentUtils::DispatchTrustedEvent(doc,
static_cast<DOMEventTargetHelper*>(mAudioContext),
NS_LITERAL_STRING("statechange"),
false, false);
}
private:
nsRefPtr<AudioContext> mAudioContext;
};
void
AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState)
AudioContext::Resume()
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT((mAudioContextState == AudioContextState::Suspended &&
aNewState == AudioContextState::Running) ||
(mAudioContextState == AudioContextState::Running &&
aNewState == AudioContextState::Suspended) ||
(mAudioContextState == AudioContextState::Running &&
aNewState == AudioContextState::Closed) ||
(mAudioContextState == AudioContextState::Suspended &&
aNewState == AudioContextState::Closed) ||
(mAudioContextState == aNewState),
"Invalid AudioContextState transition");
MOZ_ASSERT(
mIsOffline || aPromise || aNewState == AudioContextState::Running,
"We should have a promise here if this is a real-time AudioContext."
"Or this is the first time we switch to \"running\".");
if (aPromise) {
Promise* promise = reinterpret_cast<Promise*>(aPromise);
promise->MaybeResolve(JS::UndefinedHandleValue);
DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise);
MOZ_ASSERT(rv, "Promise wasn't in the grip array?");
}
if (mAudioContextState != aNewState) {
nsRefPtr<OnStateChangeTask> onStateChangeTask =
new OnStateChangeTask(this);
NS_DispatchToMainThread(onStateChangeTask);
}
mAudioContextState = aNewState;
}
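The assertion above encodes the legal AudioContextState machine: Suspended and Running may flow into each other, either may close, and a self-transition is allowed (the promise still resolves, but no statechange event fires). The same rule as a standalone predicate (hypothetical helper, not in the tree):

#include <cstdint>

enum class AudioContextState : uint32_t { Suspended, Running, Closed };

bool IsValidStateTransition(AudioContextState from, AudioContextState to) {
  using S = AudioContextState;
  return (from == S::Suspended && to == S::Running)   ||
         (from == S::Running   && to == S::Suspended) ||
         (from == S::Running   && to == S::Closed)    ||
         (from == S::Suspended && to == S::Closed)    ||
         (from == to);  // no-op transition: resolve the promise, skip the event
}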
already_AddRefed<Promise>
AudioContext::Suspend(ErrorResult& aRv)
{
nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
nsRefPtr<Promise> promise;
promise = Promise::Create(parentObject, aRv);
if (aRv.Failed()) {
return nullptr;
}
if (mIsOffline) {
promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return promise.forget();
}
if (mAudioContextState == AudioContextState::Closed ||
mCloseCalled) {
promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
return promise.forget();
}
if (mAudioContextState == AudioContextState::Suspended) {
promise->MaybeResolve(JS::UndefinedHandleValue);
return promise.forget();
}
MediaStream* ds = DestinationStream();
if (ds) {
ds->BlockStreamIfNeeded();
ds->ChangeExplicitBlockerCount(-1);
}
mPromiseGripArray.AppendElement(promise);
Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
AudioContextOperation::Suspend, promise);
return promise.forget();
}
already_AddRefed<Promise>
AudioContext::Resume(ErrorResult& aRv)
{
nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
nsRefPtr<Promise> promise;
promise = Promise::Create(parentObject, aRv);
if (aRv.Failed()) {
return nullptr;
}
if (mIsOffline) {
promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return promise.forget();
}
if (mAudioContextState == AudioContextState::Closed ||
mCloseCalled) {
promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
return promise.forget();
}
if (mAudioContextState == AudioContextState::Running) {
promise->MaybeResolve(JS::UndefinedHandleValue);
return promise.forget();
}
MediaStream* ds = DestinationStream();
if (ds) {
ds->UnblockStreamIfNeeded();
}
mPromiseGripArray.AppendElement(promise);
Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
AudioContextOperation::Resume, promise);
return promise.forget();
}
already_AddRefed<Promise>
AudioContext::Close(ErrorResult& aRv)
{
nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
nsRefPtr<Promise> promise;
promise = Promise::Create(parentObject, aRv);
if (aRv.Failed()) {
return nullptr;
}
if (mIsOffline) {
promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return promise.forget();
}
if (mAudioContextState == AudioContextState::Closed) {
promise->MaybeResolve(NS_ERROR_DOM_INVALID_STATE_ERR);
return promise.forget();
}
mCloseCalled = true;
mPromiseGripArray.AppendElement(promise);
Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
AudioContextOperation::Close, promise);
MediaStream* ds = DestinationStream();
if (ds) {
ds->BlockStreamIfNeeded();
}
return promise.forget();
}
void
@ -957,9 +653,6 @@ AudioContext::StartRendering(ErrorResult& aRv)
mIsStarted = true;
nsRefPtr<Promise> promise = Promise::Create(parentObject, aRv);
mDestination->StartRendering(promise);
OnStateChanged(nullptr, AudioContextState::Running);
return promise.forget();
}

View File

@ -35,12 +35,9 @@ class DOMMediaStream;
class ErrorResult;
class MediaStream;
class MediaStreamGraph;
class AudioNodeEngine;
class AudioNodeStream;
namespace dom {
enum class AudioContextState : uint32_t;
class AnalyserNode;
class AudioBuffer;
class AudioBufferSourceNode;
@ -67,30 +64,6 @@ class WaveShaperNode;
class PeriodicWave;
class Promise;
/* This runnable allows the MSG to notify the main thread when audio is actually
* flowing */
class StateChangeTask final : public nsRunnable
{
public:
/* This constructor should be used when this event is sent from the main
* thread. */
StateChangeTask(AudioContext* aAudioContext, void* aPromise, AudioContextState aNewState);
/* This constructor should be used when this event is sent from the audio
* thread. */
StateChangeTask(AudioNodeStream* aStream, void* aPromise, AudioContextState aNewState);
NS_IMETHOD Run() override;
private:
nsRefPtr<AudioContext> mAudioContext;
void* mPromise;
nsRefPtr<AudioNodeStream> mAudioNodeStream;
AudioContextState mNewState;
};
enum AudioContextOperation { Suspend, Resume, Close };
class AudioContext final : public DOMEventTargetHelper,
public nsIMemoryReporter
{
@ -103,8 +76,6 @@ class AudioContext final : public DOMEventTargetHelper,
~AudioContext();
public:
typedef uint64_t AudioContextId;
NS_DECL_ISUPPORTS_INHERITED
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioContext,
DOMEventTargetHelper)
@ -116,6 +87,8 @@ public:
}
void Shutdown(); // idempotent
void Suspend();
void Resume();
virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
@ -151,31 +124,11 @@ public:
return mSampleRate;
}
AudioContextId Id() const
{
return mId;
}
double CurrentTime() const;
AudioListener* Listener();
AudioContextState State() const;
// These three methods return a promise to content that is resolved when a
// (possibly long) operation completes on the MSG (and possibly other)
// thread(s). To avoid having to match calls with their asynchronous results
// when an operation completes, we keep a reference to the promises on the
// main thread, and then send the promise pointers down to the MSG thread as a
// void* (to make it very clear that the pointer is merely to be treated as an
// ID). When back on the main thread, we can resolve or reject the promise by
// casting it back to a `Promise*`, asserting we're on the main thread, and
// removing the reference we added.
already_AddRefed<Promise> Suspend(ErrorResult& aRv);
already_AddRefed<Promise> Resume(ErrorResult& aRv);
already_AddRefed<Promise> Close(ErrorResult& aRv);
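A minimal sketch of the round trip that comment describes, stitched together from the removed code elsewhere in this diff (helper names hypothetical; the real sender is AudioContext::Suspend/Resume/Close and the resolver is OnStateChanged):

void SendOperation(Promise* aPromise) {
  MOZ_ASSERT(NS_IsMainThread());
  mPromiseGripArray.AppendElement(aPromise);     // grip: keep the promise alive
  Graph()->ApplyAudioContextOperation(
      DestinationStream()->AsAudioNodeStream(),
      AudioContextOperation::Suspend,
      static_cast<void*>(aPromise));             // demoted to an opaque ID
}

void ResolveOperation(void* aPromiseId) {
  MOZ_ASSERT(NS_IsMainThread());                 // only cast back on main thread
  Promise* promise = reinterpret_cast<Promise*>(aPromiseId);
  promise->MaybeResolve(JS::UndefinedHandleValue);
  mPromiseGripArray.RemoveElement(promise);      // drop the grip we added
}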
IMPL_EVENT_HANDLER(statechange)
already_AddRefed<AudioBufferSourceNode> CreateBufferSource(ErrorResult& aRv);
already_AddRefed<AudioBufferSourceNode> CreateBufferSource();
already_AddRefed<AudioBuffer>
CreateBuffer(JSContext* aJSContext, uint32_t aNumberOfChannels,
@ -192,16 +145,16 @@ public:
ErrorResult& aRv);
already_AddRefed<StereoPannerNode>
CreateStereoPanner(ErrorResult& aRv);
CreateStereoPanner();
already_AddRefed<AnalyserNode>
CreateAnalyser(ErrorResult& aRv);
CreateAnalyser();
already_AddRefed<GainNode>
CreateGain(ErrorResult& aRv);
CreateGain();
already_AddRefed<WaveShaperNode>
CreateWaveShaper(ErrorResult& aRv);
CreateWaveShaper();
already_AddRefed<MediaElementAudioSourceNode>
CreateMediaElementSource(HTMLMediaElement& aMediaElement, ErrorResult& aRv);
@ -212,10 +165,10 @@ public:
CreateDelay(double aMaxDelayTime, ErrorResult& aRv);
already_AddRefed<PannerNode>
CreatePanner(ErrorResult& aRv);
CreatePanner();
already_AddRefed<ConvolverNode>
CreateConvolver(ErrorResult& aRv);
CreateConvolver();
already_AddRefed<ChannelSplitterNode>
CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv);
@ -224,13 +177,13 @@ public:
CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv);
already_AddRefed<DynamicsCompressorNode>
CreateDynamicsCompressor(ErrorResult& aRv);
CreateDynamicsCompressor();
already_AddRefed<BiquadFilterNode>
CreateBiquadFilter(ErrorResult& aRv);
CreateBiquadFilter();
already_AddRefed<OscillatorNode>
CreateOscillator(ErrorResult& aRv);
CreateOscillator();
already_AddRefed<PeriodicWave>
CreatePeriodicWave(const Float32Array& aRealData, const Float32Array& aImagData,
@ -291,8 +244,6 @@ public:
return aTime + ExtraCurrentTime();
}
void OnStateChanged(void* aPromise, AudioContextState aNewState);
IMPL_EVENT_HANDLER(mozinterruptbegin)
IMPL_EVENT_HANDLER(mozinterruptend)
@ -315,23 +266,13 @@ private:
friend struct ::mozilla::WebAudioDecodeJob;
bool CheckClosed(ErrorResult& aRv);
private:
// Each AudioContext has an id that is passed down to the MediaStreams that
// back the AudioNodes, so we can easily compute the set of all the
// MediaStreams for a given context, on the MediaStreamGraph side.
const AudioContextId mId;
// Note that it's important for mSampleRate to be initialized before
// mDestination, as mDestination's constructor needs to access it!
const float mSampleRate;
AudioContextState mAudioContextState;
nsRefPtr<AudioDestinationNode> mDestination;
nsRefPtr<AudioListener> mListener;
nsTArray<nsRefPtr<WebAudioDecodeJob> > mDecodeJobs;
// This array is used to keep the suspend/resume/close promises alive until
// they are resolved, so we can safely pass them across threads.
nsTArray<nsRefPtr<Promise>> mPromiseGripArray;
// See RegisterActiveNode. These will keep the AudioContext alive while it
// is rendering and the window remains alive.
nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
@ -345,12 +286,8 @@ private:
bool mIsOffline;
bool mIsStarted;
bool mIsShutDown;
// Close has been called; reject subsequent suspend and resume calls.
bool mCloseCalled;
};
static const dom::AudioContext::AudioContextId NO_AUDIO_CONTEXT = 0;
}
}

View File

@ -5,7 +5,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioDestinationNode.h"
#include "AudioContext.h"
#include "mozilla/dom/AudioDestinationNodeBinding.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/Preferences.h"
@ -177,11 +176,9 @@ public:
aNode->ResolvePromise(renderedBuffer);
nsRefPtr<OnCompleteTask> onCompleteTask =
nsRefPtr<OnCompleteTask> task =
new OnCompleteTask(context, renderedBuffer);
NS_DispatchToMainThread(onCompleteTask);
context->OnStateChanged(nullptr, AudioContextState::Closed);
NS_DispatchToMainThread(task);
}
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
@ -370,10 +367,6 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
mStream->AddMainThreadListener(this);
mStream->AddAudioOutput(&gWebAudioOutputKey);
if (!aIsOffline) {
graph->NotifyWhenGraphStarted(mStream->AsAudioNodeStream());
}
if (aChannel != AudioChannel::Normal) {
ErrorResult rv;
SetMozAudioChannelType(aChannel, rv);

View File

@ -12,8 +12,8 @@ using namespace mozilla::dom;
namespace mozilla {
AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId)
: AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate, aContextId)
AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
: AudioNodeStream(aEngine, MediaStreamGraph::INTERNAL_STREAM, aSampleRate)
{
MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
}

View File

@ -20,7 +20,7 @@ namespace mozilla {
*/
class AudioNodeExternalInputStream : public AudioNodeStream {
public:
AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate, uint32_t aContextId);
AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate);
protected:
~AudioNodeExternalInputStream();

View File

@ -27,12 +27,10 @@ namespace mozilla {
AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
MediaStreamGraph::AudioNodeStreamKind aKind,
TrackRate aSampleRate,
AudioContext::AudioContextId aContextId)
TrackRate aSampleRate)
: ProcessedMediaStream(nullptr),
mEngine(aEngine),
mSampleRate(aSampleRate),
mAudioContextId(aContextId),
mKind(aKind),
mNumberOfInputChannels(2),
mMarkAsFinishedAfterThisBlock(false),

View File

@ -47,8 +47,7 @@ public:
*/
AudioNodeStream(AudioNodeEngine* aEngine,
MediaStreamGraph::AudioNodeStreamKind aKind,
TrackRate aSampleRate,
AudioContext::AudioContextId aContextId);
TrackRate aSampleRate);
protected:
~AudioNodeStream();
@ -122,7 +121,6 @@ public:
// Any thread
AudioNodeEngine* Engine() { return mEngine; }
TrackRate SampleRate() const { return mSampleRate; }
AudioContext::AudioContextId AudioContextId() const override { return mAudioContextId; }
/**
* Convert a time in seconds on the destination stream to ticks
@ -149,7 +147,6 @@ public:
void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
AudioNodeSizes& aUsage) const;
protected:
void AdvanceOutputSegment();
void FinishOutput();
@ -169,11 +166,8 @@ protected:
OutputChunks mLastChunks;
// The stream's sampling rate
const TrackRate mSampleRate;
// This is necessary to be able to find all the nodes for a given
// AudioContext. It is set on the main thread, in the constructor.
const AudioContext::AudioContextId mAudioContextId;
// Whether this is an internal or external stream
const MediaStreamGraph::AudioNodeStreamKind mKind;
MediaStreamGraph::AudioNodeStreamKind mKind;
// The number of input channels that this stream requires. 0 means don't care.
uint32_t mNumberOfInputChannels;
// The mixing modes

View File

@ -35,7 +35,6 @@ public:
NS_ERROR("MediaStreamAudioSourceNodeEngine bad parameter index");
}
}
private:
bool mEnabled;
};

View File

@ -31,7 +31,6 @@ EXPORTS += [
EXPORTS.mozilla += [
'FFTBlock.h',
'MediaStreamAudioDestinationNode.h',
]
EXPORTS.mozilla.dom += [

View File

@ -44,8 +44,6 @@ skip-if = (toolkit == 'android' && (processor == 'x86' || debug)) || os == 'win'
skip-if = (toolkit == 'gonk') || (toolkit == 'android') || debug #bug 906752
[test_audioBufferSourceNodePassThrough.html]
[test_AudioContext.html]
skip-if = android_version == '10' # bug 1138462
[test_audioContextSuspendResumeClose.html]
[test_audioDestinationNode.html]
[test_AudioListener.html]
[test_audioParamExponentialRamp.html]

View File

@ -1,400 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test suspend, resume and close method of the AudioContext</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
SimpleTest.requestCompleteLog();
function tryToCreateNodeOnClosedContext(ctx) {
is(ctx.state, "closed", "The context is in closed state");
[ { name: "createBufferSource" },
{ name: "createMediaStreamDestination",
onOfflineAudioContext: false},
{ name: "createScriptProcessor" },
{ name: "createStereoPanner" },
{ name: "createAnalyser" },
{ name: "createGain" },
{ name: "createDelay" },
{ name: "createBiquadFilter" },
{ name: "createWaveShaper" },
{ name: "createPanner" },
{ name: "createConvolver" },
{ name: "createChannelSplitter" },
{ name: "createChannelMerger" },
{ name: "createDynamicsCompressor" },
{ name: "createOscillator" },
{ name: "createMediaElementSource",
args: [new Audio()],
onOfflineAudioContext: false },
{ name: "createMediaStreamSource",
args: [new Audio().mozCaptureStream()],
onOfflineAudioContext: false } ].forEach(function(e) {
if (e.onOfflineAudioContext == false &&
ctx instanceof OfflineAudioContext) {
return;
}
expectException(function() {
ctx[e.name].apply(ctx, e.args);
}, DOMException.INVALID_STATE_ERR);
});
}
function loadFile(url, callback) {
var xhr = new XMLHttpRequest();
xhr.open("GET", url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function() {
callback(xhr.response);
};
xhr.send();
}
// createBuffer, createPeriodicWave and decodeAudioData should work on a context
// that has `state` == "closed"
function tryLegalOperationsOnClosedContext(ctx) {
is(ctx.state, "closed", "The context is in closed state");
[ { name: "createBuffer",
args: [1, 44100, 44100] },
{ name: "createPeriodicWave",
args: [new Float32Array(10), new Float32Array(10)] }
].forEach(function(e) {
expectNoException(function() {
ctx[e.name].apply(ctx, e.args);
});
});
loadFile("ting-44.1k-1ch.ogg", function(buf) {
ctx.decodeAudioData(buf).then(function(decodedBuf) {
ok(true, "decodeAudioData on a closed context should work, it did.")
todo(false, "0 " + (ctx instanceof OfflineAudioContext ? "Offline" : "Realtime"));
finish();
}).catch(function(e){
ok(false, "decodeAudioData on a closed context should work, it did not");
finish();
});
});
}
// Test that MediaStreams that are the output of a suspended AudioContext are
// producing silence.
// ac1 produces a sine fed to a MediaStreamAudioDestinationNode.
// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and checks that
// there is silence when ac1 is suspended.
function testMultiContextOutput() {
var ac1 = new AudioContext(),
ac2 = new AudioContext();
var osc1 = ac1.createOscillator(),
mediaStreamDestination1 = ac1.createMediaStreamDestination();
var mediaStreamAudioSourceNode2 =
ac2.createMediaStreamSource(mediaStreamDestination1.stream),
sp2 = ac2.createScriptProcessor(),
suspendCalled = false,
silentBuffersInARow = 0;
sp2.onaudioprocess = function(e) {
if (!suspendCalled) {
ac1.suspend();
suspendCalled = true;
} else {
// Wait until the context that produces the tone is actually suspended. The
// second context can still receive a small amount of data because of the
// buffering between the two contexts.
if (ac1.state == "suspended") {
var input = e.inputBuffer.getChannelData(0);
var silent = true;
for (var i = 0; i < input.length; i++) {
if (input[i] != 0.0) {
silent = false;
}
}
if (silent) {
silentBuffersInARow++;
if (silentBuffersInARow == 10) {
ok(true,
"MediaStreams produce silence when their input is blocked.");
sp2.onaudioprocess = null;
ac1.close();
ac2.close();
todo(false,"1");
finish();
}
} else {
is(silentBuffersInARow, 0,
"No non silent buffer inbetween silent buffers.");
}
}
}
}
osc1.connect(mediaStreamDestination1);
mediaStreamAudioSourceNode2.connect(sp2);
osc1.start();
}
// Test that there is no buffering between contexts when connecting a running
// AudioContext to a suspended AudioContext. Our ScriptProcessorNode does some
// buffering internally, so we ensure this by using a very low frequency on a
// sine, and observe that the phase has changed by a big enough margin.
function testMultiContextInput() {
var ac1 = new AudioContext(),
ac2 = new AudioContext();
var osc1 = ac1.createOscillator(),
mediaStreamDestination1 = ac1.createMediaStreamDestination(),
sp1 = ac1.createScriptProcessor();
var mediaStreamAudioSourceNode2 =
ac2.createMediaStreamSource(mediaStreamDestination1.stream),
sp2 = ac2.createScriptProcessor(),
resumed = false,
suspended = false,
countEventOnFirstSP = true,
eventReceived = 0;
osc1.frequency.value = 0.0001;
// We keep a first ScriptProcessor to get a periodic callback, since we can't
// use setTimeout anymore.
sp1.onaudioprocess = function(e) {
if (countEventOnFirstSP) {
eventReceived++;
}
if (eventReceived > 3 && suspended) {
countEventOnFirstSP = false;
eventReceived = 0;
ac2.resume().then(function() {
resumed = true;
});
}
}
sp2.onaudioprocess = function(e) {
var inputBuffer = e.inputBuffer.getChannelData(0);
if (!resumed) {
// save the last value of the buffer before suspending.
sp2.value = inputBuffer[inputBuffer.length - 1];
ac2.suspend().then(function() {
suspended = true;
});
} else {
eventReceived++;
if (eventReceived == 3) {
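// With such a low frequency, sin(x) is roughly x, so if no buffering
// occurred the sample value should have advanced by about the phase
// increment: 3 callbacks of 2048 frames, i.e. 2048 * 3 / sampleRate
// seconds, times the angular frequency 2 * PI * frequency.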
var delta = Math.abs(inputBuffer[1] - sp2.value),
theoreticalIncrement = 2048 * 3 * Math.PI * 2 * osc1.frequency.value / ac1.sampleRate;
ok(delta >= theoreticalIncrement,
"Buffering did not occur when the context was suspended (delta:" + delta + " increment: " + theoreticalIncrement+")");
ac1.close();
ac2.close();
sp1.onaudioprocess = null;
sp2.onaudioprocess = null;
todo(false, "2");
finish();
}
}
}
osc1.connect(mediaStreamDestination1);
osc1.connect(sp1);
mediaStreamAudioSourceNode2.connect(sp2);
osc1.start();
}
// Test that ScriptProcessorNode's onaudioprocess doesn't get called while the
// context is suspended/closed. It is possible that we get the handler called
// exactly once after suspend, because the event has already been sent to the
// event loop.
function testScriptProcessNodeSuspended() {
var ac = new AudioContext();
var sp = ac.createScriptProcessor();
var remainingIterations = 30;
var afterResume = false;
sp.onaudioprocess = function() {
ok(ac.state == "running" || remainingIterations == 3, "If onaudioprocess is called, the context" +
" must be running (was " + ac.state + ", remainingIterations:" + remainingIterations +")");
remainingIterations--;
if (!afterResume) {
if (remainingIterations == 0) {
ac.suspend().then(function() {
ac.resume().then(function() {
remainingIterations = 30;
afterResume = true;
});
});
}
} else {
sp.onaudioprocess = null;
todo(false,"3");
finish();
}
}
sp.connect(ac.destination);
}
// Take an AudioContext, make sure it switches to running when the audio starts
// flowing, and then, call suspend, resume and close on it, tracking its state.
function testAudioContext() {
var ac = new AudioContext();
is(ac.state, "suspended", "AudioContext should start in suspended state.");
var stateTracker = {
previous: ac.state,
// no promise for the initial suspended -> running
initial: { handler: false },
suspend: { promise: false, handler: false },
resume: { promise: false, handler: false },
close: { promise: false, handler: false }
};
function initialSuspendToRunning() {
ok(stateTracker.previous == "suspended" &&
ac.state == "running",
"AudioContext should switch to \"running\" when the audio hardware is" +
" ready.");
stateTracker.previous = ac.state;
ac.onstatechange = afterSuspend;
stateTracker.initial.handler = true;
ac.suspend().then(function() {
ok(!stateTracker.suspend.promise && !stateTracker.suspend.handler,
"Promise should be resolved before the callback, and only once.")
stateTracker.suspend.promise = true;
});
}
function afterSuspend() {
ok(stateTracker.previous == "running" &&
ac.state == "suspended",
"AudioContext should switch to \"suspend\" when the audio stream is" +
"suspended.");
ok(stateTracker.suspend.promise && !stateTracker.suspend.handler,
"Handler should be called after the callback, and only once");
stateTracker.suspend.handler = true;
stateTracker.previous = ac.state;
ac.onstatechange = afterResume;
ac.resume().then(function() {
ok(!stateTracker.resume.promise && !stateTracker.resume.handler,
"Promise should be called before the callback, and only once");
stateTracker.resume.promise = true;
});
}
function afterResume() {
ok(stateTracker.previous == "suspended" &&
ac.state == "running",
"AudioContext should switch to \"running\" when the audio stream resumes.");
ok(stateTracker.resume.promise && !stateTracker.resume.handler,
"Handler should be called after the callback, and only once");
stateTracker.resume.handler = true;
stateTracker.previous = ac.state;
ac.onstatechange = afterClose;
ac.close().then(function() {
ok(!stateTracker.close.promise && !stateTracker.close.handler,
"Promise should be called before the callback, and only once");
stateTracker.close.promise = true;
tryToCreateNodeOnClosedContext(ac);
tryLegalOperationsOnClosedContext(ac);
});
}
function afterClose() {
ok(stateTracker.previous == "running" &&
ac.state == "closed",
"AudioContext should switch to \"closed\" when the audio stream is" +
" closed.");
ok(stateTracker.close.promise && !stateTracker.close.handler,
"Handler should be called after the callback, and only once");
}
ac.onstatechange = initialSuspendToRunning;
}
function testOfflineAudioContext() {
var o = new OfflineAudioContext(1, 44100, 44100);
is(o.state, "suspended", "OfflineAudioContext should start in suspended state.");
expectRejectedPromise(o, "suspend", "NotSupportedError");
expectRejectedPromise(o, "resume", "NotSupportedError");
expectRejectedPromise(o, "close", "NotSupportedError");
var previousState = o.state,
finishedRendering = false;
function beforeStartRendering() {
ok(previousState == "suspended" && o.state == "running", "onstatechanged" +
"handler is called on state changed, and the new state is running");
previousState = o.state;
o.onstatechange = onRenderingFinished;
}
function onRenderingFinished() {
ok(previousState == "running" && o.state == "closed",
"onstatechanged handler is called when rendering finishes, " +
"and the new state is closed");
ok(finishedRendering, "The Promise that is resolved when the rendering is" +
"done should be resolved earlier than the state change.");
previousState = o.state;
o.onstatechange = afterRenderingFinished;
tryToCreateNodeOnClosedContext(o);
tryLegalOperationsOnClosedContext(o);
}
function afterRenderingFinished() {
ok(false, "There should be no transition out of the closed state.");
}
o.onstatechange = beforeStartRendering;
o.startRendering().then(function(buffer) {
finishedRendering = true;
});
}
var remaining = 0;
function finish() {
remaining--;
if (remaining == 0) {
SimpleTest.finish();
}
}
SimpleTest.waitForExplicitFinish();
addLoadEvent(function() {
var tests = [
testAudioContext,
testOfflineAudioContext,
testScriptProcessNodeSuspended,
testMultiContextOutput,
testMultiContextInput
];
remaining = tests.length;
tests.forEach(function(f) { f() });
});
</script>
</pre>
</body>
</html>

View File

@ -33,18 +33,6 @@ function expectTypeError(func) {
ok(threw, "The exception was thrown");
}
function expectRejectedPromise(that, func, exceptionName) {
var promise = that[func]();
ok(promise instanceof Promise, "Expect a Promise");
promise.then(function(res) {
ok(false, "Promise resolved when it should have been rejected.");
}).catch(function(err) {
is(err.name, exceptionName, "Promise correctly reject with " + exceptionName);
});
}
function fuzzyCompare(a, b) {
return Math.abs(a - b) < 9e-3;
}

View File

@ -13,12 +13,6 @@
callback DecodeSuccessCallback = void (AudioBuffer decodedData);
callback DecodeErrorCallback = void ();
enum AudioContextState {
"suspended",
"running",
"closed"
};
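// Transitions exercised by the accompanying tests: "suspended" -> "running"
// (audio hardware ready, or resume()), "running" -> "suspended" (suspend()),
// and "running" -> "closed" (close(), or the end of offline rendering).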
[Constructor,
Constructor(AudioChannel audioChannelType)]
interface AudioContext : EventTarget {
@ -27,14 +21,6 @@ interface AudioContext : EventTarget {
readonly attribute float sampleRate;
readonly attribute double currentTime;
readonly attribute AudioListener listener;
readonly attribute AudioContextState state;
[Throws]
Promise<void> suspend();
[Throws]
Promise<void> resume();
[Throws]
Promise<void> close();
attribute EventHandler onstatechange;
[NewObject, Throws]
AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
@ -45,7 +31,7 @@ interface AudioContext : EventTarget {
optional DecodeErrorCallback errorCallback);
// AudioNode creation
[NewObject, Throws]
[NewObject]
AudioBufferSourceNode createBufferSource();
[NewObject, Throws]
@ -56,25 +42,25 @@ interface AudioContext : EventTarget {
optional unsigned long numberOfInputChannels = 2,
optional unsigned long numberOfOutputChannels = 2);
[NewObject, Throws]
[NewObject]
StereoPannerNode createStereoPanner();
[NewObject, Throws]
[NewObject]
AnalyserNode createAnalyser();
[NewObject, Throws, UnsafeInPrerendering]
MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
[NewObject, Throws, UnsafeInPrerendering]
MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
[NewObject, Throws]
[NewObject]
GainNode createGain();
[NewObject, Throws]
DelayNode createDelay(optional double maxDelayTime = 1);
[NewObject, Throws]
[NewObject]
BiquadFilterNode createBiquadFilter();
[NewObject, Throws]
[NewObject]
WaveShaperNode createWaveShaper();
[NewObject, Throws]
[NewObject]
PannerNode createPanner();
[NewObject, Throws]
[NewObject]
ConvolverNode createConvolver();
[NewObject, Throws]
@ -82,10 +68,10 @@ interface AudioContext : EventTarget {
[NewObject, Throws]
ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
[NewObject, Throws]
[NewObject]
DynamicsCompressorNode createDynamicsCompressor();
[NewObject, Throws]
[NewObject]
OscillatorNode createOscillator();
[NewObject, Throws]
PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);