diff --git a/dom/media/AutoplayPolicy.cpp b/dom/media/AutoplayPolicy.cpp
index 6a6194d4a5ec..bdf4f1476bca 100644
--- a/dom/media/AutoplayPolicy.cpp
+++ b/dom/media/AutoplayPolicy.cpp
@@ -8,7 +8,6 @@
 
 #include "mozilla/EventStateManager.h"
 #include "mozilla/Preferences.h"
-#include "mozilla/dom/AudioContext.h"
 #include "mozilla/dom/HTMLMediaElement.h"
 #include "mozilla/dom/HTMLMediaElementBinding.h"
 #include "nsContentUtils.h"
@@ -69,51 +68,5 @@ AutoplayPolicy::IsMediaElementAllowedToPlay(NotNull<HTMLMediaElement*> aElement)
   return false;
 }
 
-/* static */ bool
-AutoplayPolicy::IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext)
-{
-  if (Preferences::GetBool("media.autoplay.enabled")) {
-    return true;
-  }
-
-  if (!Preferences::GetBool("media.autoplay.enabled.user-gestures-needed", false)) {
-    return true;
-  }
-
-  // Offline context won't directly output sound to audio devices.
-  if (aContext->IsOffline()) {
-    return true;
-  }
-
-  nsPIDOMWindowInner* window = aContext->GetOwner();
-  if (!window) {
-    return false;
-  }
-
-  // Pages which have been granted permission to capture WebRTC camera or
-  // microphone are assumed to be trusted, and are allowed to autoplay.
-  MediaManager* manager = MediaManager::GetIfExists();
-  if (manager) {
-    if (manager->IsActivelyCapturingOrHasAPermission(window->WindowID())) {
-      return true;
-    }
-  }
-
-  nsCOMPtr<nsIPrincipal> principal = aContext->GetParentObject()->AsGlobal()->PrincipalOrNull();
-
-  // Whitelisted.
-  if (principal &&
-      nsContentUtils::IsExactSitePermAllow(principal, "autoplay-media")) {
-    return true;
-  }
-
-  // Activated by user gesture.
-  if (window->GetExtantDoc()->HasBeenUserActivated()) {
-    return true;
-  }
-
-  return false;
-}
-
 } // namespace dom
 } // namespace mozilla
diff --git a/dom/media/AutoplayPolicy.h b/dom/media/AutoplayPolicy.h
index ec25e94b597c..9ebe2fb3b3fc 100644
--- a/dom/media/AutoplayPolicy.h
+++ b/dom/media/AutoplayPolicy.h
@@ -15,7 +15,6 @@ namespace mozilla {
 namespace dom {
 
 class HTMLMediaElement;
-class AudioContext;
 
 /**
  * AutoplayPolicy is used to manage autoplay logic for all kinds of media,
@@ -33,12 +32,9 @@ class AutoplayPolicy
 {
 public:
   static bool IsMediaElementAllowedToPlay(NotNull<HTMLMediaElement*> aElement);
-  static bool IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext);
-private:
-  static bool IsDocumentAllowedToPlay(nsIDocument* aDoc);
 };
 
 } // namespace dom
 } // namespace mozilla
 
-#endif
+#endif
\ No newline at end of file
diff --git a/dom/media/webaudio/AudioContext.cpp b/dom/media/webaudio/AudioContext.cpp
index 88eaa0fcce2c..412220fc99fa 100644
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -9,7 +8,6 @@
 #include "blink/PeriodicWave.h"
 
 #include "mozilla/ErrorResult.h"
-#include "mozilla/NotNull.h"
 #include "mozilla/OwningNonNull.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/Preferences.h"
@@ -45,7 +44,6 @@
 #include "AudioListener.h"
 #include "AudioNodeStream.h"
 #include "AudioStream.h"
-#include "AutoplayPolicy.h"
 #include "BiquadFilterNode.h"
 #include "ChannelMergerNode.h"
 #include "ChannelSplitterNode.h"
@@ -85,7 +83,6 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener)
   NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes)
   }
@@ -104,7 +101,6 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext,
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises)
   if (!tmp->mIsStarted) {
     MOZ_ASSERT(tmp->mIsOffline,
               "Online AudioContexts should always be started");
@@ -153,28 +149,13 @@ AudioContext::AudioContext(nsPIDOMWindowInner* aWindow,
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
-  bool allowedToStart = AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this));
-  mDestination = new AudioDestinationNode(this,
-                                          aIsOffline,
-                                          allowedToStart,
-                                          aNumberOfChannels,
-                                          aLength,
-                                          aSampleRate);
+  mDestination = new AudioDestinationNode(this, aIsOffline,
+                                          aNumberOfChannels, aLength, aSampleRate);
 
   // The context can't be muted until it has a destination.
   if (mute) {
     Mute();
   }
-
-  // If we won't allow audio context to start, we need to suspend all its stream
-  // in order to delay the state changing from 'suspend' to 'start'.
-  if (!allowedToStart) {
-    ErrorResult rv;
-    RefPtr<Promise> dummy = Suspend(rv);
-    MOZ_ASSERT(!rv.Failed(), "can't create promise");
-    MOZ_ASSERT(dummy->State() != Promise::PromiseState::Rejected,
-               "suspend failed");
-  }
 }
 
 nsresult
@@ -745,11 +726,6 @@ AudioContext::Shutdown()
     }
 
     mPromiseGripArray.Clear();
-
-    for (const auto& p : mPendingResumePromises) {
-      p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
-    }
-    mPendingResumePromises.Clear();
   }
 
   // Release references to active nodes.
@@ -929,16 +905,6 @@ AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState)
     }
   }
 
-  // Resolve all pending promises once the audio context has been allowed to
-  // start.
-  if (mAudioContextState == AudioContextState::Suspended &&
-      aNewState == AudioContextState::Running) {
-    for (const auto& p : mPendingResumePromises) {
-      p->MaybeResolveWithUndefined();
-    }
-    mPendingResumePromises.Clear();
-  }
-
   if (mAudioContextState != aNewState) {
     RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this);
     Dispatch(task.forget());
@@ -1022,24 +988,22 @@ AudioContext::Resume(ErrorResult& aRv)
     return promise.forget();
   }
 
-  mPendingResumePromises.AppendElement(promise);
+  Destination()->Resume();
 
-  if (AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this))) {
-    Destination()->Resume();
-
-    nsTArray<MediaStream*> streams;
-    // If mSuspendCalled is false then we already resumed all our streams,
-    // so don't resume them again (since suspend(); resume(); resume(); should
-    // be OK). But we still need to do ApplyAudioContextOperation
-    // to ensure our new promise is resolved.
-    if (mSuspendCalled) {
-      streams = GetAllStreams();
-    }
-    Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                        streams,
-                                        AudioContextOperation::Resume, promise);
-    mSuspendCalled = false;
+  nsTArray<MediaStream*> streams;
+  // If mSuspendCalled is false then we already resumed all our streams,
+  // so don't resume them again (since suspend(); resume(); resume(); should
+  // be OK). But we still need to do ApplyAudioContextOperation
+  // to ensure our new promise is resolved.
+  if (mSuspendCalled) {
+    streams = GetAllStreams();
   }
+  mPromiseGripArray.AppendElement(promise);
+  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
+                                      streams,
+                                      AudioContextOperation::Resume, promise);
+
+  mSuspendCalled = false;
 
   return promise.forget();
 }
diff --git a/dom/media/webaudio/AudioContext.h b/dom/media/webaudio/AudioContext.h
index d4b648d9e505..79c397ee9c6e 100644
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -353,14 +353,9 @@ private:
   RefPtr<AudioDestinationNode> mDestination;
   RefPtr<AudioListener> mListener;
   nsTArray<UniquePtr<WebAudioDecodeJob> > mDecodeJobs;
-  // This array is used to keep the suspend/close promises alive until
+  // This array is used to keep the suspend/resume/close promises alive until
   // they are resolved, so we can safely pass them accross threads.
   nsTArray<RefPtr<Promise>> mPromiseGripArray;
-  // This array is used to onlly keep the resume promises alive until they are
-  // resolved, so we can safely pass them accross threads. If the audio context
-  // is not allowed to play, the promise would be pending in this array and be
-  // resolved until audio context has been allowed and user call resume() again.
-  nsTArray<RefPtr<Promise>> mPendingResumePromises;
   // See RegisterActiveNode. These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
diff --git a/dom/media/webaudio/AudioDestinationNode.cpp b/dom/media/webaudio/AudioDestinationNode.cpp
index c9541d07dd67..d6aec6700561 100644
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -323,10 +323,8 @@ NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)
 
 AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                            bool aIsOffline,
-                                           bool aAllowedToStart,
                                            uint32_t aNumberOfChannels,
-                                           uint32_t aLength,
-                                           float aSampleRate)
+                                           uint32_t aLength, float aSampleRate)
   : AudioNode(aContext, aNumberOfChannels,
               ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
   , mFramesToProduce(aLength)
@@ -354,7 +352,7 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
   mStream->AddMainThreadListener(this);
   mStream->AddAudioOutput(&gWebAudioOutputKey);
 
-  if (!aIsOffline && aAllowedToStart) {
+  if (!aIsOffline) {
     graph->NotifyWhenGraphStarted(mStream);
   }
 }
diff --git a/dom/media/webaudio/AudioDestinationNode.h b/dom/media/webaudio/AudioDestinationNode.h
index ab986f49a2d1..d85e4e3b45cb 100644
--- a/dom/media/webaudio/AudioDestinationNode.h
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -25,7 +25,6 @@ public:
   // whether it's in offline mode.
   AudioDestinationNode(AudioContext* aContext,
                        bool aIsOffline,
-                       bool aAllowedToStart,
                        uint32_t aNumberOfChannels = 0,
                        uint32_t aLength = 0,
                        float aSampleRate = 0.0f);
diff --git a/dom/media/webaudio/test/mochitest.ini b/dom/media/webaudio/test/mochitest.ini
index 459a425b6711..181e426b9de7 100644
--- a/dom/media/webaudio/test/mochitest.ini
+++ b/dom/media/webaudio/test/mochitest.ini
@@ -186,7 +186,6 @@ tags=capturestream
 skip-if = toolkit == 'android' # bug 1091965
 [test_nodeToParamConnection.html]
 [test_nodeCreationDocumentGone.html]
-[test_notAllowedToStartAudioContextGC.html]
 [test_OfflineAudioContext.html]
 [test_offlineDestinationChannelCountLess.html]
 [test_offlineDestinationChannelCountMore.html]
diff --git a/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html b/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
deleted file mode 100644
index 20f931d7b8d3..000000000000
--- a/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
+++ /dev/null
@@ -1,57 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<head>
-  <title>Test GC for not-allow-to-start audio context</title>
-
-
-
-
-
-
-
-
-
diff --git a/testing/specialpowers/content/specialpowersAPI.js b/testing/specialpowers/content/specialpowersAPI.js
index 2a567d4c1865..3940aa06db07 100644
--- a/testing/specialpowers/content/specialpowersAPI.js
+++ b/testing/specialpowers/content/specialpowersAPI.js
@@ -1717,7 +1717,7 @@ SpecialPowersAPI.prototype = {
     var parts = props.split(".");
     for (var i = 0; i < parts.length; i++) {
       var p = parts[i];
-      if (obj[p] != undefined) {
+      if (obj[p]) {
         obj = obj[p];
       } else {
         return null;
diff --git a/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js b/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
index e0d698e226af..d97e8b065f02 100644
--- a/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
+++ b/toolkit/content/tests/browser/browser_autoplay_policy_user_gestures.js
@@ -1,5 +1,3 @@
-/* eslint-disable mozilla/no-arbitrary-setTimeout */
-
 const VIDEO_PAGE = "https://example.com/browser/toolkit/content/tests/browser/file_video.html";
 
 var UserGestures = {
@@ -17,8 +15,7 @@ var UserGestureTests = [
 function setup_test_preference() {
   return SpecialPowers.pushPrefEnv({"set": [
     ["media.autoplay.enabled", false],
-    ["media.autoplay.enabled.user-gestures-needed", true],
-    ["media.navigator.permission.fake", true]
+    ["media.autoplay.enabled.user-gestures-needed", true]
   ]});
 }
 
@@ -99,159 +96,12 @@ async function test_play_with_user_gesture(gesture) {
       ok(video.paused, "video can not start playing.");
     }
   }
-
   await ContentTask.spawn(tab.linkedBrowser, gesture, play_video);
 
   info("- remove tab -");
   BrowserTestUtils.removeTab(tab);
 }
 
-function createAudioContext() {
-  content.ac = new content.AudioContext();
-  let ac = content.ac;
-  ac.resumePromises = [];
-  ac.stateChangePromise = new Promise(resolve => {
-    ac.addEventListener("statechange", function() {
-      resolve();
-    }, {once: true});
-  });
-}
-
-async function checking_audio_context_running_state() {
-  let ac = content.ac;
-  await new Promise(r => setTimeout(r, 2000));
-  is(ac.state, "suspended", "audio context is still suspended");
-}
-
-function resume_without_expected_success() {
-  let ac = content.ac;
-  let promise = ac.resume();
-  ac.resumePromises.push(promise);
-  return new Promise((resolve, reject) => {
-    setTimeout(() => {
-      if (ac.state == "suspended") {
-        ok(true, "audio context is still suspended");
-        resolve();
-      } else {
-        reject("audio context should not be allowed to start");
-      }
-    }, 2000);
-  });
-}
-
-function resume_with_expected_success() {
-  let ac = content.ac;
-  ac.resumePromises.push(ac.resume());
-  return Promise.all(ac.resumePromises).then(() => {
-    ok(ac.state == "running", "audio context starts running");
-  });
-}
-
-function callGUM(testParameters) {
-  info("- calling gum with " + JSON.stringify(testParameters.constraints));
-  if (testParameters.shouldAllowStartingContext) {
-    // Because of the prefs we've set and passed, this is going to allow the
-    // window to start an AudioContext synchronously.
-    testParameters.constraints.fake = true;
-    return content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  }
-
-  // Call gUM, without sucess: we've made it so that only fake requests
-  // succeed without permission, and this is requesting non-fake-devices. Return
-  // a resolved promise so that the test continues, but the getUserMedia Promise
-  // will never be resolved.
-  // We do this to check that it's not merely calling gUM that allows starting
-  // an AudioContext, it's having the Promise it return resolved successfuly,
-  // because of saved permissions for an origin or explicit user consent using
-  // the prompt.
-  content.navigator.mediaDevices.getUserMedia(testParameters.constraints);
-  return Promise.resolve();
-}
-
-
-async function test_webaudio_with_user_gesture(gesture) {
-  info("- open new tab -");
-  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
-                                                        "about:blank");
-  info("- create audio context -");
-  // We want the same audio context to be used across different content
-  // tasks, so it needs to be loaded by a frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
-
-  info("- check whether audio context starts running -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- calling resume() -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            resume_without_expected_success);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- simulate user gesture -");
-  await simulateUserGesture(gesture, tab.linkedBrowser);
-
-  info("- calling resume() again");
-  try {
-    let resumeFunc = gesture.isActivationGesture ?
-      resume_with_expected_success :
-      resume_without_expected_success;
-    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- remove tab -");
-  await BrowserTestUtils.removeTab(tab);
-}
-
-async function test_webaudio_with_gum(testParameters) {
-  info("- open new tab -");
-  let tab = await BrowserTestUtils.openNewForegroundTab(window.gBrowser,
-                                                        "about:blank");
-  info("- create audio context -");
-  // We want the same audio context be used between different content
-  // tasks, so it *must* be loaded by frame script.
-  let frameScript = createAudioContext;
-  let mm = tab.linkedBrowser.messageManager;
-  mm.loadFrameScript("data:,(" + frameScript.toString() + ")();", false);
-
-  info("- check whether audio context starts running -");
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, null,
-                            checking_audio_context_running_state);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  try {
-    await ContentTask.spawn(tab.linkedBrowser, testParameters, callGUM);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- calling resume() again");
-  try {
-    let resumeFunc = testParameters.shouldAllowStartingContext ?
-      resume_with_expected_success :
-      resume_without_expected_success;
-    await ContentTask.spawn(tab.linkedBrowser, null, resumeFunc);
-  } catch (error) {
-    ok(false, error.toString());
-  }
-
-  info("- remove tab -");
-  await BrowserTestUtils.removeTab(tab);
-}
-
 add_task(async function start_test() {
   info("- setup test preference -");
   await setup_test_preference();
@@ -261,24 +111,6 @@
 
   info("- test play after page got user gesture -");
   for (let idx = 0; idx < UserGestureTests.length; idx++) {
-    info("- test play after page got user gesture -");
     await test_play_with_user_gesture(UserGestureTests[idx]);
-
-    info("- test web audio with user gesture -");
-    await test_webaudio_with_user_gesture(UserGestureTests[idx]);
   }
-
-  await test_webaudio_with_gum({constraints: { audio: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true,
-                                               audio: true },
-                                shouldAllowStartingContext: true});
-  await test_webaudio_with_gum({constraints: { video: true },
-                                shouldAllowStartingContext: false});
-  await test_webaudio_with_gum({constraints: { audio: true },
-                                shouldAllowStartingContext: false});
-  await test_webaudio_with_gum({constraints: { video: true, audio: true },
-                                shouldAllowStartingContext: false});
 });