Backed out 14 changesets (bug 1156472) for bustage on a CLOSED TREE

Backed out changeset 2ddbf85a42c0 (bug 1156472)
Backed out changeset 306d02e17081 (bug 1156472)
Backed out changeset 03598139f39a (bug 1156472)
Backed out changeset 4b1e6069b598 (bug 1156472)
Backed out changeset 6c588a5eaaec (bug 1156472)
Backed out changeset 8c98d7beaea7 (bug 1156472)
Backed out changeset fbf59fbb5875 (bug 1156472)
Backed out changeset 66479dd9eed9 (bug 1156472)
Backed out changeset c8502deeed33 (bug 1156472)
Backed out changeset 1a60ff1149a1 (bug 1156472)
Backed out changeset af1638279785 (bug 1156472)
Backed out changeset 8210276a98ca (bug 1156472)
Backed out changeset 13730e7c5997 (bug 1156472)
Backed out changeset 05acb71cf981 (bug 1156472)
Carsten "Tomcat" Book 2015-07-24 17:08:37 +02:00
parent 15d0a4b27a
commit 2b73aa4f63
49 changed files with 293 additions and 1498 deletions

View File

@ -758,7 +758,6 @@ you can use these alternative items. Otherwise, their values should be empty. -->
<!ENTITY getUserMedia.selectCamera.accesskey "C">
<!ENTITY getUserMedia.selectMicrophone.label "Microphone to share:">
<!ENTITY getUserMedia.selectMicrophone.accesskey "M">
<!ENTITY getUserMedia.audioCapture.label "Audio from the tab will be shared.">
<!ENTITY getUserMedia.allWindowsShared.message "All visible windows on your screen will be shared.">
<!-- Bad Content Blocker Doorhanger Notification -->

View File

@ -553,17 +553,13 @@ identity.loggedIn.signOut.accessKey = O
# LOCALIZATION NOTE (getUserMedia.shareCamera.message, getUserMedia.shareMicrophone.message,
# getUserMedia.shareScreen.message, getUserMedia.shareCameraAndMicrophone.message,
# getUserMedia.shareScreenAndMicrophone.message, getUserMedia.shareCameraAndAudioCapture.message,
# getUserMedia.shareAudioCapture.message, getUserMedia.shareScreenAndAudioCapture.message):
# getUserMedia.shareScreenAndMicrophone.message):
# %S is the website origin (e.g. www.mozilla.org)
getUserMedia.shareCamera.message = Would you like to share your camera with %S?
getUserMedia.shareMicrophone.message = Would you like to share your microphone with %S?
getUserMedia.shareScreen.message = Would you like to share your screen with %S?
getUserMedia.shareCameraAndMicrophone.message = Would you like to share your camera and microphone with %S?
getUserMedia.shareCameraAndAudioCapture.message = Would you like to share your camera and this tab's audio with %S?
getUserMedia.shareScreenAndMicrophone.message = Would you like to share your microphone and screen with %S?
getUserMedia.shareScreenAndAudioCapture.message = Would you like to share this tab's audio and your screen with %S?
getUserMedia.shareAudioCapture.message = Would you like to share this tab's audio with %S?
getUserMedia.selectWindow.label=Window to share:
getUserMedia.selectWindow.accesskey=W
getUserMedia.selectScreen.label=Screen to share:
@ -605,7 +601,6 @@ getUserMedia.sharingApplication.message = You are currently sharing an applicati
getUserMedia.sharingScreen.message = You are currently sharing your screen with this page.
getUserMedia.sharingWindow.message = You are currently sharing a window with this page.
getUserMedia.sharingBrowser.message = You are currently sharing a tab with this page.
getUserMedia.sharingAudioCapture.message = You are currently sharing a tab's audio with this page.
getUserMedia.continueSharing.label = Continue Sharing
getUserMedia.continueSharing.accesskey = C
getUserMedia.stopSharing.label = Stop Sharing
@ -615,7 +610,6 @@ getUserMedia.sharingMenu.label = Tabs sharing devices
getUserMedia.sharingMenu.accesskey = d
# LOCALIZATION NOTE (getUserMedia.sharingMenuCamera
# getUserMedia.sharingMenuMicrophone,
# getUserMedia.sharingMenuAudioCapture,
# getUserMedia.sharingMenuApplication,
# getUserMedia.sharingMenuScreen,
# getUserMedia.sharingMenuWindow,
@ -625,11 +619,6 @@ getUserMedia.sharingMenu.accesskey = d
# getUserMedia.sharingMenuCameraMicrophoneScreen,
# getUserMedia.sharingMenuCameraMicrophoneWindow,
# getUserMedia.sharingMenuCameraMicrophoneBrowser,
# getUserMedia.sharingMenuCameraAudioCapture,
# getUserMedia.sharingMenuCameraAudioCaptureApplication,
# getUserMedia.sharingMenuCameraAudioCaptureScreen,
# getUserMedia.sharingMenuCameraAudioCaptureWindow,
# getUserMedia.sharingMenuCameraAudioCaptureBrowser,
# getUserMedia.sharingMenuCameraApplication,
# getUserMedia.sharingMenuCameraScreen,
# getUserMedia.sharingMenuCameraWindow,
@ -641,7 +630,6 @@ getUserMedia.sharingMenu.accesskey = d
# %S is the website origin (e.g. www.mozilla.org)
getUserMedia.sharingMenuCamera = %S (camera)
getUserMedia.sharingMenuMicrophone = %S (microphone)
getUserMedia.sharingMenuAudioCapture = %S (tab audio)
getUserMedia.sharingMenuApplication = %S (application)
getUserMedia.sharingMenuScreen = %S (screen)
getUserMedia.sharingMenuWindow = %S (window)
@ -651,11 +639,6 @@ getUserMedia.sharingMenuCameraMicrophoneApplication = %S (camera, microphone and
getUserMedia.sharingMenuCameraMicrophoneScreen = %S (camera, microphone and screen)
getUserMedia.sharingMenuCameraMicrophoneWindow = %S (camera, microphone and window)
getUserMedia.sharingMenuCameraMicrophoneBrowser = %S (camera, microphone and tab)
getUserMedia.sharingMenuCameraAudioCapture = %S (camera and tab audio)
getUserMedia.sharingMenuCameraAudioCaptureApplication = %S (camera, tab audio and application)
getUserMedia.sharingMenuCameraAudioCaptureScreen = %S (camera, tab audio and screen)
getUserMedia.sharingMenuCameraAudioCaptureWindow = %S (camera, tab audio and window)
getUserMedia.sharingMenuCameraAudioCaptureBrowser = %S (camera, tab audio and tab)
getUserMedia.sharingMenuCameraApplication = %S (camera and application)
getUserMedia.sharingMenuCameraScreen = %S (camera and screen)
getUserMedia.sharingMenuCameraWindow = %S (camera and window)
@ -664,10 +647,6 @@ getUserMedia.sharingMenuMicrophoneApplication = %S (microphone and application)
getUserMedia.sharingMenuMicrophoneScreen = %S (microphone and screen)
getUserMedia.sharingMenuMicrophoneWindow = %S (microphone and window)
getUserMedia.sharingMenuMicrophoneBrowser = %S (microphone and tab)
getUserMedia.sharingMenuAudioCaptureApplication = %S (tab audio and application)
getUserMedia.sharingMenuAudioCaptureScreen = %S (tab audio and screen)
getUserMedia.sharingMenuAudioCaptureWindow = %S (tab audio and window)
getUserMedia.sharingMenuAudioCaptureBrowser = %S (tab audio and tab)
# LOCALIZATION NOTE(getUserMedia.sharingMenuUnknownHost): this is used for the website
# origin for the sharing menu if no readable origin could be deduced from the URL.
getUserMedia.sharingMenuUnknownHost = Unknown origin

View File

@ -86,21 +86,14 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
// MediaStreamConstraints defines video as 'boolean or MediaTrackConstraints'.
let video = aConstraints.video || aConstraints.picture;
let audio = aConstraints.audio;
let sharingScreen = video && typeof(video) != "boolean" &&
video.mediaSource != "camera";
let sharingAudio = audio && typeof(audio) != "boolean" &&
audio.mediaSource != "microphone";
for (let device of aDevices) {
device = device.QueryInterface(Ci.nsIMediaDevice);
switch (device.type) {
case "audio":
// Check that if we got a microphone we have not requested audio capture,
// and that if we requested audio capture we are not getting a microphone
// instead.
if (audio && (device.mediaSource == "microphone") != sharingAudio) {
audioDevices.push({name: device.name, deviceIndex: devices.length,
mediaSource: device.mediaSource});
if (aConstraints.audio) {
audioDevices.push({name: device.name, deviceIndex: devices.length});
devices.push(device);
}
break;
@ -120,7 +113,7 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
if (videoDevices.length)
requestTypes.push(sharingScreen ? "Screen" : "Camera");
if (audioDevices.length)
requestTypes.push(sharingAudio ? "AudioCapture" : "Microphone");
requestTypes.push("Microphone");
if (!requestTypes.length) {
denyRequest({callID: aCallID}, "NotFoundError");
@ -140,7 +133,6 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
secure: aSecure,
requestTypes: requestTypes,
sharingScreen: sharingScreen,
sharingAudio: sharingAudio,
audioDevices: audioDevices,
videoDevices: videoDevices
};

View File

@ -188,8 +188,7 @@ function getHost(uri, href) {
function prompt(aBrowser, aRequest) {
let {audioDevices: audioDevices, videoDevices: videoDevices,
sharingScreen: sharingScreen, sharingAudio: sharingAudio,
requestTypes: requestTypes} = aRequest;
sharingScreen: sharingScreen, requestTypes: requestTypes} = aRequest;
let uri = Services.io.newURI(aRequest.documentURI, null, null);
let host = getHost(uri);
let chromeDoc = aBrowser.ownerDocument;
@ -199,9 +198,10 @@ function prompt(aBrowser, aRequest) {
let message = stringBundle.getFormattedString(stringId, [host]);
let mainLabel;
if (sharingScreen || sharingAudio) {
if (sharingScreen) {
mainLabel = stringBundle.getString("getUserMedia.shareSelectedItems.label");
} else {
}
else {
let string = stringBundle.getString("getUserMedia.shareSelectedDevices.label");
mainLabel = PluralForm.get(requestTypes.length, string);
}
@ -225,8 +225,8 @@ function prompt(aBrowser, aRequest) {
}
}
];
// Bug 1037438: implement 'never' for screen sharing.
if (!sharingScreen && !sharingAudio) {
if (!sharingScreen) { // Bug 1037438: implement 'never' for screen sharing.
secondaryActions.push({
label: stringBundle.getString("getUserMedia.never.label"),
accessKey: stringBundle.getString("getUserMedia.never.accesskey"),
@ -243,10 +243,10 @@ function prompt(aBrowser, aRequest) {
});
}
if (aRequest.secure && !sharingScreen && !sharingAudio) {
if (aRequest.secure && !sharingScreen) {
// Don't show the 'Always' action if the connection isn't secure, or for
// screen/audio sharing (because we can't guess which window the user wants
// to share without prompting).
// screen sharing (because we can't guess which window the user wants to
// share without prompting).
secondaryActions.unshift({
label: stringBundle.getString("getUserMedia.always.label"),
accessKey: stringBundle.getString("getUserMedia.always.accesskey"),
@ -266,8 +266,7 @@ function prompt(aBrowser, aRequest) {
if (aTopic == "shown") {
let PopupNotifications = chromeDoc.defaultView.PopupNotifications;
let popupId = "Devices";
if (requestTypes.length == 1 && (requestTypes[0] == "Microphone" ||
requestTypes[0] == "AudioCapture"))
if (requestTypes.length == 1 && requestTypes[0] == "Microphone")
popupId = "Microphone";
if (requestTypes.indexOf("Screen") != -1)
popupId = "Screen";
@ -385,7 +384,7 @@ function prompt(aBrowser, aRequest) {
chromeDoc.getElementById("webRTC-selectCamera").hidden = !videoDevices.length || sharingScreen;
chromeDoc.getElementById("webRTC-selectWindowOrScreen").hidden = !sharingScreen || !videoDevices.length;
chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length || sharingAudio;
chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length;
let camMenupopup = chromeDoc.getElementById("webRTC-selectCamera-menupopup");
let windowMenupopup = chromeDoc.getElementById("webRTC-selectWindow-menupopup");
@ -394,16 +393,12 @@ function prompt(aBrowser, aRequest) {
listScreenShareDevices(windowMenupopup, videoDevices);
else
listDevices(camMenupopup, videoDevices);
if (!sharingAudio)
listDevices(micMenupopup, audioDevices);
listDevices(micMenupopup, audioDevices);
if (requestTypes.length == 2) {
let stringBundle = chromeDoc.defaultView.gNavigatorBundle;
if (!sharingScreen)
addDeviceToList(camMenupopup, stringBundle.getString("getUserMedia.noVideo.label"), "-1");
if (!sharingAudio)
addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
}
this.mainAction.callback = function(aRemember) {
@ -421,18 +416,13 @@ function prompt(aBrowser, aRequest) {
}
}
if (audioDevices.length) {
if (!sharingAudio) {
let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
let allowMic = audioDeviceIndex != "-1";
if (allowMic)
allowedDevices.push(audioDeviceIndex);
if (aRemember) {
perms.add(uri, "microphone",
allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
}
} else {
// Only one device possible for audio capture.
allowedDevices.push(0);
let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
let allowMic = audioDeviceIndex != "-1";
if (allowMic)
allowedDevices.push(audioDeviceIndex);
if (aRemember) {
perms.add(uri, "microphone",
allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
}
}

View File

@ -35,7 +35,6 @@ NS_IMPL_CYCLE_COLLECTING_RELEASE(AudioChannelAgent)
AudioChannelAgent::AudioChannelAgent()
: mAudioChannelType(AUDIO_AGENT_CHANNEL_ERROR)
, mInnerWindowID(0)
, mIsRegToService(false)
{
}
@ -105,10 +104,6 @@ AudioChannelAgent::InitInternal(nsIDOMWindow* aWindow, int32_t aChannelType,
}
if (aWindow) {
nsCOMPtr<nsPIDOMWindow> pInnerWindow = do_QueryInterface(aWindow);
MOZ_ASSERT(pInnerWindow->IsInnerWindow());
mInnerWindowID = pInnerWindow->WindowID();
nsCOMPtr<nsIDOMWindow> topWindow;
aWindow->GetScriptableTop(getter_AddRefs(topWindow));
mWindow = do_QueryInterface(topWindow);
@ -196,18 +191,3 @@ AudioChannelAgent::WindowID() const
{
return mWindow ? mWindow->WindowID() : 0;
}
void
AudioChannelAgent::WindowAudioCaptureChanged(uint64_t aInnerWindowID)
{
if (aInnerWindowID != mInnerWindowID) {
return;
}
nsCOMPtr<nsIAudioChannelAgentCallback> callback = GetCallback();
if (!callback) {
return;
}
callback->WindowAudioCaptureChanged();
}

View File

@ -34,7 +34,6 @@ public:
AudioChannelAgent();
void WindowVolumeChanged();
void WindowAudioCaptureChanged(uint64_t aInnerWindowID);
nsPIDOMWindow* Window() const
{
@ -62,7 +61,6 @@ private:
nsWeakPtr mWeakCallback;
int32_t mAudioChannelType;
uint64_t mInnerWindowID;
bool mIsRegToService;
};

View File

@ -546,38 +546,6 @@ AudioChannelService::RefreshAgentsVolume(nsPIDOMWindow* aWindow)
}
}
void
AudioChannelService::RefreshAgentsCapture(nsPIDOMWindow* aWindow,
uint64_t aInnerWindowID)
{
MOZ_ASSERT(aWindow);
MOZ_ASSERT(aWindow->IsOuterWindow());
nsCOMPtr<nsIDOMWindow> topWindow;
aWindow->GetScriptableTop(getter_AddRefs(topWindow));
nsCOMPtr<nsPIDOMWindow> pTopWindow = do_QueryInterface(topWindow);
if (!pTopWindow) {
return;
}
AudioChannelWindow* winData = GetWindowData(pTopWindow->WindowID());
// This can happen, but only during shutdown, because the outer window
// changes ScriptableTop, so that its ID is different.
// In this case either we are capturing, and it's too late because the window
// has been closed anyway, or we are un-capturing, and everything has already
// been cleaned up by the HTMLMediaElements or the AudioContexts.
if (!winData) {
return;
}
nsTObserverArray<AudioChannelAgent*>::ForwardIterator
iter(winData->mAgents);
while (iter.HasMore()) {
iter.GetNext()->WindowAudioCaptureChanged(aInnerWindowID);
}
}
/* static */ const nsAttrValue::EnumTable*
AudioChannelService::GetAudioChannelTable()
{

View File

@ -102,14 +102,6 @@ public:
void RefreshAgentsVolume(nsPIDOMWindow* aWindow);
// This method needs to know the inner window that wants to capture audio. We
// group agents per top outer window, but we can have multiple inner windows
// per top outer window (sub-iframes, etc.) and we have to identify all the
// agents just for a particular inner window.
void RefreshAgentsCapture(nsPIDOMWindow* aWindow,
uint64_t aInnerWindowID);
#ifdef MOZ_WIDGET_GONK
void RegisterSpeakerManager(SpeakerManagerService* aSpeakerManager)
{

View File

@ -6,18 +6,13 @@
interface nsIDOMWindow;
[uuid(5fe83b24-38b9-4901-a4a1-d1bd57d3fe18)]
[uuid(4f537c88-3722-4946-9a09-ce559fa0591d)]
interface nsIAudioChannelAgentCallback : nsISupports
{
/**
* Notified when the window volume/mute is changed
*/
void windowVolumeChanged(in float aVolume, in bool aMuted);
/**
* Notified when the capture state is changed.
*/
void windowAudioCaptureChanged();
};
/**

View File

@ -564,7 +564,7 @@ nsPIDOMWindow::nsPIDOMWindow(nsPIDOMWindow *aOuterWindow)
mMayHavePointerEnterLeaveEventListener(false),
mIsModalContentWindow(false),
mIsActive(false), mIsBackground(false),
mAudioMuted(false), mAudioVolume(1.0), mAudioCaptured(false),
mAudioMuted(false), mAudioVolume(1.0),
mDesktopModeViewport(false), mInnerWindow(nullptr),
mOuterWindow(aOuterWindow),
// Make sure no actual window ends up with mWindowID == 0
@ -3745,26 +3745,6 @@ nsPIDOMWindow::RefreshMediaElements()
service->RefreshAgentsVolume(GetOuterWindow());
}
bool
nsPIDOMWindow::GetAudioCaptured() const
{
MOZ_ASSERT(IsInnerWindow());
return mAudioCaptured;
}
nsresult
nsPIDOMWindow::SetAudioCapture(bool aCapture)
{
MOZ_ASSERT(IsInnerWindow());
mAudioCaptured = aCapture;
nsRefPtr<AudioChannelService> service = AudioChannelService::GetOrCreate();
service->RefreshAgentsCapture(GetOuterWindow(), mWindowID);
return NS_OK;
}
// nsISpeechSynthesisGetter
#ifdef MOZ_WEBSPEECH

View File

@ -185,9 +185,6 @@ public:
float GetAudioVolume() const;
nsresult SetAudioVolume(float aVolume);
bool GetAudioCaptured() const;
nsresult SetAudioCapture(bool aCapture);
virtual void SetServiceWorkersTestingEnabled(bool aEnabled)
{
MOZ_ASSERT(IsOuterWindow());
@ -825,8 +822,6 @@ protected:
bool mAudioMuted;
float mAudioVolume;
bool mAudioCaptured;
// current desktop mode flag.
bool mDesktopModeViewport;

View File

@ -471,12 +471,6 @@ FMRadio::WindowVolumeChanged(float aVolume, bool aMuted)
return NS_OK;
}
NS_IMETHODIMP
FMRadio::WindowAudioCaptureChanged()
{
return NS_OK;
}
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(FMRadio)
NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference)
NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)

View File

@ -2030,7 +2030,6 @@ HTMLMediaElement::HTMLMediaElement(already_AddRefed<mozilla::dom::NodeInfo>& aNo
mAllowCasting(false),
mIsCasting(false),
mAudioCaptured(false),
mAudioCapturedByWindow(false),
mPlayingBeforeSeek(false),
mPlayingThroughTheAudioChannelBeforeSeek(false),
mPausedForInactiveDocumentOrChannel(false),
@ -2098,11 +2097,6 @@ HTMLMediaElement::~HTMLMediaElement()
EndSrcMediaStreamPlayback();
}
if (mCaptureStreamPort) {
mCaptureStreamPort->Destroy();
mCaptureStreamPort = nullptr;
}
NS_ASSERTION(MediaElementTableCount(this, mLoadingSrc) == 0,
"Destroyed media element should no longer be in element table");
@ -4481,7 +4475,8 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
(!mPaused &&
(HasAttr(kNameSpaceID_None, nsGkAtoms::loop) ||
(mReadyState >= nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA &&
!IsPlaybackEnded()) ||
!IsPlaybackEnded() &&
(!mSrcStream || HasAudio())) ||
mPlayingThroughTheAudioChannelBeforeSeek));
if (playingThroughTheAudioChannel != mPlayingThroughTheAudioChannel) {
mPlayingThroughTheAudioChannel = playingThroughTheAudioChannel;
@ -4497,7 +4492,7 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
if (!mAudioChannelAgent) {
return;
}
mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetInnerWindow(),
mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetWindow(),
static_cast<int32_t>(mAudioChannel),
this);
}
@ -4509,10 +4504,6 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
void
HTMLMediaElement::NotifyAudioChannelAgent(bool aPlaying)
{
// Immediately check if this should go to the MSG instead of the normal
// media playback route.
WindowAudioCaptureChanged();
// This is needed to pass nsContentUtils::IsCallerChrome().
// The AudioChannel API should not be called from content, but it can happen
// that this method has some content JS in its stack.
@ -4683,53 +4674,6 @@ HTMLMediaElement::GetTopLevelPrincipal()
}
#endif // MOZ_EME
NS_IMETHODIMP HTMLMediaElement::WindowAudioCaptureChanged()
{
MOZ_ASSERT(mAudioChannelAgent);
if (!OwnerDoc()->GetInnerWindow()) {
return NS_OK;
}
bool captured = OwnerDoc()->GetInnerWindow()->GetAudioCaptured();
if (captured != mAudioCapturedByWindow) {
if (captured) {
mAudioCapturedByWindow = true;
nsCOMPtr<nsPIDOMWindow> window =
do_QueryInterface(OwnerDoc()->GetParentObject());
uint64_t id = window->WindowID();
MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
if (!mPlaybackStream) {
nsRefPtr<DOMMediaStream> stream = CaptureStreamInternal(false, msg);
mCaptureStreamPort = msg->ConnectToCaptureStream(id, stream->GetStream());
} else {
mCaptureStreamPort = msg->ConnectToCaptureStream(id, mPlaybackStream->GetStream());
}
} else {
mAudioCapturedByWindow = false;
if (mDecoder) {
ProcessedMediaStream* ps =
mCaptureStreamPort->GetSource()->AsProcessedStream();
MOZ_ASSERT(ps);
for (uint32_t i = 0; i < mOutputStreams.Length(); i++) {
if (mOutputStreams[i].mStream->GetStream() == ps) {
mOutputStreams.RemoveElementAt(i);
break;
}
}
mDecoder->RemoveOutputStream(ps);
}
mCaptureStreamPort->Destroy();
mCaptureStreamPort = nullptr;
}
}
return NS_OK;
}
AudioTrackList*
HTMLMediaElement::AudioTracks()
{

View File

@ -1074,9 +1074,6 @@ protected:
// Holds a reference to a MediaInputPort connecting mSrcStream to mPlaybackStream.
nsRefPtr<MediaInputPort> mPlaybackStreamInputPort;
// Holds a reference to the stream connecting this stream to the capture sink.
nsRefPtr<MediaInputPort> mCaptureStreamPort;
// Holds a reference to a stream with mSrcStream as input but intended for
// playback. Used so we don't block playback of other video elements
// playing the same mSrcStream.
@ -1286,9 +1283,6 @@ protected:
// True if the sound is being captured.
bool mAudioCaptured;
// True if the sound is being captured by the window.
bool mAudioCapturedByWindow;
// If TRUE then the media element was actively playing before the currently
// in progress seeking. If FALSE then the media element is either not seeking
// or was not actively playing before the current seek. Used to decide whether

View File

@ -1,133 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaStreamGraphImpl.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/unused.h"
#include "AudioSegment.h"
#include "mozilla/Logging.h"
#include "mozilla/Attributes.h"
#include "AudioCaptureStream.h"
#include "ImageContainer.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "webaudio/MediaStreamAudioDestinationNode.h"
#include <algorithm>
#include "DOMMediaStream.h"
using namespace mozilla::layers;
using namespace mozilla::dom;
using namespace mozilla::gfx;
namespace mozilla
{
// We are mixing to mono until PeerConnection can accept stereo
static const uint32_t MONO = 1;
AudioCaptureStream::AudioCaptureStream(DOMMediaStream* aWrapper)
: ProcessedMediaStream(aWrapper), mTrackCreated(false)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_COUNT_CTOR(AudioCaptureStream);
mMixer.AddCallback(this);
}
AudioCaptureStream::~AudioCaptureStream()
{
MOZ_COUNT_DTOR(AudioCaptureStream);
mMixer.RemoveCallback(this);
}
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
uint32_t aFlags)
{
uint32_t inputCount = mInputs.Length();
StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
// Notify the DOM everything is in order.
if (!mTrackCreated) {
for (uint32_t i = 0; i < mListeners.Length(); i++) {
MediaStreamListener* l = mListeners[i];
AudioSegment tmp;
l->NotifyQueuedTrackChanges(
Graph(), AUDIO_TRACK, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
l->NotifyFinishedTrackCreation(Graph());
}
mTrackCreated = true;
}
// If the captured stream is connected back to an object on the page (be it
// an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
// situation occurs. This can work if it's an AudioContext with at least one
// DelayNode, but the MSG will mute the whole cycle otherwise.
bool blocked = mFinished || mBlocked.GetAt(aFrom);
if (blocked || InMutedCycle() || inputCount == 0) {
track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
} else {
// We mix down all the tracks of all inputs to a mono track. Everything
// is {up,down}-mixed to mono.
mMixer.StartMixing();
AudioSegment output;
for (uint32_t i = 0; i < inputCount; i++) {
MediaStream* s = mInputs[i]->GetSource();
StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
while (!tracks.IsEnded()) {
AudioSegment* inputSegment = tracks->Get<AudioSegment>();
StreamTime inputStart = s->GraphTimeToStreamTime(aFrom);
StreamTime inputEnd = s->GraphTimeToStreamTime(aTo);
AudioSegment toMix;
toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
// Care for streams blocked in the [aFrom, aTo] range.
if (inputEnd - inputStart < aTo - aFrom) {
toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
}
toMix.Mix(mMixer, MONO, Graph()->GraphRate());
tracks.Next();
}
}
// This calls MixerCallback below
mMixer.FinishMixing();
}
// Regardless of the status of the input tracks, we go forward.
mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime((aTo)));
}
void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat, uint32_t aChannels,
uint32_t aFrames, uint32_t aSampleRate)
{
nsAutoTArray<nsTArray<AudioDataValue>, MONO> output;
nsAutoTArray<const AudioDataValue*, MONO> bufferPtrs;
output.SetLength(MONO);
bufferPtrs.SetLength(MONO);
uint32_t written = 0;
// We need to copy here, because the mixer will reuse the storage; we should
// not hold onto it. Buffers are in planar format.
for (uint32_t channel = 0; channel < aChannels; channel++) {
AudioDataValue* out = output[channel].AppendElements(aFrames);
PodCopy(out, aMixedBuffer + written, aFrames);
bufferPtrs[channel] = out;
written += aFrames;
}
AudioChunk chunk;
chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
chunk.mDuration = aFrames;
chunk.mBufferFormat = aFormat;
chunk.mVolume = 1.0f;
chunk.mChannelData.SetLength(MONO);
for (uint32_t channel = 0; channel < aChannels; channel++) {
chunk.mChannelData[channel] = bufferPtrs[channel];
}
// Now we have mixed data; simply append it to our track.
EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
}

View File

@ -1,43 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIOCAPTURESTREAM_H_
#define MOZILLA_AUDIOCAPTURESTREAM_H_
#include "MediaStreamGraph.h"
#include "AudioMixer.h"
#include <algorithm>
namespace mozilla
{
class DOMMediaStream;
/**
* See MediaStreamGraph::CreateAudioCaptureStream.
*/
class AudioCaptureStream : public ProcessedMediaStream,
public MixerCallbackReceiver
{
public:
explicit AudioCaptureStream(DOMMediaStream* aWrapper);
virtual ~AudioCaptureStream();
void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
protected:
enum { AUDIO_TRACK = 1 };
void MixerCallback(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat,
uint32_t aChannels, uint32_t aFrames,
uint32_t aSampleRate) override;
AudioMixer mMixer;
bool mTrackCreated;
};
}
#endif /* MOZILLA_AUDIOCAPTURESTREAM_H_ */

View File

@ -4,11 +4,26 @@
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioChannelFormat.h"
#include "nsTArray.h"
#include <algorithm>
namespace mozilla {
enum {
SURROUND_L,
SURROUND_R,
SURROUND_C,
SURROUND_LFE,
SURROUND_SL,
SURROUND_SR
};
static const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
static const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
static const float IGNORE_F = 0.0f;
uint32_t
GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2)
{
@ -48,6 +63,9 @@ gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{ { 0, 1, 2, 3, 4, IGNORE } }
};
static const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
{ 0, 5, 9, 12, 14 };
void
AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
@ -58,8 +76,8 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
NS_ASSERTION(outputChannelCount > inputChannelCount,
"No up-mix needed");
MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
NS_ASSERTION(inputChannelCount > 0, "Bad number of channels");
NS_ASSERTION(outputChannelCount > 0, "Bad number of channels");
aChannelArray->SetLength(outputChannelCount);
@ -90,4 +108,94 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
}
}
/**
* DownMixMatrix represents a conversion matrix efficiently by exploiting the
* fact that each input channel contributes to at most one output channel,
* except possibly for the C input channel in layouts that have one. Also,
* every input channel is multiplied by the same coefficient for every output
* channel it contributes to.
*/
struct DownMixMatrix {
// Every input channel c is copied to output channel mInputDestination[c]
// after multiplying by mInputCoefficient[c].
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
// If not IGNORE, then the C channel is copied to this output channel after
// multiplying by its coefficient.
uint8_t mCExtraDestination;
float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
};
static const DownMixMatrix
gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Downmixes to mono
{ { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
{ { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
// Downmixes to stereo
{ { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
{ { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
// Downmixes to 3-channel
{ { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
// Downmixes to quad
{ { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
// Downmixes to 5-channel
{ { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
};
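To make the matrix encoding concrete, here is a hand-worked reading of the last stereo row above, the 5.1-to-stereo downmix (an editorial annotation, not part of the patch). That row maps the inputs (L, R, C, LFE, SL, SR) to destinations (0, 1, 0, IGNORE, 0, 1), and mCExtraDestination = 1 additionally copies C into output 1; the coefficient 0.7071 is approximately 1/sqrt(2):

\mathrm{out}_0 = L + \tfrac{1}{\sqrt{2}}\,C + \tfrac{1}{\sqrt{2}}\,SL, \qquad \mathrm{out}_1 = R + \tfrac{1}{\sqrt{2}}\,C + \tfrac{1}{\sqrt{2}}\,SR

The lookup in AudioChannelsDownMix below selects exactly this row: for 6 input and 2 output channels, gMixingMatrixIndexByChannels[2 - 1] + 6 - 2 - 1 = 5 + 3 = 8, the ninth entry of gDownMixMatrices.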
void
AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
float** aOutputChannels,
uint32_t aOutputChannelCount,
uint32_t aDuration)
{
uint32_t inputChannelCount = aChannelArray.Length();
const void* const* inputChannels = aChannelArray.Elements();
NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
if (inputChannelCount > 6) {
// Just drop the unknown channels.
for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(float));
}
return;
}
// Ignore unknown channels, they're just dropped.
inputChannelCount = std::min<uint32_t>(6, inputChannelCount);
const DownMixMatrix& m = gDownMixMatrices[
gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
inputChannelCount - aOutputChannelCount - 1];
// This is slow, but general. We can define custom code for special
// cases later.
for (uint32_t s = 0; s < aDuration; ++s) {
// Reserve an extra junk channel at the end for the cases where we
// want an input channel to contribute to nothing
float outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
memset(outputChannels, 0, sizeof(float)*(CUSTOM_CHANNEL_LAYOUTS));
for (uint32_t c = 0; c < inputChannelCount; ++c) {
outputChannels[m.mInputDestination[c]] +=
m.mInputCoefficient[c]*(static_cast<const float*>(inputChannels[c]))[s];
}
// Utilize the fact that in every layout, C is the third channel.
if (m.mCExtraDestination != IGNORE) {
outputChannels[m.mCExtraDestination] +=
m.mInputCoefficient[SURROUND_C]*(static_cast<const float*>(inputChannels[SURROUND_C]))[s];
}
for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
aOutputChannels[c][s] = outputChannels[c];
}
}
}
} // namespace mozilla

View File

@ -9,8 +9,6 @@
#include <stdint.h>
#include "nsTArrayForwardDeclare.h"
#include "AudioSampleFormat.h"
#include "nsTArray.h"
namespace mozilla {
@ -31,26 +29,6 @@ namespace mozilla {
* Only 1, 2, 4 and 6 are currently defined in Web Audio.
*/
enum {
SURROUND_L,
SURROUND_R,
SURROUND_C,
SURROUND_LFE,
SURROUND_SL,
SURROUND_SR
};
const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
// This is defined by some Windows SDK header.
#undef IGNORE
const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
const float IGNORE_F = 0.0f;
const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
{ 0, 5, 9, 12, 14 };
/**
* Return a channel count whose channel layout includes all the channels from
* aChannels1 and aChannels2.
@ -75,102 +53,19 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
const void* aZeroChannel);
/**
* DownMixMatrix represents a conversion matrix efficiently by exploiting the
* fact that each input channel contributes to at most one output channel,
* except possibly for the C input channel in layouts that have one. Also,
* every input channel is multiplied by the same coefficient for every output
* channel it contributes to.
* Given an array of input channels (which must be float format!),
* downmix to aOutputChannelCount, and copy the results to the
* channel buffers in aOutputChannels.
* Don't call this with input count <= output count.
*/
struct DownMixMatrix {
// Every input channel c is copied to output channel mInputDestination[c]
// after multiplying by mInputCoefficient[c].
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
// If not IGNORE, then the C channel is copied to this output channel after
// multiplying by its coefficient.
uint8_t mCExtraDestination;
float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
};
static const DownMixMatrix
gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Downmixes to mono
{ { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
{ { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
// Downmixes to stereo
{ { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
{ { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
// Downmixes to 3-channel
{ { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
// Downmixes to quad
{ { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
// Downmixes to 5-channel
{ { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
};
/**
* Given an array of input channels, downmix to aOutputChannelCount, and copy
* the results to the channel buffers in aOutputChannels. Don't call this with
* input count <= output count.
*/
template<typename T>
void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
T** aOutputChannels,
void
AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
float** aOutputChannels,
uint32_t aOutputChannelCount,
uint32_t aDuration)
{
uint32_t inputChannelCount = aChannelArray.Length();
const void* const* inputChannels = aChannelArray.Elements();
NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
if (inputChannelCount > 6) {
// Just drop the unknown channels.
for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(T));
}
return;
}
// Ignore unknown channels, they're just dropped.
inputChannelCount = std::min<uint32_t>(6, inputChannelCount);
const DownMixMatrix& m = gDownMixMatrices[
gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
inputChannelCount - aOutputChannelCount - 1];
// This is slow, but general. We can define custom code for special
// cases later.
for (uint32_t s = 0; s < aDuration; ++s) {
// Reserve an extra junk channel at the end for the cases where we
// want an input channel to contribute to nothing
T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
memset(outputChannels, 0, sizeof(T)*(CUSTOM_CHANNEL_LAYOUTS));
for (uint32_t c = 0; c < inputChannelCount; ++c) {
outputChannels[m.mInputDestination[c]] +=
m.mInputCoefficient[c]*(static_cast<const T*>(inputChannels[c]))[s];
}
// Utilize the fact that in every layout, C is the third channel.
if (m.mCExtraDestination != IGNORE) {
outputChannels[m.mCExtraDestination] +=
m.mInputCoefficient[SURROUND_C]*(static_cast<const T*>(inputChannels[SURROUND_C]))[s];
}
for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
aOutputChannels[c][s] = outputChannels[c];
}
}
}
uint32_t aDuration);
// A version of AudioChannelsDownMix that downmixes int16_ts may be required.
} // namespace mozilla

View File

@ -26,9 +26,7 @@ struct MixerCallbackReceiver {
* stream.
*
* AudioMixer::Mix is to be called repeatedly with buffers that have the same
* length, sample rate, sample format and channel count. This class works with
* interleaved and planar buffers, but the buffers mixed must be of the same
* type during a mixing cycle.
* length, sample rate, sample format and channel count.
*
* When all the tracks have been mixed, calling FinishMixing will call back with
* a buffer containing the mixed audio data.
@ -73,7 +71,7 @@ public:
mSampleRate = mChannels = mFrames = 0;
}
/* Add a buffer to the mix. */
/* Add a buffer to the mix. aSamples is interleaved. */
void Mix(AudioDataValue* aSamples,
uint32_t aChannels,
uint32_t aFrames,
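The restored comment documents that Mix() consumes interleaved samples. As a minimal sketch of what that layout means (a hypothetical helper for illustration, not part of this tree), interleaving planar stereo into the order aSamples expects:

#include <cstddef>
#include <cstdint>
#include <vector>

// Turn planar stereo ([L0..Ln-1] and [R0..Rn-1]) into the interleaved
// [L0 R0 L1 R1 ...] buffer layout described by the comment above.
std::vector<float> InterleaveStereo(const float* left, const float* right,
                                    uint32_t frames)
{
  std::vector<float> out(static_cast<std::size_t>(frames) * 2);
  for (uint32_t i = 0; i < frames; ++i) {
    out[2 * i] = left[i];
    out[2 * i + 1] = right[i];
  }
  return out;
}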

View File

@ -146,103 +146,6 @@ void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInR
}
}
// This helps to safely get a pointer to the position where we want to start
// writing in a planar audio buffer, depending on the channel and the offset
// in the buffer.
static AudioDataValue*
PointerForOffsetInChannel(AudioDataValue* aData, size_t aLengthSamples,
uint32_t aChannelCount, uint32_t aChannel,
uint32_t aOffsetSamples)
{
size_t samplesPerChannel = aLengthSamples / aChannelCount;
size_t beginningOfChannel = samplesPerChannel * aChannel;
MOZ_ASSERT(aChannel * samplesPerChannel + aOffsetSamples < aLengthSamples,
"Offset request out of bounds.");
return aData + beginningOfChannel + aOffsetSamples;
}
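A quick standalone check of that arithmetic (an editorial sketch with hypothetical sizes, not part of the patch): for a planar buffer holding 2 channels of 4 samples each, channel 1 begins at index 4, so offset 2 within channel 1 lands at index 6.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Restates the indexing of PointerForOffsetInChannel above with plain floats.
static float* PlanarPointer(float* data, std::size_t lengthSamples,
                            uint32_t channelCount, uint32_t channel,
                            uint32_t offsetSamples)
{
  std::size_t samplesPerChannel = lengthSamples / channelCount;
  return data + samplesPerChannel * channel + offsetSamples;
}

int main()
{
  float buf[8] = {}; // 2 channels x 4 samples, planar
  assert(PlanarPointer(buf, 8, 2, 1, 2) == buf + 6);
  return 0;
}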
void
AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
uint32_t aSampleRate)
{
nsAutoTArray<AudioDataValue, AUDIO_PROCESSING_FRAMES* GUESS_AUDIO_CHANNELS>
buf;
nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channelData;
uint32_t offsetSamples = 0;
uint32_t duration = GetDuration();
if (duration <= 0) {
MOZ_ASSERT(duration == 0);
return;
}
uint32_t outBufferLength = duration * aOutputChannels;
buf.SetLength(outBufferLength);
for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
AudioChunk& c = *ci;
uint32_t frames = c.mDuration;
// If the chunk is silent, simply write the right amount of silence into the
// buffers.
if (c.mBufferFormat == AUDIO_FORMAT_SILENCE) {
for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
AudioDataValue* ptr =
PointerForOffsetInChannel(buf.Elements(), outBufferLength,
aOutputChannels, channel, offsetSamples);
PodZero(ptr, frames);
}
} else {
// Otherwise, we need to up-mix or down-mix appropriately, depending on the
// input and desired output channel counts.
channelData.SetLength(c.mChannelData.Length());
for (uint32_t i = 0; i < channelData.Length(); ++i) {
channelData[i] = c.mChannelData[i];
}
if (channelData.Length() < aOutputChannels) {
// Up-mix.
AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
AudioDataValue* ptr =
PointerForOffsetInChannel(buf.Elements(), outBufferLength,
aOutputChannels, channel, offsetSamples);
PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
frames);
}
MOZ_ASSERT(channelData.Length() == aOutputChannels);
} else if (channelData.Length() > aOutputChannels) {
// Down mix.
nsAutoTArray<AudioDataValue*, GUESS_AUDIO_CHANNELS> outChannelPtrs;
outChannelPtrs.SetLength(aOutputChannels);
for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
outChannelPtrs[channel] =
PointerForOffsetInChannel(buf.Elements(), outBufferLength,
aOutputChannels, channel, offsetSamples);
}
AudioChannelsDownMix(channelData, outChannelPtrs.Elements(),
aOutputChannels, frames);
} else {
// The channel count is already what we want, just copy it over.
for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
AudioDataValue* ptr =
PointerForOffsetInChannel(buf.Elements(), outBufferLength,
aOutputChannels, channel, offsetSamples);
PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
frames);
}
}
}
offsetSamples += frames;
}
if (offsetSamples) {
MOZ_ASSERT(offsetSamples == outBufferLength / aOutputChannels,
"We forgot to write some samples?");
aMixer.Mix(buf.Elements(), aOutputChannels, offsetSamples, aSampleRate);
}
}
void
AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
{

View File

@ -299,14 +299,7 @@ public:
return chunk;
}
void ApplyVolume(float aVolume);
// Mix the segment into a mixer, interleaved. This is useful to output a
// segment to a system audio callback. It up or down mixes to aChannelCount
// channels.
void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount,
uint32_t aSampleRate);
// Mix the segment into a mixer, keeping it planar, up or down mixing to
// aChannelCount channels.
void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
int ChannelCount() {
NS_WARN_IF_FALSE(!mChunks.IsEmpty(),

View File

@ -301,18 +301,6 @@ DOMMediaStream::InitTrackUnionStream(nsIDOMWindow* aWindow,
InitStreamCommon(aGraph->CreateTrackUnionStream(this));
}
void
DOMMediaStream::InitAudioCaptureStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph)
{
mWindow = aWindow;
if (!aGraph) {
aGraph = MediaStreamGraph::GetInstance();
}
InitStreamCommon(aGraph->CreateAudioCaptureStream(this));
}
void
DOMMediaStream::InitStreamCommon(MediaStream* aStream)
{
@ -341,15 +329,6 @@ DOMMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
return stream.forget();
}
already_AddRefed<DOMMediaStream>
DOMMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph)
{
nsRefPtr<DOMMediaStream> stream = new DOMMediaStream();
stream->InitAudioCaptureStream(aWindow, aGraph);
return stream.forget();
}
void
DOMMediaStream::SetTrackEnabled(TrackID aTrackID, bool aEnabled)
{
@ -674,15 +653,6 @@ DOMLocalMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
return stream.forget();
}
already_AddRefed<DOMLocalMediaStream>
DOMLocalMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph)
{
nsRefPtr<DOMLocalMediaStream> stream = new DOMLocalMediaStream();
stream->InitAudioCaptureStream(aWindow, aGraph);
return stream.forget();
}
DOMAudioNodeMediaStream::DOMAudioNodeMediaStream(AudioNode* aNode)
: mStreamNode(aNode)
{

View File

@ -198,13 +198,6 @@ public:
static already_AddRefed<DOMMediaStream> CreateTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
/**
* Create an nsDOMMediaStream whose underlying stream is an
* AudioCaptureStream
*/
static already_AddRefed<DOMMediaStream> CreateAudioCaptureStream(
nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
void SetLogicalStreamStartTime(StreamTime aTime)
{
mLogicalStreamStartTime = aTime;
@ -268,8 +261,6 @@ protected:
MediaStreamGraph* aGraph = nullptr);
void InitTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
void InitAudioCaptureStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
void InitStreamCommon(MediaStream* aStream);
already_AddRefed<AudioTrack> CreateAudioTrack(AudioStreamTrack* aStreamTrack);
already_AddRefed<VideoTrack> CreateVideoTrack(VideoStreamTrack* aStreamTrack);
@ -360,12 +351,6 @@ public:
CreateTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
/**
* Create an nsDOMLocalMediaStream whose underlying stream is an
* AudioCaptureStream. */
static already_AddRefed<DOMLocalMediaStream> CreateAudioCaptureStream(
nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
protected:
virtual ~DOMLocalMediaStream();
};

View File

@ -289,14 +289,6 @@ DecodedStream::OutputStreams()
return mOutputStreams;
}
bool
DecodedStream::HasConsumers() const
{
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
return !mOutputStreams.IsEmpty();
}
ReentrantMonitor&
DecodedStream::GetReentrantMonitor() const
{

View File

@ -114,7 +114,6 @@ public:
int64_t AudioEndTime() const;
int64_t GetPosition() const;
bool IsFinished() const;
bool HasConsumers() const;
// Return true if stream is finished.
bool SendData(double aVolume, bool aIsSameOrigin);

View File

@ -326,13 +326,6 @@ void MediaDecoder::AddOutputStream(ProcessedMediaStream* aStream,
mDecoderStateMachine->AddOutputStream(aStream, aFinishWhenEnded);
}
void MediaDecoder::RemoveOutputStream(MediaStream* aStream)
{
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
mDecoderStateMachine->RemoveOutputStream(aStream);
}
double MediaDecoder::GetDuration()
{
MOZ_ASSERT(NS_IsMainThread());

View File

@ -399,8 +399,6 @@ public:
// The stream is initially blocked. The decoder is responsible for unblocking
// it while it is playing back.
virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
virtual void RemoveOutputStream(MediaStream* aStream);
// Return the duration of the video in seconds.
virtual double GetDuration();

View File

@ -3177,25 +3177,6 @@ void MediaDecoderStateMachine::DispatchAudioCaptured()
OwnerThread()->Dispatch(r.forget());
}
void MediaDecoderStateMachine::DispatchAudioUncaptured()
{
nsRefPtr<MediaDecoderStateMachine> self = this;
nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () -> void
{
MOZ_ASSERT(self->OnTaskQueue());
ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
if (self->mAudioCaptured) {
// Start the audio sink again
self->mAudioCaptured = false;
if (self->IsPlaying()) {
self->StartAudioThread();
}
self->ScheduleStateMachine();
}
});
OwnerThread()->Dispatch(r.forget());
}
void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
bool aFinishWhenEnded)
{
@ -3205,16 +3186,6 @@ void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
DispatchAudioCaptured();
}
void MediaDecoderStateMachine::RemoveOutputStream(MediaStream* aStream)
{
MOZ_ASSERT(NS_IsMainThread());
DECODER_LOG("RemoveOutputStream=%p!", aStream);
mDecodedStream->Remove(aStream);
if (!mDecodedStream->HasConsumers()) {
DispatchAudioUncaptured();
}
}
} // namespace mozilla
// avoid redefined macro in unified build

View File

@ -149,8 +149,6 @@ public:
};
void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
void RemoveOutputStream(MediaStream* aStream);
// Set/Unset dormant state.
void SetDormant(bool aDormant);
@ -162,7 +160,6 @@ private:
void InitializationTask();
void DispatchAudioCaptured();
void DispatchAudioUncaptured();
void Shutdown();
public:

View File

@ -300,8 +300,7 @@ protected:
NS_IMPL_ISUPPORTS(MediaDevice, nsIMediaDevice)
MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
: mMediaSource(aSource->GetMediaSource())
, mSource(aSource)
: mSource(aSource)
, mIsVideo(aIsVideo)
{
mSource->GetName(mName);
@ -312,7 +311,9 @@ MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
VideoDevice::VideoDevice(MediaEngineVideoSource* aSource)
: MediaDevice(aSource, true)
{}
{
mMediaSource = aSource->GetMediaSource();
}
/**
* Helper functions that implement the constraints algorithm from
@ -438,8 +439,6 @@ MediaDevice::GetMediaSource(nsAString& aMediaSource)
{
if (mMediaSource == dom::MediaSourceEnum::Microphone) {
aMediaSource.Assign(NS_LITERAL_STRING("microphone"));
} else if (mMediaSource == dom::MediaSourceEnum::AudioCapture) {
aMediaSource.Assign(NS_LITERAL_STRING("audioCapture"));
} else if (mMediaSource == dom::MediaSourceEnum::Window) { // this will go away
aMediaSource.Assign(NS_LITERAL_STRING("window"));
} else { // all the rest are shared
@ -785,55 +784,11 @@ public:
}
}
#endif
MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
nsRefPtr<SourceMediaStream> stream = msg->CreateSourceStream(nullptr);
nsRefPtr<DOMLocalMediaStream> domStream;
// AudioCapture is a special case here, in the sense that we're not really
// using the audio source and the SourceMediaStream, which act as
// placeholders. We re-route a number of streams internally in the MSG and
// mix them down instead.
if (mAudioSource &&
mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
domStream = DOMLocalMediaStream::CreateAudioCaptureStream(window);
// It should be possible to pipe the capture stream to anything. CORS is
// not a problem here; we have explicit user consent.
domStream->SetPrincipal(window->GetExtantDoc()->NodePrincipal());
msg->RegisterCaptureStreamForWindow(
mWindowID, domStream->GetStream()->AsProcessedStream());
window->SetAudioCapture(true);
} else {
// Normal case, connect the source stream to the track union stream to
// avoid us blocking
nsRefPtr<nsDOMUserMediaStream> trackunion =
nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
mAudioSource, mVideoSource);
trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
trackunion->mSourceStream = stream;
trackunion->mPort = port.forget();
// Log the relationship between SourceMediaStream and TrackUnion stream
// Make sure logger starts before capture
AsyncLatencyLogger::Get(true);
LogLatency(AsyncLatencyLogger::MediaStreamCreate,
reinterpret_cast<uint64_t>(stream.get()),
reinterpret_cast<int64_t>(trackunion->GetStream()));
nsCOMPtr<nsIPrincipal> principal;
if (mPeerIdentity) {
principal = nsNullPrincipal::Create();
trackunion->SetPeerIdentity(mPeerIdentity.forget());
} else {
principal = window->GetExtantDoc()->NodePrincipal();
}
trackunion->CombineWithPrincipal(principal);
domStream = trackunion.forget();
}
if (!domStream || sInShutdown) {
// Create a media stream.
nsRefPtr<nsDOMUserMediaStream> trackunion =
nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
mAudioSource, mVideoSource);
if (!trackunion || sInShutdown) {
nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onFailure = mOnFailure.forget();
LOG(("Returning error for getUserMedia() - no stream"));
@ -847,6 +802,36 @@ public:
}
return NS_OK;
}
trackunion->AudioConfig(aec_on, (uint32_t) aec,
agc_on, (uint32_t) agc,
noise_on, (uint32_t) noise,
playout_delay);
MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
// connect the source stream to the track union stream to avoid us blocking
trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
trackunion->mSourceStream = stream;
trackunion->mPort = port.forget();
// Log the relationship between SourceMediaStream and TrackUnion stream
// Make sure logger starts before capture
AsyncLatencyLogger::Get(true);
LogLatency(AsyncLatencyLogger::MediaStreamCreate,
reinterpret_cast<uint64_t>(stream.get()),
reinterpret_cast<int64_t>(trackunion->GetStream()));
nsCOMPtr<nsIPrincipal> principal;
if (mPeerIdentity) {
principal = nsNullPrincipal::Create();
trackunion->SetPeerIdentity(mPeerIdentity.forget());
} else {
principal = window->GetExtantDoc()->NodePrincipal();
}
trackunion->CombineWithPrincipal(principal);
// The listener was added at the beginning in an inactive state.
// Activate our listener. We'll call Start() on the source when we get a callback
@ -856,7 +841,7 @@ public:
// Note: includes JS callbacks; must be released on MainThread
TracksAvailableCallback* tracksAvailableCallback =
new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, domStream);
new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, trackunion);
mListener->AudioConfig(aec_on, (uint32_t) aec,
agc_on, (uint32_t) agc,
@ -867,11 +852,11 @@ public:
// because that can take a while.
// Pass ownership of trackunion to the MediaOperationTask
// to ensure it's kept alive until the MediaOperationTask runs (at least).
MediaManager::PostTask(
FROM_HERE, new MediaOperationTask(MEDIA_START, mListener, domStream,
tracksAvailableCallback, mAudioSource,
mVideoSource, false, mWindowID,
mOnFailure.forget()));
MediaManager::PostTask(FROM_HERE,
new MediaOperationTask(MEDIA_START, mListener, trackunion,
tracksAvailableCallback,
mAudioSource, mVideoSource, false, mWindowID,
mOnFailure.forget()));
// We won't need mOnFailure now.
mOnFailure = nullptr;
@ -1260,9 +1245,7 @@ static auto& MediaManager_AnonymizeDevices = MediaManager::AnonymizeDevices;
*/
already_AddRefed<MediaManager::PledgeSourceSet>
MediaManager::EnumerateRawDevices(uint64_t aWindowId,
MediaSourceEnum aVideoType,
MediaSourceEnum aAudioType,
MediaManager::EnumerateRawDevices(uint64_t aWindowId, MediaSourceEnum aVideoType,
bool aFake, bool aFakeTracks)
{
MOZ_ASSERT(NS_IsMainThread());
@ -1292,8 +1275,7 @@ MediaManager::EnumerateRawDevices(uint64_t aWindowId,
MediaManager::PostTask(FROM_HERE, NewTaskFrom([id, aWindowId, audioLoopDev,
videoLoopDev, aVideoType,
aAudioType, aFake,
aFakeTracks]() mutable {
aFake, aFakeTracks]() mutable {
nsRefPtr<MediaEngine> backend;
if (aFake) {
backend = new MediaEngineDefault(aFakeTracks);
@ -1312,7 +1294,7 @@ MediaManager::EnumerateRawDevices(uint64_t aWindowId,
}
nsTArray<nsRefPtr<AudioDevice>> audios;
GetSources(backend, aAudioType,
GetSources(backend, dom::MediaSourceEnum::Microphone,
&MediaEngine::EnumerateAudioDevices, audios, audioLoopDev);
for (auto& source : audios) {
result->AppendElement(source);
@ -1634,7 +1616,6 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
}
MediaSourceEnum videoType = dom::MediaSourceEnum::Camera;
MediaSourceEnum audioType = dom::MediaSourceEnum::Microphone;
if (c.mVideo.IsMediaTrackConstraints()) {
auto& vc = c.mVideo.GetAsMediaTrackConstraints();
@ -1723,23 +1704,6 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
privileged = false;
}
}
if (c.mAudio.IsMediaTrackConstraints()) {
auto& ac = c.mAudio.GetAsMediaTrackConstraints();
audioType = StringToEnum(dom::MediaSourceEnumValues::strings,
ac.mMediaSource,
audioType);
// Only enable AudioCapture if the pref is enabled. If it's not, we can deny
// right away.
if (audioType == dom::MediaSourceEnum::AudioCapture &&
!Preferences::GetBool("media.getusermedia.audiocapture.enabled")) {
nsRefPtr<MediaStreamError> error =
new MediaStreamError(aWindow,
NS_LITERAL_STRING("PermissionDeniedError"));
onFailure->OnError(error);
return NS_OK;
}
}
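(Editorial note: the pref gate above means ordinary pages are denied up front; tests flip the pref first. A hedged mochitest-style sketch, mirroring the pushPrefEnv list removed from head.js further down; `runTest` is a placeholder for the actual test body:)

SpecialPowers.pushPrefEnv({
  set: [['media.getusermedia.audiocapture.enabled', true]]
}, function() {
  // {audio: {mediaSource: "audioCapture"}} requests are no longer
  // rejected with PermissionDeniedError by the gate above.
  runTest();
});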
StreamListeners* listeners = AddWindowID(windowID);
// Create a disabled listener to act as a placeholder
@ -1802,8 +1766,7 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
(!fake || Preferences::GetBool("media.navigator.permission.fake"));
nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowID, videoType,
audioType, fake,
fakeTracks);
fake, fakeTracks);
p->Then([this, onSuccess, onFailure, windowID, c, listener, askPermission,
prefs, isHTTPS, callID, origin](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
@ -1959,9 +1922,7 @@ MediaManager::ToJSArray(SourceSet& aDevices)
}
already_AddRefed<MediaManager::PledgeSourceSet>
MediaManager::EnumerateDevicesImpl(uint64_t aWindowId,
MediaSourceEnum aVideoType,
MediaSourceEnum aAudioType,
MediaManager::EnumerateDevicesImpl(uint64_t aWindowId, MediaSourceEnum aVideoType,
bool aFake, bool aFakeTracks)
{
MOZ_ASSERT(NS_IsMainThread());
@ -1990,13 +1951,12 @@ MediaManager::EnumerateDevicesImpl(uint64_t aWindowId,
nsRefPtr<Pledge<nsCString>> p = media::GetOriginKey(origin, privateBrowsing,
persist);
p->Then([id, aWindowId, aVideoType, aAudioType,
p->Then([id, aWindowId, aVideoType,
aFake, aFakeTracks](const nsCString& aOriginKey) mutable {
MOZ_ASSERT(NS_IsMainThread());
nsRefPtr<MediaManager> mgr = MediaManager_GetInstance();
nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId,
aVideoType, aAudioType,
nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId, aVideoType,
aFake, aFakeTracks);
p->Then([id, aWindowId, aOriginKey](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // secondary result
@ -2035,7 +1995,6 @@ MediaManager::EnumerateDevices(nsPIDOMWindow* aWindow,
nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowId,
dom::MediaSourceEnum::Camera,
dom::MediaSourceEnum::Microphone,
fake);
p->Then([onSuccess](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
@ -2116,7 +2075,7 @@ StopSharingCallback(MediaManager *aThis,
listener->Invalidate();
}
listener->Remove();
listener->StopSharing();
listener->StopScreenWindowSharing();
}
aListeners->Clear();
aThis->RemoveWindowID(aWindowID);
@ -2439,7 +2398,7 @@ MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
uint64_t windowID = PromiseFlatString(Substring(data, strlen("screen:"))).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
if (NS_SUCCEEDED(rv)) {
LOG(("Revoking Screen/windowCapture access for window %llu", windowID));
LOG(("Revoking Screeen/windowCapture access for window %llu", windowID));
StopScreensharing(windowID);
}
} else {
@ -2620,7 +2579,7 @@ StopScreensharingCallback(MediaManager *aThis,
if (aListeners) {
auto length = aListeners->Length();
for (size_t i = 0; i < length; ++i) {
aListeners->ElementAt(i)->StopSharing();
aListeners->ElementAt(i)->StopScreenWindowSharing();
}
}
}
@ -2782,7 +2741,7 @@ GetUserMediaCallbackMediaStreamListener::Invalidate()
// Doesn't kill audio
// XXX refactor to combine with Invalidate()?
void
GetUserMediaCallbackMediaStreamListener::StopSharing()
GetUserMediaCallbackMediaStreamListener::StopScreenWindowSharing()
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
if (mVideoSource && !mStopped &&
@ -2795,13 +2754,6 @@ GetUserMediaCallbackMediaStreamListener::StopSharing()
this, nullptr, nullptr,
nullptr, mVideoSource,
mFinished, mWindowID, nullptr));
} else if (mAudioSource &&
mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
nsCOMPtr<nsPIDOMWindow> window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
MOZ_ASSERT(window);
window->SetAudioCapture(false);
MediaStreamGraph::GetInstance()->UnregisterCaptureStreamForWindow(mWindowID);
mStream->Destroy();
}
}


@ -103,7 +103,7 @@ public:
return mStream->AsSourceStream();
}
void StopSharing();
void StopScreenWindowSharing();
void StopTrack(TrackID aID, bool aIsAudio);
@ -597,14 +597,10 @@ public: // TODO: make private once we upgrade to GCC 4.8+ on linux.
static already_AddRefed<nsIWritableVariant> ToJSArray(SourceSet& aDevices);
private:
already_AddRefed<PledgeSourceSet>
EnumerateRawDevices(uint64_t aWindowId,
dom::MediaSourceEnum aVideoType,
dom::MediaSourceEnum aAudioType,
EnumerateRawDevices(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
bool aFake, bool aFakeTracks);
already_AddRefed<PledgeSourceSet>
EnumerateDevicesImpl(uint64_t aWindowId,
dom::MediaSourceEnum aVideoSrcType,
dom::MediaSourceEnum aAudioSrcType,
EnumerateDevicesImpl(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
bool aFake = false, bool aFakeTracks = false);
StreamListeners* AddWindowID(uint64_t aWindowId);


@ -18,7 +18,6 @@
#include "mozilla/Attributes.h"
#include "TrackUnionStream.h"
#include "ImageContainer.h"
#include "AudioCaptureStream.h"
#include "AudioChannelService.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
@ -3193,17 +3192,6 @@ MediaStreamGraph::CreateTrackUnionStream(DOMMediaStream* aWrapper)
return stream;
}
ProcessedMediaStream*
MediaStreamGraph::CreateAudioCaptureStream(DOMMediaStream* aWrapper)
{
AudioCaptureStream* stream = new AudioCaptureStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
return stream;
}
AudioNodeExternalInputStream*
MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
{
@ -3568,65 +3556,4 @@ ProcessedMediaStream::AddInput(MediaInputPort* aPort)
GraphImpl()->SetStreamOrderDirty();
}
void
MediaStreamGraph::RegisterCaptureStreamForWindow(
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
{
MOZ_ASSERT(NS_IsMainThread());
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->RegisterCaptureStreamForWindow(aWindowId, aCaptureStream);
}
void
MediaStreamGraphImpl::RegisterCaptureStreamForWindow(
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
{
MOZ_ASSERT(NS_IsMainThread());
WindowAndStream winAndStream;
winAndStream.mWindowId = aWindowId;
winAndStream.mCaptureStreamSink = aCaptureStream;
mWindowCaptureStreams.AppendElement(winAndStream);
}
void
MediaStreamGraph::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
{
MOZ_ASSERT(NS_IsMainThread());
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->UnregisterCaptureStreamForWindow(aWindowId);
}
void
MediaStreamGraphImpl::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
{
MOZ_ASSERT(NS_IsMainThread());
for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
mWindowCaptureStreams.RemoveElementAt(i);
}
}
}
already_AddRefed<MediaInputPort>
MediaStreamGraph::ConnectToCaptureStream(uint64_t aWindowId,
MediaStream* aMediaStream)
{
return aMediaStream->GraphImpl()->ConnectToCaptureStream(aWindowId,
aMediaStream);
}
already_AddRefed<MediaInputPort>
MediaStreamGraphImpl::ConnectToCaptureStream(uint64_t aWindowId,
MediaStream* aMediaStream)
{
MOZ_ASSERT(NS_IsMainThread());
for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
ProcessedMediaStream* sink = mWindowCaptureStreams[i].mCaptureStreamSink;
return sink->AllocateInputPort(aMediaStream, 0);
}
}
return nullptr;
}
} // namespace mozilla


@ -1262,10 +1262,6 @@ public:
* particular tracks of each input stream.
*/
ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
/**
* Create a stream that will mix all its audio input.
*/
ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
// Internal AudioNodeStreams can only pass their output to another
// AudioNode, whereas external AudioNodeStreams can pass their output
// to an nsAudioStream for playback.
@ -1322,12 +1318,6 @@ public:
*/
TrackRate GraphRate() const { return mSampleRate; }
void RegisterCaptureStreamForWindow(uint64_t aWindowId,
ProcessedMediaStream* aCaptureStream);
void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
already_AddRefed<MediaInputPort> ConnectToCaptureStream(
uint64_t aWindowId, MediaStream* aMediaStream);
protected:
explicit MediaStreamGraph(TrackRate aSampleRate)
: mNextGraphUpdateIndex(1)


@ -532,13 +532,6 @@ public:
}
}
// Capture Stream API. This allows getting a mixed-down output for a window.
void RegisterCaptureStreamForWindow(uint64_t aWindowId,
ProcessedMediaStream* aCaptureStream);
void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
already_AddRefed<MediaInputPort>
ConnectToCaptureStream(uint64_t aWindowId, MediaStream* aMediaStream);
// Data members
//
/**
@ -762,16 +755,6 @@ private:
* Used to pass memory report information across threads.
*/
nsTArray<AudioNodeSizes> mAudioStreamSizes;
struct WindowAndStream
{
uint64_t mWindowId;
nsRefPtr<ProcessedMediaStream> mCaptureStreamSink;
};
/**
* Stream for window audio capture.
*/
nsTArray<WindowAndStream> mWindowCaptureStreams;
/**
* Indicates that the MSG thread should gather data for a memory report.
*/


@ -196,7 +196,6 @@ EXPORTS.mozilla.dom += [
UNIFIED_SOURCES += [
'AbstractThread.cpp',
'AudioCaptureStream.cpp',
'AudioChannelFormat.cpp',
'AudioCompactor.cpp',
'AudioSegment.cpp',


@ -20,114 +20,6 @@ try {
FAKE_ENABLED = true;
}
/**
* This class provides helpers around analysing the audio content in a stream
* using WebAudio AnalyserNodes.
*
* @constructor
* @param {object} stream
* A MediaStream object whose audio track we shall analyse.
*/
function AudioStreamAnalyser(ac, stream) {
if (stream.getAudioTracks().length === 0) {
throw new Error("No audio track in stream");
}
this.audioContext = ac;
this.stream = stream;
this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
this.analyser = this.audioContext.createAnalyser();
this.sourceNode.connect(this.analyser);
this.data = new Uint8Array(this.analyser.frequencyBinCount);
}
AudioStreamAnalyser.prototype = {
/**
* Get an array of frequency domain data for our stream's audio track.
*
* @returns {array} A Uint8Array containing the frequency domain data.
*/
getByteFrequencyData: function() {
this.analyser.getByteFrequencyData(this.data);
return this.data;
},
/**
* Append a canvas to the DOM where the frequency data are drawn.
* Useful to debug tests.
*/
enableDebugCanvas: function() {
var cvs = document.createElement("canvas");
document.getElementById("content").appendChild(cvs);
// Easy: 1px per bin
cvs.width = this.analyser.frequencyBinCount;
cvs.height = 256;
cvs.style.border = "1px solid red";
var c = cvs.getContext('2d');
var self = this;
function render() {
c.clearRect(0, 0, cvs.width, cvs.height);
var array = self.getByteFrequencyData();
for (var i = 0; i < array.length; i++) {
c.fillRect(i, (256 - (array[i])), 1, 256);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
},
/**
* Return a Promise that will be resolved when the function passed as
* an argument returns true when called (meaning the analysis was a
* success).
*
* @param {function} analysisFunction
* A function that performs an analysis, and returns true if the
* analysis was a success (i.e. it found what it was looking for)
*/
waitForAnalysisSuccess: function(analysisFunction) {
var self = this;
return new Promise((resolve, reject) => {
function analysisLoop() {
var success = analysisFunction(self.getByteFrequencyData());
if (success) {
resolve();
return;
}
// else, we need more time
requestAnimationFrame(analysisLoop);
}
analysisLoop();
});
},
/**
* Return the FFT bin index for a given frequency.
*
* @param {double} frequency
* The frequency for which to return the bin number.
* @returns {integer} the index of the bin in the FFT array.
*/
binIndexForFrequency: function(frequency) {
return 1 + Math.round(frequency *
this.analyser.fftSize /
this.audioContext.sampleRate);
},
/**
* Reverse operation, get the frequency for a bin index.
*
* @param {integer} index an index in an FFT array
* @returns {double} the frequency for this bin
*/
frequencyForBinIndex: function(index) {
return (index - 1) *
this.audioContext.sampleRate /
this.analyser.fftSize;
}
};
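(Editorial note: since the helper class above is removed wholesale, a hedged usage sketch for reference, mochitest-style; `stream` is assumed to carry a steady 1 kHz tone. With the AnalyserNode default fftSize of 2048 and a 48 kHz context, binIndexForFrequency(1000) is 1 + Math.round(1000 * 2048 / 48000) = 44:)

var ac = new AudioContext();
var analyser = new AudioStreamAnalyser(ac, stream);
var bin = analyser.binIndexForFrequency(1000); // 44 at fftSize 2048, 48 kHz
analyser.waitForAnalysisSuccess(function(array) {
  // high magnitude in the 1 kHz bin means the tone is present
  return array[bin] > 200;
}).then(function() {
  ok(true, "detected the 1 kHz component");
});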
/**
* Create the necessary HTML elements for head and body as used by Mochitests
@ -244,10 +136,7 @@ function setupEnvironment() {
['media.navigator.permission.disabled', true],
['media.navigator.streams.fake', FAKE_ENABLED],
['media.getusermedia.screensharing.enabled', true],
['media.getusermedia.screensharing.allowed_domains', "mochi.test"],
['media.getusermedia.audiocapture.enabled', true],
['media.useAudioChannelService', true],
['media.recorder.audio_node.enabled', true]
['media.getusermedia.screensharing.allowed_domains', "mochi.test"]
]
}, setTestOptions);


@ -30,7 +30,6 @@ skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g emulator seems to be to
[test_dataChannel_noOffer.html]
[test_enumerateDevices.html]
skip-if = buildapp == 'mulet'
[test_getUserMedia_audioCapture.html]
[test_getUserMedia_basicAudio.html]
skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
[test_getUserMedia_basicVideo.html]


@ -642,6 +642,39 @@ DataChannelWrapper.prototype = {
};
/**
* This class provides helpers around analysing the audio content in a stream
* using WebAudio AnalyserNodes.
*
* @constructor
* @param {object} stream
* A MediaStream object whose audio track we shall analyse.
*/
function AudioStreamAnalyser(stream) {
if (stream.getAudioTracks().length === 0) {
throw new Error("No audio track in stream");
}
this.stream = stream;
this.audioContext = new AudioContext();
this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
this.analyser = this.audioContext.createAnalyser();
this.sourceNode.connect(this.analyser);
this.data = new Uint8Array(this.analyser.frequencyBinCount);
}
AudioStreamAnalyser.prototype = {
/**
* Get an array of frequency domain data for our stream's audio track.
*
* @returns {array} A Uint8Array containing the frequency domain data.
*/
getByteFrequencyData: function() {
this.analyser.getByteFrequencyData(this.data);
return this.data;
}
};
/**
* This class acts as a wrapper around a PeerConnection instance.
*
@ -1526,20 +1559,20 @@ PeerConnectionWrapper.prototype = {
* @returns {Promise}
* A promise that resolves when we're receiving the tone from |from|.
*/
checkReceivingToneFrom : function(audiocontext, from) {
checkReceivingToneFrom : function(from) {
var inputElem = from.localMediaElements[0];
// As input we use the stream of |from|'s first available audio sender.
var inputSenderTracks = from._pc.getSenders().map(sn => sn.track);
var inputAudioStream = from._pc.getLocalStreams()
.find(s => s.getAudioTracks().some(t => inputSenderTracks.some(t2 => t == t2)));
var inputAnalyser = new AudioStreamAnalyser(audiocontext, inputAudioStream);
var inputAnalyser = new AudioStreamAnalyser(inputAudioStream);
// It would have been nice to have a working getReceivers() here, but until
// we do, let's use what remote streams we have.
var outputAudioStream = this._pc.getRemoteStreams()
.find(s => s.getAudioTracks().length > 0);
var outputAnalyser = new AudioStreamAnalyser(audiocontext, outputAudioStream);
var outputAnalyser = new AudioStreamAnalyser(outputAudioStream);
var maxWithIndex = (a, b, i) => (b >= a.value) ? { value: b, index: i } : a;
var initial = { value: -1, index: -1 };
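(Editorial note: the hunk is truncated here, but the maxWithIndex reducer and initial accumulator set up above are the pieces used to find the dominant FFT bin. A hedged sketch of that shape — the Uint8Array returned by getByteFrequencyData supports reduce:)

var inputMax = inputAnalyser.getByteFrequencyData()
                            .reduce(maxWithIndex, initial);
var outputMax = outputAnalyser.getByteFrequencyData()
                              .reduce(maxWithIndex, initial);
// inputMax.index / outputMax.index are the loudest bins on each side;
// matching indices with high magnitude indicate the tone got through.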


@ -1,110 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test AudioCapture</title>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
</head>
<body>
<pre id="test">
<script>
createHTML({
bug: "1156472",
title: "Test AudioCapture with regular HTMLMediaElement, AudioContext, and HTMLMediaElement playing a MediaStream",
visible: true
});
scriptsReady
.then(() => FAKE_ENABLED = false)
.then(() => {
runTestWhenReady(function() {
// Get an opus file containing a sine wave at maximum amplitude, of duration
// `lengthSeconds`, and of frequency `frequency`.
function getSineWaveFile(frequency, lengthSeconds, callback) {
var chunks = [];
var off = new OfflineAudioContext(1, lengthSeconds * 48000, 48000);
var osc = off.createOscillator();
var rec = new MediaRecorder(osc);
rec.ondataavailable = function(e) {
chunks.push(e.data);
};
rec.onstop = function(e) {
var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
callback(blob);
}
osc.frequency.value = frequency;
osc.start();
rec.start();
off.startRendering().then(function(buffer) {
rec.stop();
});
}
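// Editorial note: `new MediaRecorder(osc)` above records an AudioNode
// directly, a Firefox-only path gated on the media.recorder.audio_node.enabled
// pref (removed from the head.js pref list in this backout). A hedged
// standard-API variant records a MediaStream instead, at the cost of
// rendering in real time; names here are illustrative:
var liveCtx = new AudioContext();
var liveOsc = liveCtx.createOscillator();
var liveDest = liveCtx.createMediaStreamDestination();
liveOsc.connect(liveDest);
var liveChunks = [];
var liveRec = new MediaRecorder(liveDest.stream);
liveRec.ondataavailable = function(e) { liveChunks.push(e.data); };
liveOsc.start();
liveRec.start();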
/**
* Get two HTMLMediaElements:
* - One playing a sine tone from a blob (of an opus file created on the fly)
* - One being the output of an AudioContext's OscillatorNode, connected to
* a MediaStreamAudioDestinationNode.
*
* Also, play another tone through the AudioContext's AudioDestinationNode,
* using another OscillatorNode.
*
* Capture the output of the document, feed that back into the AudioContext,
* with an AnalyserNode, and check the frequency content to make sure we
* have recorded the three sources.
*
* The three sine tones have frequencies far apart from each other, so that we
* can check that the spectrum of the capture stream contains three
* components with a high magnitude.
*/
var wavtone = createMediaElement("audio", "WaveTone");
var acTone = createMediaElement("audio", "audioContextTone");
var ac = new AudioContext();
var oscThroughMediaElement = ac.createOscillator();
oscThroughMediaElement.frequency.value = 1000;
var oscThroughAudioDestinationNode = ac.createOscillator();
oscThroughAudioDestinationNode.frequency.value = 5000;
var msDest = ac.createMediaStreamDestination();
oscThroughMediaElement.connect(msDest);
oscThroughAudioDestinationNode.connect(ac.destination);
acTone.mozSrcObject = msDest.stream;
getSineWaveFile(10000, 10, function(blob) {
wavtone.src = URL.createObjectURL(blob);
oscThroughMediaElement.start();
oscThroughAudioDestinationNode.start();
wavtone.loop = true;
wavtone.play();
acTone.play();
});
var constraints = {audio: {mediaSource: "audioCapture"}};
return getUserMedia(constraints).then((stream) => {
checkMediaStreamTracks(constraints, stream);
window.grip = stream;
var analyser = new AudioStreamAnalyser(ac, stream);
analyser.enableDebugCanvas();
return analyser.waitForAnalysisSuccess(function(array) {
// We want to find three frequency components here, around 1000, 5000
// and 10000 Hz. Frequencies are logarithmic. Also make sure we have low
// energy in between, not just flat white noise.
return (array[analyser.binIndexForFrequency(50)] < 50 &&
array[analyser.binIndexForFrequency(1000)] > 200 &&
array[analyser.binIndexForFrequency(2500)] < 50 &&
array[analyser.binIndexForFrequency(5000)] > 200 &&
array[analyser.binIndexForFrequency(7500)] < 50 &&
array[analyser.binIndexForFrequency(10000)] > 200);
}).then(finish);
}).catch(finish);
});
});
</script>
</pre>
</body>
</html>


@ -136,7 +136,7 @@
]);
test.chain.append([
function PC_LOCAL_CHECK_WEBAUDIO_FLOW_PRESENT(test) {
return test.pcRemote.checkReceivingToneFrom(test.audioCtx, test.pcLocal);
return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
}
]);
test.chain.append([


@ -32,7 +32,7 @@ runNetworkTest(function() {
]);
test.chain.append([
function CHECK_AUDIO_FLOW(test) {
return test.pcRemote.checkReceivingToneFrom(test.audioContext, test.pcLocal);
return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
}
]);
test.run();


@ -313,9 +313,12 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
bool aIsOffline,
AudioChannel aChannel,
uint32_t aNumberOfChannels,
uint32_t aLength, float aSampleRate)
: AudioNode(aContext, aIsOffline ? aNumberOfChannels : 2,
ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
uint32_t aLength,
float aSampleRate)
: AudioNode(aContext,
aIsOffline ? aNumberOfChannels : 2,
ChannelCountMode::Explicit,
ChannelInterpretation::Speakers)
, mFramesToProduce(aLength)
, mAudioChannel(AudioChannel::Normal)
, mIsOffline(aIsOffline)
@ -323,7 +326,6 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
, mExtraCurrentTime(0)
, mExtraCurrentTimeSinceLastStartedBlocking(0)
, mExtraCurrentTimeUpdatedSinceLastStableState(false)
, mCaptured(false)
{
bool startWithAudioDriver = true;
MediaStreamGraph* graph = aIsOffline ?
@ -503,33 +505,6 @@ AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted)
return NS_OK;
}
NS_IMETHODIMP
AudioDestinationNode::WindowAudioCaptureChanged()
{
MOZ_ASSERT(mAudioChannelAgent);
if (!mStream || Context()->IsOffline()) {
return NS_OK;
}
bool captured = GetOwner()->GetAudioCaptured();
if (captured != mCaptured) {
if (captured) {
nsCOMPtr<nsPIDOMWindow> window = Context()->GetParentObject();
uint64_t id = window->WindowID();
mCaptureStreamPort =
mStream->Graph()->ConnectToCaptureStream(id, mStream);
} else {
mCaptureStreamPort->Disconnect();
mCaptureStreamPort->Destroy();
}
mCaptured = captured;
}
return NS_OK;
}
AudioChannel
AudioDestinationNode::MozAudioChannelType() const
{
@ -616,8 +591,6 @@ AudioDestinationNode::CreateAudioChannelAgent()
// The AudioChannelAgent must start playing immediately in order to avoid
// race conditions with mozinterruptbegin/end events.
InputMuted(false);
WindowAudioCaptureChanged();
}
void
@ -709,7 +682,6 @@ AudioDestinationNode::InputMuted(bool aMuted)
return;
}
WindowAudioCaptureChanged();
WindowVolumeChanged(volume, muted);
}


@ -99,7 +99,6 @@ private:
uint32_t mFramesToProduce;
nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
nsRefPtr<MediaInputPort> mCaptureStreamPort;
nsRefPtr<Promise> mOfflineRenderingPromise;
@ -112,7 +111,6 @@ private:
double mExtraCurrentTime;
double mExtraCurrentTimeSinceLastStartedBlocking;
bool mExtraCurrentTimeUpdatedSinceLastStableState;
bool mCaptured;
};
} // namespace dom


@ -291,13 +291,6 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
// We spawn threads to handle gUM runnables, so we must protect the member vars
MutexAutoLock lock(mMutex);
if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
nsRefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
new MediaEngineWebRTCAudioCaptureSource(nullptr);
aASources->AppendElement(audioCaptureSource);
return;
}
#ifdef MOZ_WIDGET_ANDROID
jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
@ -365,14 +358,15 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
}
nsRefPtr<MediaEngineAudioSource> aSource;
nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
// We've already seen this device, just append.
aASources->AppendElement(aSource.get());
} else {
aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
deviceName, uniqueId);
aSource = new MediaEngineWebRTCAudioSource(
mThread, mVoiceEngine, i, deviceName, uniqueId
);
mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
aASources->AppendElement(aSource);
}
@ -391,8 +385,9 @@ ClearVideoSource (const nsAString&, // unused
}
static PLDHashOperator
ClearAudioSource(const nsAString &, // unused
MediaEngineAudioSource *aData, void *userArg)
ClearAudioSource (const nsAString&, // unused
MediaEngineWebRTCAudioSource* aData,
void *userArg)
{
if (aData) {
aData->Shutdown();


@ -133,77 +133,13 @@ private:
void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
};
class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess,
private MediaConstraintsHelper
{
public:
NS_DECL_THREADSAFE_ISUPPORTS
explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
: MediaEngineAudioSource(kReleased)
{
}
void GetName(nsAString& aName) override;
void GetUUID(nsACString& aUUID) override;
nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId) override
{
// Nothing to do here, everything is managed in MediaManager.cpp
return NS_OK;
}
nsresult Deallocate() override
{
// Nothing to do here, everything is managed in MediaManager.cpp
return NS_OK;
}
void Shutdown() override
{
// Nothing to do here, everything is managed in MediaManager.cpp
}
nsresult Start(SourceMediaStream* aMediaStream, TrackID aId) override;
nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
void SetDirectListeners(bool aDirect) override
{}
nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) override
{
return NS_OK;
}
void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
TrackID aID, StreamTime aDesiredTime) override
{}
const dom::MediaSourceEnum GetMediaSource() override
{
return dom::MediaSourceEnum::AudioCapture;
}
bool IsFake() override
{
return false;
}
nsresult TakePhoto(PhotoCallback* aCallback) override
{
return NS_ERROR_NOT_IMPLEMENTED;
}
uint32_t GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId) override;
protected:
virtual ~MediaEngineWebRTCAudioCaptureSource() { Shutdown(); }
nsCString mUUID;
};
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess,
private MediaConstraintsHelper
{
public:
MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex,
const char* name,
const char* uuid)
MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex, const char* name, const char* uuid)
: MediaEngineAudioSource(kReleased)
, mVoiceEngine(aVoiceEnginePtr)
, mMonitor("WebRTCMic.Monitor")
@ -271,7 +207,7 @@ public:
virtual void Shutdown() override;
protected:
~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
~MediaEngineWebRTCAudioSource() { Shutdown(); }
private:
void Init();
@ -358,7 +294,7 @@ private:
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
};
}


@ -41,10 +41,9 @@ extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
/**
* Webrtc microphone source.
* Webrtc audio source.
*/
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
// XXX temp until MSG supports registration
StaticRefPtr<AudioOutputObserver> gFarendObserver;
@ -178,7 +177,7 @@ AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrame
}
void
MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
{
if (mInitDone) {
aName.Assign(mDeviceName);
@ -188,7 +187,7 @@ MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
}
void
MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
{
if (mInitDone) {
aUUID.Assign(mDeviceUUID);
@ -198,10 +197,10 @@ MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
}
nsresult
MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay)
MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay)
{
LOG(("Audio config: aec: %d, agc: %d, noise: %d",
aEchoOn ? aEcho : -1,
@ -268,7 +267,7 @@ MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
// Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
// A finite result may be used to calculate this device's ranking as a choice.
uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId)
{
@ -282,9 +281,9 @@ uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
}
nsresult
MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId)
MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId)
{
if (mState == kReleased) {
if (mInitDone) {
@ -310,7 +309,7 @@ MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aC
}
nsresult
MediaEngineWebRTCMicrophoneSource::Deallocate()
MediaEngineWebRTCAudioSource::Deallocate()
{
bool empty;
{
@ -332,8 +331,7 @@ MediaEngineWebRTCMicrophoneSource::Deallocate()
}
nsresult
MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
TrackID aID)
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
@ -386,7 +384,7 @@ MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
}
nsresult
MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
{
MonitorAutoLock lock(mMonitor);
@ -423,17 +421,17 @@ MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
}
void
MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime)
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime)
{
// Ignore - we push audio data
LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
}
void
MediaEngineWebRTCMicrophoneSource::Init()
MediaEngineWebRTCAudioSource::Init()
{
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
@ -498,7 +496,7 @@ MediaEngineWebRTCMicrophoneSource::Init()
}
void
MediaEngineWebRTCMicrophoneSource::Shutdown()
MediaEngineWebRTCAudioSource::Shutdown()
{
if (!mInitDone) {
// duplicate these here in case we failed during Init()
@ -553,10 +551,9 @@ MediaEngineWebRTCMicrophoneSource::Shutdown()
typedef int16_t sample;
void
MediaEngineWebRTCMicrophoneSource::Process(int channel,
webrtc::ProcessingTypes type,
sample *audio10ms, int length,
int samplingFreq, bool isStereo)
MediaEngineWebRTCAudioSource::Process(int channel,
webrtc::ProcessingTypes type, sample* audio10ms,
int length, int samplingFreq, bool isStereo)
{
// On initial capture, throw away all far-end data except the most recent sample
// since it's already irrelevant and we want to avoid confusing the AEC far-end
@ -621,55 +618,4 @@ MediaEngineWebRTCMicrophoneSource::Process(int channel,
return;
}
void
MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
{
aName.AssignLiteral("AudioCapture");
}
void
MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
{
nsID uuid;
char uuidBuffer[NSID_LENGTH];
nsCString asciiString;
ErrorResult rv;
rv = nsContentUtils::GenerateUUIDInPlace(uuid);
if (rv.Failed()) {
aUUID.AssignLiteral("");
return;
}
uuid.ToProvidedString(uuidBuffer);
asciiString.AssignASCII(uuidBuffer);
// Remove {} and the null terminator
aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
}
nsresult
MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
TrackID aId)
{
aMediaStream->AddTrack(aId, 0, new AudioSegment());
return NS_OK;
}
nsresult
MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
TrackID aId)
{
aMediaStream->EndAllTrackAndFinish();
return NS_OK;
}
uint32_t
MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId)
{
// There is only one way of capturing audio for now, and it's always adequate.
return 0;
}
}


@ -25,7 +25,6 @@ enum MediaSourceEnum {
"window",
"browser",
"microphone",
"audioCapture",
"other"
};


@ -445,8 +445,6 @@ pref("media.getusermedia.screensharing.allowed_domains", "mozilla.github.io,webe
// OS/X 10.6 and XP have screen/window sharing off by default due to various issues - Caveat emptor
pref("media.getusermedia.screensharing.allow_on_old_platforms", false);
pref("media.getusermedia.audiocapture.enabled", false);
// TextTrack support
pref("media.webvtt.enabled", true);
pref("media.webvtt.regions.enabled", false);