diff --git a/dom/media/webrtc/MediaEnginePrefs.h b/dom/media/webrtc/MediaEnginePrefs.h index a6b82857b089..380cedd0ce5b 100644 --- a/dom/media/webrtc/MediaEnginePrefs.h +++ b/dom/media/webrtc/MediaEnginePrefs.h @@ -54,6 +54,11 @@ public: bool mFakeDeviceChangeEventOn; int32_t mChannels; + bool operator ==(const MediaEnginePrefs& aRhs) + { + return memcmp(this, &aRhs, sizeof(MediaEnginePrefs)) == 0; + }; + // mWidth and/or mHeight may be zero (=adaptive default), so use functions. int32_t GetWidth(bool aHD = false) const { diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp index 593c64a45295..48c44487ef74 100644 --- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp +++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp @@ -8,7 +8,6 @@ #include #include -#include "AllocationHandle.h" #include "AudioConverter.h" #include "MediaManager.h" #include "MediaStreamGraphImpl.h" @@ -33,9 +32,6 @@ using namespace webrtc; #define MAX_CHANNELS 2 #define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100 -#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10 -static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH"); - #ifdef MOZ_PULSEAUDIO static uint32_t sInputStreamsOpen = 0; #endif @@ -124,116 +120,22 @@ uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance( } nsresult -MediaEngineWebRTCMicrophoneSource::ReevaluateAllocation( - const RefPtr& aHandle, - const NormalizedConstraints* aConstraintsUpdate, - const MediaEnginePrefs& aPrefs, - const nsString& aDeviceId, +MediaEngineWebRTCMicrophoneSource::EvaluateSettings( + const NormalizedConstraints& aConstraintsUpdate, + const MediaEnginePrefs& aInPrefs, + MediaEnginePrefs* aOutPrefs, const char** aOutBadConstraint) { AssertIsOnOwningThread(); - // aHandle and/or aConstraintsUpdate may be nullptr (see below) - AutoTArray allConstraints; - if (mHandle && !(aConstraintsUpdate && mHandle == aHandle)) { - allConstraints.AppendElement(&mHandle->mConstraints); - } + 
MediaEnginePrefs prefs = aInPrefs; - if (aConstraintsUpdate) { - allConstraints.AppendElement(aConstraintsUpdate); - } else if (aHandle) { - // In the case of AddShareOfSingleSource, the handle isn't registered yet. - allConstraints.AppendElement(&aHandle->mConstraints); - } + FlattenedConstraints c(aConstraintsUpdate); - NormalizedConstraints netConstraints(allConstraints); - if (netConstraints.mBadConstraint) { - *aOutBadConstraint = netConstraints.mBadConstraint; - return NS_ERROR_FAILURE; - } - - nsresult rv = UpdateSingleSource(aHandle, - netConstraints, - aPrefs, - aDeviceId, - aOutBadConstraint); - if (NS_FAILED(rv)) { - return rv; - } - if (aHandle && aConstraintsUpdate) { - aHandle->mConstraints = *aConstraintsUpdate; - } - return NS_OK; -} - -nsresult -MediaEngineWebRTCMicrophoneSource::Reconfigure(const RefPtr& aHandle, - const dom::MediaTrackConstraints& aConstraints, - const MediaEnginePrefs& aPrefs, - const nsString& aDeviceId, - const char** aOutBadConstraint) -{ - AssertIsOnOwningThread(); - MOZ_ASSERT(aHandle); - MOZ_ASSERT(mStream); - - LOG(("Mic source %p allocation %p Reconfigure()", this, aHandle.get())); - - NormalizedConstraints constraints(aConstraints); - nsresult rv = ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId, - aOutBadConstraint); - if (NS_FAILED(rv)) { - if (aOutBadConstraint) { - return NS_ERROR_INVALID_ARG; - } - - nsAutoCString name; - GetErrorName(rv, name); - LOG(("Mic source %p Reconfigure() failed unexpectedly. rv=%s", - this, name.Data())); - Stop(aHandle); - return NS_ERROR_UNEXPECTED; - } - - ApplySettings(mNetPrefs, mStream->GraphImpl()); - - return NS_OK; -} - -void MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr& aHandle, - const RefPtr& aStream, - TrackID aTrackID, - StreamTime aDesiredTime, - const PrincipalHandle& aPrincipalHandle) -{ - // If pull is enabled, it means that the audio input is not open, and we - // should fill it out with silence. 
This is the only method called on the - // MSG thread. - mInputProcessing->Pull(aHandle, aStream, aTrackID, aDesiredTime, aPrincipalHandle); -} - -bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b) -{ - return !memcmp(&a, &b, sizeof(MediaEnginePrefs)); -}; - -nsresult -MediaEngineWebRTCMicrophoneSource::UpdateSingleSource( - const RefPtr& aHandle, - const NormalizedConstraints& aNetConstraints, - const MediaEnginePrefs& aPrefs, - const nsString& aDeviceId, - const char** aOutBadConstraint) -{ - AssertIsOnOwningThread(); - - FlattenedConstraints c(aNetConstraints); - - MediaEnginePrefs prefs = aPrefs; - prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn); - prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn); - prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn); + prefs.mAecOn = c.mEchoCancellation.Get(aInPrefs.mAecOn); + prefs.mAgcOn = c.mAutoGainControl.Get(aInPrefs.mAgcOn); + prefs.mNoiseOn = c.mNoiseSuppression.Get(aInPrefs.mNoiseOn); // Determine an actual channel count to use for this source. Three factors at // play here: the device capabilities, the constraints passed in by content, @@ -248,54 +150,73 @@ MediaEngineWebRTCMicrophoneSource::UpdateSingleSource( } // A pref can force the channel count to use. If the pref has a value of zero // or lower, it has no effect. - if (prefs.mChannels <= 0) { + if (aInPrefs.mChannels <= 0) { prefs.mChannels = maxChannels; } // Get the number of channels asked for by content, and clamp it between the // pref and the maximum number of channels that the device supports. - prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels, - maxChannels)); + prefs.mChannels = c.mChannelCount.Get(std::min(aInPrefs.mChannels, + maxChannels)); prefs.mChannels = std::max(1, std::min(prefs.mChannels, maxChannels)); LOG(("Audio config: aec: %d, agc: %d, noise: %d, channels: %d", - prefs.mAecOn ? prefs.mAec : -1, - prefs.mAgcOn ? prefs.mAgc : -1, - prefs.mNoiseOn ? 
prefs.mNoise : -1, - prefs.mChannels)); + prefs.mAecOn ? prefs.mAec : -1, + prefs.mAgcOn ? prefs.mAgc : -1, + prefs.mNoiseOn ? prefs.mNoise : -1, + prefs.mChannels)); - switch (mState) { - case kReleased: - MOZ_ASSERT(aHandle); - mState = kAllocated; - LOG(("Audio device %s allocated", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get())); - break; + *aOutPrefs = prefs; - case kStarted: - case kStopped: - if (prefs == mNetPrefs) { - LOG(("UpdateSingleSource: new prefs for %s are the same as the current prefs, returning.", - NS_ConvertUTF16toUTF8(mDeviceName).get())); - return NS_OK; - } - break; - - default: - LOG(("Audio device %s in ignored state %d", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get(), MediaEngineSourceState(mState))); - break; - } - - if (mStream) { - UpdateAGCSettingsIfNeeded(prefs.mAgcOn, static_cast(prefs.mAgc)); - UpdateNSSettingsIfNeeded(prefs.mNoiseOn, static_cast(prefs.mNoise)); - UpdateAECSettingsIfNeeded(prefs.mAecOn, static_cast(prefs.mAec)); - - UpdateAPMExtraOptions(mExtendedFilter, mDelayAgnostic); - } - mNetPrefs = prefs; return NS_OK; } +nsresult +MediaEngineWebRTCMicrophoneSource::Reconfigure(const RefPtr&, + const dom::MediaTrackConstraints& aConstraints, + const MediaEnginePrefs& aPrefs, + const nsString& /* aDeviceId */, + const char** aOutBadConstraint) +{ + AssertIsOnOwningThread(); + MOZ_ASSERT(mStream); + + LOG(("Mic source %p Reconfigure ", this)); + + NormalizedConstraints constraints(aConstraints); + MediaEnginePrefs outputPrefs; + nsresult rv = EvaluateSettings(constraints, aPrefs, &outputPrefs, + aOutBadConstraint); + if (NS_FAILED(rv)) { + if (aOutBadConstraint) { + return NS_ERROR_INVALID_ARG; + } + + nsAutoCString name; + GetErrorName(rv, name); + LOG(("Mic source %p Reconfigure() failed unexpectedly. 
rv=%s", + this, name.Data())); + Stop(nullptr); + return NS_ERROR_UNEXPECTED; + } + + ApplySettings(outputPrefs); + + return NS_OK; +} + +void MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr&, + const RefPtr& aStream, + TrackID aTrackID, + StreamTime aDesiredTime, + const PrincipalHandle& aPrincipalHandle) +{ + // If pull is enabled, it means that the audio input is not open, and we + // should fill it out with silence. This is the only method called on the + // MSG thread. + mInputProcessing->Pull(aStream, aTrackID, aDesiredTime, aPrincipalHandle); +} + void MediaEngineWebRTCMicrophoneSource::UpdateAECSettingsIfNeeded( bool aEnable, @@ -477,18 +398,28 @@ MediaEngineWebRTCMicrophoneSource::UpdateAPMExtraOptions(bool aExtendedFilter, } void -MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs, - RefPtr aGraph) +MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs) { AssertIsOnOwningThread(); - MOZ_DIAGNOSTIC_ASSERT(aGraph); + + MOZ_ASSERT(mStream, + "ApplySetting is to be called only after SetTrack has been called"); + + if (mStream) { + UpdateAGCSettingsIfNeeded(aPrefs.mAgcOn, static_cast(aPrefs.mAgc)); + UpdateNSSettingsIfNeeded(aPrefs.mNoiseOn, static_cast(aPrefs.mNoise)); + UpdateAECSettingsIfNeeded(aPrefs.mAecOn, static_cast(aPrefs.mAec)); + + UpdateAPMExtraOptions(mExtendedFilter, mDelayAgnostic); + } RefPtr that = this; - NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(aGraph), aPrefs]() mutable { - that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn; - that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn; - that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn; - that->mSettings->mChannelCount.Value() = aPrefs.mChannels; + RefPtr graphImpl = mStream->GraphImpl(); + NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(graphImpl), prefs = aPrefs]() mutable { + that->mSettings->mEchoCancellation.Value() = prefs.mAecOn; + 
that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn; + that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn; + that->mSettings->mChannelCount.Value() = prefs.mChannels; class Message : public ControlMessage { public: @@ -514,10 +445,10 @@ MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs, uint32_t mRequestedInputChannelCount; }; - bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn); + bool passThrough = !(prefs.mAecOn || prefs.mAgcOn || prefs.mNoiseOn); if (graph) { graph->AppendMessage(MakeUnique( - that->mInputProcessing, passThrough, aPrefs.mChannels)); + that->mInputProcessing, passThrough, prefs.mChannels)); } return NS_OK; @@ -534,25 +465,33 @@ MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aC { AssertIsOnOwningThread(); MOZ_ASSERT(aOutHandle); - // This is going away in bug 1497254 - auto handle = MakeRefPtr(aConstraints, aPrincipalInfo, - aDeviceId); - nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId, - aOutBadConstraint); + + *aOutHandle = nullptr; + + mState = kAllocated; + + NormalizedConstraints normalized(aConstraints); + MediaEnginePrefs outputPrefs; + nsresult rv = EvaluateSettings(normalized, aPrefs, &outputPrefs, aOutBadConstraint); if (NS_FAILED(rv)) { return rv; } - MOZ_ASSERT(!mHandle, "Only allocate once."); - mHandle = handle; + RefPtr that = this; + NS_DispatchToMainThread(media::NewRunnableFrom([that, prefs = outputPrefs]() mutable { + that->mSettings->mEchoCancellation.Value() = prefs.mAecOn; + that->mSettings->mAutoGainControl.Value() = prefs.mAgcOn; + that->mSettings->mNoiseSuppression.Value() = prefs.mNoiseOn; + that->mSettings->mChannelCount.Value() = prefs.mChannels; + return NS_OK; + })); - handle.forget(aOutHandle); - return NS_OK; + return rv; } nsresult -MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr& aHandle) +MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr&) { AssertIsOnOwningThread(); @@ 
-599,11 +538,10 @@ MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr& aHandle, +MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr&, const RefPtr& aStream, TrackID aTrackID, const PrincipalHandle& aPrincipal) @@ -685,7 +623,7 @@ protected: }; nsresult -MediaEngineWebRTCMicrophoneSource::Start(const RefPtr& aHandle) +MediaEngineWebRTCMicrophoneSource::Start(const RefPtr&) { AssertIsOnOwningThread(); @@ -743,17 +681,15 @@ MediaEngineWebRTCMicrophoneSource::Start(const RefPtr& a MOZ_ASSERT(mState != kReleased); mState = kStarted; - ApplySettings(mNetPrefs, mStream->GraphImpl()); - return NS_OK; } nsresult -MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr& aHandle) +MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr&) { AssertIsOnOwningThread(); - LOG(("Mic source %p allocation %p Stop()", this, aHandle.get())); + LOG(("Mic source %p Stop()", this)); MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop"); @@ -830,12 +766,12 @@ MediaEngineWebRTCMicrophoneSource::Shutdown() AssertIsOnOwningThread(); if (mState == kStarted) { - Stop(mHandle); + Stop(nullptr); MOZ_ASSERT(mState == kStopped); } MOZ_ASSERT(mState == kAllocated || mState == kStopped); - Deallocate(mHandle); + Deallocate(nullptr); MOZ_ASSERT(mState == kReleased); } @@ -1029,8 +965,7 @@ AudioInputProcessing::Stop() } void -AudioInputProcessing::Pull(const RefPtr& aHandle, - const RefPtr& aStream, +AudioInputProcessing::Pull(const RefPtr& aStream, TrackID aTrackID, StreamTime aDesiredTime, const PrincipalHandle& aPrincipalHandle) @@ -1076,9 +1011,8 @@ AudioInputProcessing::Pull(const RefPtr& aHandle, } } - LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p", - delta, - aHandle.get())); + LOG_FRAMES(("Pulling %" PRId64 " frames of silence.", + delta)); // This assertion fails when we append silence here in the same iteration // as there were real audio samples already appended by the audio callback. 
@@ -1453,7 +1387,7 @@ MediaEngineWebRTCAudioCaptureSource::GetUUID() const } nsresult -MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr& aHandle, +MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr&, const RefPtr& aStream, TrackID aTrackID, const PrincipalHandle& aPrincipalHandle) @@ -1464,14 +1398,14 @@ MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr& aHandle) +MediaEngineWebRTCAudioCaptureSource::Start(const RefPtr&) { AssertIsOnOwningThread(); return NS_OK; } nsresult -MediaEngineWebRTCAudioCaptureSource::Stop(const RefPtr& aHandle) +MediaEngineWebRTCAudioCaptureSource::Stop(const RefPtr&) { AssertIsOnOwningThread(); return NS_OK; @@ -1479,13 +1413,12 @@ MediaEngineWebRTCAudioCaptureSource::Stop(const RefPtr& nsresult MediaEngineWebRTCAudioCaptureSource::Reconfigure( - const RefPtr& aHandle, + const RefPtr&, const dom::MediaTrackConstraints& aConstraints, const MediaEnginePrefs &aPrefs, const nsString& aDeviceId, const char** aOutBadConstraint) { - MOZ_ASSERT(!aHandle); return NS_OK; } diff --git a/dom/media/webrtc/MediaEngineWebRTCAudio.h b/dom/media/webrtc/MediaEngineWebRTCAudio.h index 4c9620a9e8be..a041399167b7 100644 --- a/dom/media/webrtc/MediaEngineWebRTCAudio.h +++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h @@ -95,45 +95,32 @@ protected: private: /** - * Reevaluates the aggregated constraints of all allocations and restarts the - * underlying device if necessary. + * From a set of constraints and about:config preferences, output the correct + * set of preferences that can be sent to AudioInputProcessing. * - * If the given AllocationHandle was already registered, its constraints will - * be updated before reevaluation. If not, they will be added before - * reevaluation. + * This can fail if the number of channels requested is zero, negative, or + * more than the device supports. 
*/ - nsresult ReevaluateAllocation(const RefPtr& aHandle, - const NormalizedConstraints* aConstraintsUpdate, - const MediaEnginePrefs& aPrefs, - const nsString& aDeviceId, - const char** aOutBadConstraint); + nsresult EvaluateSettings(const NormalizedConstraints& aConstraintsUpdate, + const MediaEnginePrefs& aInPrefs, + MediaEnginePrefs* aOutPrefs, + const char** aOutBadConstraint); + /** + * From settings output by EvaluateSettings, send those settings to the + * AudioInputProcessing instance and the main thread (for use in GetSettings). + */ + void ApplySettings(const MediaEnginePrefs& aPrefs); /** - * Updates the underlying (single) device with the aggregated constraints - * aNetConstraints. If the chosen settings for the device changes based on - * these new constraints, and capture is active, the device will be restarted. + * Send the AudioProcessingModule parameter for a given processing algorithm. */ - nsresult UpdateSingleSource(const RefPtr& aHandle, - const NormalizedConstraints& aNetConstraints, - const MediaEnginePrefs& aPrefs, - const nsString& aDeviceId, - const char** aOutBadConstraint); - - // These methods send a message to the AudioInputProcessing instance. void UpdateAECSettingsIfNeeded(bool aEnable, webrtc::EcModes aMode); void UpdateAGCSettingsIfNeeded(bool aEnable, webrtc::AgcModes aMode); void UpdateNSSettingsIfNeeded(bool aEnable, webrtc::NsModes aMode); void UpdateAPMExtraOptions(bool aExtendedFilter, bool aDelayAgnostic); - void ApplySettings(const MediaEnginePrefs& aPrefs, - RefPtr aGraph); - - bool HasEnabledTrack() const; - - RefPtr mHandle; TrackID mTrackID = TRACK_NONE; PrincipalHandle mPrincipal = PRINCIPAL_HANDLE_NONE; - bool mEnabled = false; const RefPtr mDeviceInfo; const bool mDelayAgnostic; @@ -147,10 +134,6 @@ private: // Constructed on the MediaManager thread, and then only ever accessed on the // main thread. 
const nsMainThreadPtrHandle> mSettings; - // To only update microphone when needed, we keep track of the prefs - // representing the currently applied settings for this source. This is the - // net result of the prefs across all allocations. - MediaEnginePrefs mNetPrefs; // Current state of the resource for this source. MediaEngineSourceState mState; @@ -174,8 +157,7 @@ public: TrackID aTrackID, const PrincipalHandle& aPrincipalHandle); - void Pull(const RefPtr& aHandle, - const RefPtr& aStream, + void Pull(const RefPtr& aStream, TrackID aTrackID, StreamTime aDesiredTime, const PrincipalHandle& aPrincipalHandle); diff --git a/dom/media/webrtc/MediaTrackConstraints.cpp b/dom/media/webrtc/MediaTrackConstraints.cpp index 658f48a448b8..fcc55bafc88d 100644 --- a/dom/media/webrtc/MediaTrackConstraints.cpp +++ b/dom/media/webrtc/MediaTrackConstraints.cpp @@ -312,73 +312,6 @@ NormalizedConstraints::NormalizedConstraints( } } -// Merge constructor. Create net constraints out of merging a set of others. -// This is only used to resolve competing constraints from concurrent requests, -// something the spec doesn't cover. - -NormalizedConstraints::NormalizedConstraints( - const nsTArray& aOthers) - : NormalizedConstraintSet(*aOthers[0]) - , mBadConstraint(nullptr) -{ - for (auto& entry : aOthers[0]->mAdvanced) { - mAdvanced.push_back(entry); - } - - // Create a list of member pointers. 
- nsTArray list; - NormalizedConstraints dummy(dom::MediaTrackConstraints(), &list); - - // Do intersection of all required constraints, and average of ideals, - - for (uint32_t i = 1; i < aOthers.Length(); i++) { - auto& other = *aOthers[i]; - - for (auto& memberPtr : list) { - auto& member = this->*memberPtr; - auto& otherMember = other.*memberPtr; - - if (!member.Merge(otherMember)) { - mBadConstraint = member.mName; - return; - } - } - - for (auto& entry : other.mAdvanced) { - mAdvanced.push_back(entry); - } - } - for (auto& memberPtr : list) { - (this->*memberPtr).FinalizeMerge(); - } - - // ...except for resolution and frame rate where we take the highest ideal. - // This is a bit of a hack based on the perception that people would be more - // surprised if they were to get lower resolution than they ideally requested. - // - // The spec gives browsers leeway here, saying they "SHOULD use the one with - // the smallest fitness distance", and also does not directly address the - // problem of competing constraints at all. There is no real web interop issue - // here since this is more about interop with other tabs on the same browser. - // - // We should revisit this logic once we support downscaling of resolutions and - // decimating of frame rates, per track. - - for (auto& other : aOthers) { - mWidth.TakeHighestIdeal(other->mWidth); - mHeight.TakeHighestIdeal(other->mHeight); - - // Consider implicit 30 fps default in comparison of competing constraints. - // Avoids 160x90x10 and 640x480 becoming 1024x768x10 (fitness distance flaw) - // This pretty much locks in 30 fps or higher, except for single-tab use. 
- auto frameRate = other->mFrameRate; - if (frameRate.mIdeal.isNothing()) { - frameRate.mIdeal.emplace(30); - } - mFrameRate.TakeHighestIdeal(frameRate); - } -} - FlattenedConstraints::FlattenedConstraints(const NormalizedConstraints& aOther) : NormalizedConstraintSet(aOther) { diff --git a/dom/media/webrtc/MediaTrackConstraints.h b/dom/media/webrtc/MediaTrackConstraints.h index faa0f814d45a..a1acb00893e6 100644 --- a/dom/media/webrtc/MediaTrackConstraints.h +++ b/dom/media/webrtc/MediaTrackConstraints.h @@ -283,10 +283,6 @@ struct NormalizedConstraints : public NormalizedConstraintSet explicit NormalizedConstraints(const dom::MediaTrackConstraints& aOther, nsTArray* aList = nullptr); - // Merge constructor - explicit NormalizedConstraints( - const nsTArray& aOthers); - std::vector mAdvanced; const char* mBadConstraint; };