Bug 1887774 delay AudioProcessing initialization until required r=pehrsons
The benefit is that, by the time initialization is required, it is more likely to be known whether the DeviceInputTrack or reverse stream could be affected by clock drift. Recreation of mAudioProcessing will therefore be less likely when, in a subsequent patch, the AudioProcessing needs to be replaced whenever the possibility of drift changes.

mSkipProcessing and mRequestedInputChannelCount are replaced by their MediaEnginePrefs equivalents. AudioInputProcessing now starts in pass-through mode, consistent with its initial AEC, AGC, and noise-suppression settings.

Differential Revision: https://phabricator.services.mozilla.com/D206868
This commit is contained in:
parent 4385dce6fc
commit 9fcd2ec8df
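Before the diff, a minimal sketch of the pattern the commit message describes: pass-through is derived from the current MediaEnginePrefs instead of a stored mSkipProcessing flag, and the webrtc::AudioProcessing instance is only built the first time non-pass-through audio actually has to be processed. The `Prefs`/`ApmStub` types and the `InputProcessingSketch` class below are simplified stand-ins, not the patch itself; the real signatures appear in the diff that follows.

```cpp
#include <cstdint>
#include <memory>

// Illustrative stand-ins for MediaEnginePrefs and webrtc::AudioProcessing;
// only the fields this sketch needs are included.
struct Prefs {
  bool mAecOn = false;
  bool mAgcOn = false;
  bool mNoiseOn = false;
  int32_t mChannels = 1;
};

struct ApmStub {
  explicit ApmStub(const Prefs&) {}
  void Initialize() {}
};

class InputProcessingSketch {
 public:
  // Pass-through is derived from the current settings rather than kept in a
  // separate flag (the high-pass filter would not count here).
  bool IsPassThrough() const {
    return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
  }

  void ApplySettings(const Prefs& aSettings) {
    // Store the settings; APM creation stays deferred until processing is
    // actually needed, so a later settings change rarely forces recreation.
    mSettings = aSettings;
  }

  void Process(/* input, output */) {
    if (IsPassThrough()) {
      return;  // Forward input directly; no APM required.
    }
    EnsureAudioProcessing();
    // ... packetize and feed *mAudioProcessing ...
  }

 private:
  void EnsureAudioProcessing() {
    if (!mAudioProcessing) {
      // First non-pass-through packet: build the APM from current settings.
      mAudioProcessing = std::make_unique<ApmStub>(mSettings);
    }
  }

  Prefs mSettings;
  std::unique_ptr<ApmStub> mAudioProcessing;  // starts null (pass-through)
};
```

In the patch itself this role is played by AudioInputProcessing::IsPassThrough, ApplySettings, and EnsureAudioProcessing, shown in the hunks below.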
@@ -62,19 +62,21 @@ TEST(TestAudioInputProcessing, Buffering)
GraphTime processedTime;
GraphTime nextTime;
AudioSegment output;
MediaEnginePrefs settings;
settings.mChannels = channels;

// Toggle pass-through mode without starting
{
EXPECT_EQ(aip->PassThrough(graph), false);
EXPECT_EQ(aip->IsPassThrough(graph), true);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);

aip->SetPassThrough(graph, true);
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);

aip->SetPassThrough(graph, false);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);

aip->SetPassThrough(graph, true);
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}

@@ -95,7 +97,8 @@ TEST(TestAudioInputProcessing, Buffering)
}

// Set aip to processing/non-pass-through mode
aip->SetPassThrough(graph, false);
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
{
// Need (nextTime - processedTime) = 256 - 128 = 128 frames this round.
// aip has not started yet, so output will be filled with silence data
@@ -184,7 +187,9 @@ TEST(TestAudioInputProcessing, Buffering)
EXPECT_EQ(aip->NumBufferedFrames(graph), 64);
}

aip->SetPassThrough(graph, true);
// Set aip to pass-through mode
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
{
// Need (nextTime - processedTime) = 512 - 512 = 0 frames this round.
// No buffering in pass-through mode
@@ -269,7 +274,11 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
};

// Check the principals in audio-processing mode.
EXPECT_EQ(aip->PassThrough(graph), false);
MediaEnginePrefs settings;
settings.mChannels = channels;
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->Start(graph);
{
AudioSegment output;
@@ -295,7 +304,9 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
}

// Check the principals in pass-through mode.
aip->SetPassThrough(graph, true);
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), true);
{
AudioSegment output;
aip->Process(graph, 0, 4800, &input, &output);
@@ -324,7 +335,11 @@ TEST(TestAudioInputProcessing, Downmixing)
GraphTime processedTime;
GraphTime nextTime;

aip->SetPassThrough(graph, false);
MediaEnginePrefs settings;
settings.mChannels = channels;
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->Start(graph);

processedTime = 0;
@@ -364,8 +379,11 @@ TEST(TestAudioInputProcessing, Downmixing)
}
}

// Now, repeat the test, checking we get the unmodified 4 channels.
aip->SetPassThrough(graph, true);
// Now, repeat the test in pass-through mode, checking we get the unmodified
// 4 channels.
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), true);

AudioSegment input, output;
processedTime = nextTime;
@@ -59,39 +59,27 @@ struct StopInputProcessing : public ControlMessage {
void Run() override { mInputProcessing->Stop(mTrack->Graph()); }
};

struct SetPassThrough : public ControlMessage {
const RefPtr<AudioInputProcessing> mInputProcessing;
const bool mPassThrough;
void QueueApplySettings(AudioProcessingTrack* aTrack,
AudioInputProcessing* aInputProcessing,
const MediaEnginePrefs& aSettings) {
aTrack->QueueControlMessageWithNoShutdown(
[inputProcessing = RefPtr{aInputProcessing}, aSettings,
// If the track is not connected to a device then the particular
// AudioDeviceID (nullptr) passed to ReevaluateInputDevice() is not
// important.
deviceId = aTrack->DeviceId().valueOr(nullptr),
graph = aTrack->Graph()] {
inputProcessing->ApplySettings(graph, deviceId, aSettings);
});
}

SetPassThrough(MediaTrack* aTrack, AudioInputProcessing* aInputProcessing,
bool aPassThrough)
: ControlMessage(aTrack),
mInputProcessing(aInputProcessing),
mPassThrough(aPassThrough) {}
void Run() override {
EXPECT_EQ(mInputProcessing->PassThrough(mTrack->Graph()), !mPassThrough);
mInputProcessing->SetPassThrough(mTrack->Graph(), mPassThrough);
}
};

struct SetRequestedInputChannelCount : public ControlMessage {
const CubebUtils::AudioDeviceID mDeviceId;
const RefPtr<AudioInputProcessing> mInputProcessing;
const uint32_t mChannelCount;

SetRequestedInputChannelCount(MediaTrack* aTrack,
CubebUtils::AudioDeviceID aDeviceId,
AudioInputProcessing* aInputProcessing,
uint32_t aChannelCount)
: ControlMessage(aTrack),
mDeviceId(aDeviceId),
mInputProcessing(aInputProcessing),
mChannelCount(aChannelCount) {}
void Run() override {
mInputProcessing->SetRequestedInputChannelCount(mTrack->Graph(), mDeviceId,
mChannelCount);
}
};
void QueueExpectIsPassThrough(AudioProcessingTrack* aTrack,
AudioInputProcessing* aInputProcessing) {
aTrack->QueueControlMessageWithNoShutdown(
[inputProcessing = RefPtr{aInputProcessing}, graph = aTrack->Graph()] {
EXPECT_EQ(inputProcessing->IsPassThrough(graph), true);
});
}
#endif // MOZ_WEBRTC

class GoFaster : public ControlMessage {
@@ -1175,8 +1163,7 @@ TEST(TestAudioTrackGraph, ErrorCallback)
auto started = Invoke([&] {
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -1246,8 +1233,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -1335,12 +1321,22 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
outputTrack->QueueSetAutoend(false);
outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
port = outputTrack->AllocateInputPort(processingTrack);
listener = new AudioInputProcessing(2);

const int32_t channelCount = 2;
listener = new AudioInputProcessing(channelCount);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
MediaEnginePrefs settings;
settings.mChannels = channelCount;
settings.mAgcOn = true; // Turn off pass-through.
// AGC1 Mode 0 interferes with AudioVerifier's frequency estimation
// through zero-crossing counts.
settings.mAgc2Forced = true;
QueueApplySettings(processingTrack, listener, settings);

return graph->NotifyWhenDeviceStarted(nullptr);
});

@@ -1493,8 +1489,7 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
@@ -1602,8 +1597,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(2);
track1->SetInputProcessing(listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@@ -1624,8 +1618,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(1);
track2->SetInputProcessing(listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track2, listener2, true));
QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@@ -1642,7 +1635,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack> aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
uint32_t aChannelCount) {
int32_t aChannelCount) {
bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
AbstractThread::GetCurrent(),
@@ -1657,11 +1650,9 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
newStream = aCreated;
});

DispatchFunction([&] {
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(aTrack, *aTrack->DeviceId(),
aListener, aChannelCount));
});
MediaEnginePrefs settings;
settings.mChannels = aChannelCount;
QueueApplySettings(aTrack, aListener, settings);

SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)"_ns,
@@ -1733,14 +1724,12 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack>& aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
uint32_t aChannelCount) {
int32_t aChannelCount) {
ASSERT_TRUE(!!aTrack);
ASSERT_TRUE(!!aListener);
ASSERT_TRUE(!!aStream);
ASSERT_TRUE(aStream->mHasInput);
ASSERT_NE(aChannelCount, 0U);

const CubebUtils::AudioDeviceID device = *aTrack->DeviceId();
ASSERT_NE(aChannelCount, 0);

bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
@@ -1756,11 +1745,9 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
newStream = aCreated;
});

DispatchFunction([&] {
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(aTrack, device, aListener,
aChannelCount));
});
MediaEnginePrefs settings;
settings.mChannels = aChannelCount;
QueueApplySettings(aTrack, aListener, settings);

SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged) #1"_ns,
@@ -1801,8 +1788,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
aTrack = AudioProcessingTrack::Create(graph);
aListener = new AudioInputProcessing(aChannelCount);
aTrack->SetInputProcessing(aListener);
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(aTrack, aListener, true));
QueueExpectIsPassThrough(aTrack, aListener);
aTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(aTrack, aListener));

@@ -1836,8 +1822,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
track1 = AudioProcessingTrack::Create(graph);
listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(nativeDevice, listener1, PRINCIPAL_HANDLE_NONE);
@@ -1880,8 +1865,7 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track3, listener3, true));
QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(nonNativeDevice, listener3,
@@ -1999,12 +1983,13 @@ TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
track->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(track, deviceId, listener,
1));

MediaEnginePrefs settings;
settings.mChannels = 1;
QueueApplySettings(track, listener, settings);

track->GraphImpl()->AppendMessage(
MakeUnique<GuardMessage>(track, std::move(h)));
});
@@ -2065,8 +2050,7 @@ TEST(TestAudioTrackGraph, StartAudioDeviceBeforeStartingAudioProcessing)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
// Start audio device without starting audio processing.
track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
@@ -2131,8 +2115,7 @@ TEST(TestAudioTrackGraph, StopAudioProcessingBeforeStoppingAudioDevice)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
QueueExpectIsPassThrough(track, listener);
track->SetInputProcessing(listener);
track->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track, listener));
@@ -2267,8 +2250,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@@ -2291,8 +2273,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(2);
track2->SetInputProcessing(listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track2, listener2, true));
QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@@ -2311,8 +2292,7 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track3, listener3, true));
QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
@@ -2417,8 +2397,7 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
/* Primary graph: Create input track and open it */
processingTrack = AudioProcessingTrack::Create(primary);
listener = new AudioInputProcessing(2);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -2639,8 +2618,7 @@ TEST(TestAudioTrackGraph, SecondaryOutputDevice)
/* Create an input track and connect it to a device */
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@@ -398,39 +398,35 @@ void MediaEngineWebRTCMicrophoneSource::GetSettings(
}

AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount)
: mAudioProcessing(AudioProcessingBuilder().Create().release()),
mRequestedInputChannelCount(aMaxChannelCount),
mSkipProcessing(false),
mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
: mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
mEnabled(false),
mEnded(false),
mPacketCount(0) {}
mPacketCount(0) {
mSettings.mChannels = static_cast<int32_t>(std::min<uint32_t>(
std::numeric_limits<int32_t>::max(), aMaxChannelCount));
}

void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
// This method is just for asserts.
aGraph->AssertOnGraphThread();
}

bool AudioInputProcessing::PassThrough(MediaTrackGraph* aGraph) const {
bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
return mSkipProcessing;
// The high-pass filter is not taken into account when activating the
// pass through, since it's not controllable from content.
return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
}

void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
bool aPassThrough) {
void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();

if (aPassThrough == mSkipProcessing) {
return;
}
mSkipProcessing = aPassThrough;

if (!mEnabled) {
MOZ_ASSERT(!mPacketizerInput);
return;
}

if (aPassThrough) {
if (IsPassThrough(aGraph)) {
// Switching to pass-through. Clear state so that it doesn't affect any
// future processing, if re-enabled.
ResetAudioProcessing(aGraph);
@@ -442,14 +438,11 @@ void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
}

uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
return mRequestedInputChannelCount;
return mSettings.mChannels;
}

void AudioInputProcessing::SetRequestedInputChannelCount(
MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
uint32_t aRequestedInputChannelCount) {
mRequestedInputChannelCount = aRequestedInputChannelCount;

void AudioInputProcessing::RequestedInputChannelCountChanged(
MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId) {
aGraph->ReevaluateInputDevice(aDeviceId);
}

@@ -461,10 +454,6 @@ void AudioInputProcessing::Start(MediaTrackGraph* aGraph) {
}
mEnabled = true;

if (mSkipProcessing) {
return;
}

MOZ_ASSERT(!mPacketizerInput);
}

@@ -477,7 +466,7 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {

mEnabled = false;

if (mSkipProcessing) {
if (IsPassThrough(aGraph)) {
return;
}

@@ -618,7 +607,7 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
MOZ_ASSERT(aInput->GetDuration() == need,
"Wrong data length from input port source");

if (PassThrough(aGraph)) {
if (IsPassThrough(aGraph)) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Forwarding %" PRId64
" frames of input data to output directly (PassThrough)",
@@ -627,11 +616,11 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
return;
}

// If mRequestedInputChannelCount is updated, create a new packetizer. No
// need to change the pre-buffering since the rate is always the same. The
// frames left in the packetizer would be replaced by null data and then
// transferred to mSegment.
EnsurePacketizer(aGraph, mRequestedInputChannelCount);
// If the requested input channel count is updated, create a new
// packetizer. No need to change the pre-buffering since the rate is always
// the same. The frames left in the packetizer would be replaced by null
// data and then transferred to mSegment.
EnsurePacketizer(aGraph);

// Preconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@@ -673,7 +662,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
MOZ_ASSERT(aChunk.ChannelCount() > 0);
aGraph->AssertOnGraphThread();

if (!mEnabled || PassThrough(aGraph)) {
if (!mEnabled || IsPassThrough(aGraph)) {
return;
}

@@ -715,6 +704,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,

if (mOutputBufferFrameCount == framesPerPacket) {
// Have a complete packet. Analyze it.
EnsureAudioProcessing(aGraph);
for (uint32_t channel = 0; channel < channelCount; channel++) {
channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
}
@@ -733,7 +723,7 @@ void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
// Only called if we're not in passthrough mode
void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
const AudioSegment& aSegment) {
MOZ_ASSERT(!PassThrough(aGraph),
MOZ_ASSERT(!IsPassThrough(aGraph),
"This should be bypassed when in PassThrough mode.");
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(mPacketizerInput);
@@ -841,6 +831,7 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
StreamConfig inputConfig(aGraph->GraphRate(), channelCountInput);
StreamConfig outputConfig = inputConfig;

EnsureAudioProcessing(aGraph);
// Bug 1404965: Get the right delay here, it saves some work down the line.
mAudioProcessing->set_stream_delay_ms(0);

@@ -959,7 +950,9 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();

// Reset some processing
mAudioProcessing->Initialize();
if (mAudioProcessing) {
mAudioProcessing->Initialize();
}
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Reinitializing audio "
"processing",
@@ -971,13 +964,22 @@ void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
const MediaEnginePrefs& aSettings) {
TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
SetRequestedInputChannelCount(aGraph, aDeviceID, aSettings.mChannels);
// The high-pass filter is not taken into account when activating the
// pass through, since it's not controllable from content.
bool passThrough =
!(aSettings.mAecOn || aSettings.mAgcOn || aSettings.mNoiseOn);
SetPassThrough(aGraph, passThrough);

// Read previous state from mSettings.
uint32_t oldChannelCount = GetRequestedInputChannelCount();
bool wasPassThrough = IsPassThrough(aGraph);

mSettings = aSettings;
if (mAudioProcessing) {
mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
}

if (oldChannelCount != GetRequestedInputChannelCount()) {
RequestedInputChannelCountChanged(aGraph, aDeviceID);
}
if (wasPassThrough != IsPassThrough(aGraph)) {
PassThroughChanged(aGraph);
}
}

void AudioInputProcessing::End() {
@@ -991,14 +993,14 @@ TrackTime AudioInputProcessing::NumBufferedFrames(
return mSegment.GetDuration();
}

void AudioInputProcessing::EnsurePacketizer(MediaTrackGraph* aGraph,
uint32_t aChannels) {
void AudioInputProcessing::EnsurePacketizer(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
MOZ_ASSERT(aChannels > 0);
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(!mSkipProcessing);
MOZ_ASSERT(!IsPassThrough(aGraph));

if (mPacketizerInput && mPacketizerInput->mChannels == aChannels) {
uint32_t channelCount = GetRequestedInputChannelCount();
MOZ_ASSERT(channelCount > 0);
if (mPacketizerInput && mPacketizerInput->mChannels == channelCount) {
return;
}

@@ -1016,7 +1018,7 @@ void AudioInputProcessing::EnsurePacketizer(MediaTrackGraph* aGraph,
mChunksInPacketizer.clear();
}

mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), aChannels);
mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), channelCount);

if (needPreBuffering) {
LOG_FRAME(
@@ -1031,9 +1033,21 @@ void AudioInputProcessing::EnsurePacketizer(MediaTrackGraph* aGraph,
}
}

void AudioInputProcessing::EnsureAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();

if (!mAudioProcessing) {
TRACE("AudioProcessing creation");
mAudioProcessing.reset(AudioProcessingBuilder()
.SetConfig(ConfigForPrefs(mSettings))
.Create()
.release());
}
}

void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
MOZ_ASSERT(mSkipProcessing || !mEnabled);
MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
MOZ_ASSERT(mPacketizerInput);

LOG_FRAME(
@@ -1043,7 +1057,9 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {

// Reset AudioProcessing so that if we resume processing in the future it
// doesn't depend on old state.
mAudioProcessing->Initialize();
if (mAudioProcessing) {
mAudioProcessing->Initialize();
}

MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
mPacketizerInput->FramesAvailable() ==
@@ -116,7 +116,7 @@ class AudioInputProcessing : public AudioDataListener {
// If we're passing data directly without AEC or any other process, this
// means that all voice-processing has been disabled intentionaly. In this
// case, consider that the device is not used for voice input.
return !PassThrough(aGraph);
return !IsPassThrough(aGraph);
}

void Start(MediaTrackGraph* aGraph);
@@ -133,14 +133,10 @@ class AudioInputProcessing : public AudioDataListener {
void PacketizeAndProcess(MediaTrackGraph* aGraph,
const AudioSegment& aSegment);

void SetPassThrough(MediaTrackGraph* aGraph, bool aPassThrough);
uint32_t GetRequestedInputChannelCount();
void SetRequestedInputChannelCount(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceId,
uint32_t aRequestedInputChannelCount);
// This is true when all processing is disabled, we can skip
// This is true when all processing is disabled, in which case we can skip
// packetization, resampling and other processing passes.
bool PassThrough(MediaTrackGraph* aGraph) const;
bool IsPassThrough(MediaTrackGraph* aGraph) const;

// This allow changing the APM options, enabling or disabling processing
// steps. The settings get applied the next time we're about to process input
@@ -164,7 +160,11 @@ class AudioInputProcessing : public AudioDataListener {
~AudioInputProcessing() = default;
webrtc::AudioProcessing::Config ConfigForPrefs(
const MediaEnginePrefs& aPrefs);
void EnsurePacketizer(MediaTrackGraph* aGraph, uint32_t aChannels);
void PassThroughChanged(MediaTrackGraph* aGraph);
void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceId);
void EnsurePacketizer(MediaTrackGraph* aGraph);
void EnsureAudioProcessing(MediaTrackGraph* aGraph);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
// This implements the processing algoritm to apply to the input (e.g. a
@@ -172,17 +172,13 @@ class AudioInputProcessing : public AudioDataListener {
// class only accepts audio chunks of 10ms. It has two inputs and one output:
// it is fed the speaker data and the microphone data. It outputs processed
// input data.
const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
// Packetizer to be able to feed 10ms packets to the input side of
// mAudioProcessing. Not used if the processing is bypassed.
Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
// The number of channels asked for by content, after clamping to the range of
// legal channel count for this particular device.
uint32_t mRequestedInputChannelCount;
// mSkipProcessing is true if none of the processing passes are enabled,
// because of prefs or constraints. This allows simply copying the audio into
// the MTG, skipping resampling and the whole webrtc.org code.
bool mSkipProcessing;
// The current settings from about:config preferences and content-provided
// constraints.
MediaEnginePrefs mSettings;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are