Backed out 2 changesets (bug 1887774) for causing gtest failures. CLOSED TREE

Backed out changeset 29c59d5d9bee (bug 1887774)
Backed out changeset 210dbccad6f2 (bug 1887774)
Cosmin Sabou 2024-04-11 05:17:30 +03:00
parent 6ddb2c49d9
commit e9bd3cebd5
5 changed files with 208 additions and 235 deletions


@ -1115,14 +1115,12 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
const float mGlobalVolume;
#ifdef DEBUG
protected:
/**
* Used to assert when AppendMessage() runs control messages synchronously.
*/
bool mCanRunMessagesSynchronously;
#endif
private:
/**
* The graph's main-thread observable graph time.
* Updated by the stable state runnable after each iteration.


@ -30,21 +30,11 @@ class MockGraph : public MediaTrackGraphImpl {
void Init(uint32_t aChannels) {
MediaTrackGraphImpl::Init(OFFLINE_THREAD_DRIVER, DIRECT_DRIVER, aChannels);
MonitorAutoLock lock(mMonitor);
// We don't need a graph driver. Advance to
// LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION so that the driver never
// starts. Graph control messages run as in shutdown, synchronously.
// This permits the main thread part of track initialization through
// AudioProcessingTrack::Create().
mLifecycleState = LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION;
#ifdef DEBUG
mCanRunMessagesSynchronously = true;
#endif
// Remove this graph's driver since it holds a ref. We are still kept
// alive by the self-ref. Destroy() must be called to break that cycle if
// no tracks are created and destroyed.
mDriver = nullptr;
// Remove this graph's driver since it holds a ref. If no AppendMessage
// takes place, the driver never starts. This will also make sure no-one
// tries to use it. We are still kept alive by the self-ref. Destroy() must
// be called to break that cycle.
SetCurrentDriver(nullptr);
}
MOCK_CONST_METHOD0(OnGraphThread, bool());
@ -63,7 +53,6 @@ TEST(TestAudioInputProcessing, Buffering)
const uint32_t channels = 1;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
@ -73,21 +62,19 @@ TEST(TestAudioInputProcessing, Buffering)
GraphTime processedTime;
GraphTime nextTime;
AudioSegment output;
MediaEnginePrefs settings;
settings.mChannels = channels;
// Toggle pass-through mode without starting
{
EXPECT_EQ(aip->IsPassThrough(graph), true);
EXPECT_EQ(aip->PassThrough(graph), false);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->SetPassThrough(graph, true);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
aip->SetPassThrough(graph, false);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
aip->SetPassThrough(graph, true);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
@ -101,15 +88,14 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
// Set aip to processing/non-pass-through mode
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
aip->SetPassThrough(graph, false);
{
// Need (nextTime - processedTime) = 256 - 128 = 128 frames this round.
// aip has not started yet, so output will be filled with silence data
@ -120,7 +106,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
@ -138,7 +124,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
@ -159,7 +145,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
@ -173,7 +159,7 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 32);
@ -192,15 +178,13 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 64);
}
// Set aip to pass-through mode
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
aip->SetPassThrough(graph, true);
{
// Need (nextTime - processedTime) = 512 - 512 = 0 frames this round.
// No buffering in pass-through mode
@ -210,14 +194,14 @@ TEST(TestAudioInputProcessing, Buffering)
AudioSegment input;
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), processedTime);
EXPECT_EQ(aip->NumBufferedFrames(graph), 0);
}
aip->Stop(graph);
track->Destroy();
graph->Destroy();
}
TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
@ -226,7 +210,6 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
const uint32_t channels = 2;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
AudioGenerator<AudioDataValue> generator(channels, rate);
@ -286,17 +269,13 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
};
// Check the principals in audio-processing mode.
MediaEnginePrefs settings;
settings.mChannels = channels;
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
EXPECT_EQ(aip->PassThrough(graph), false);
aip->Start(graph);
{
AudioSegment output;
{
AudioSegment data;
aip->Process(track, 0, 4800, &input, &data);
aip->Process(graph, 0, 4800, &input, &data);
EXPECT_EQ(input.GetDuration(), 4800);
EXPECT_EQ(data.GetDuration(), 4800);
@ -304,7 +283,7 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
EXPECT_EQ(aip->NumBufferedFrames(graph), 480);
AudioSegment dummy;
dummy.AppendNullData(480);
aip->Process(track, 0, 480, &dummy, &data);
aip->Process(graph, 0, 480, &dummy, &data);
EXPECT_EQ(dummy.GetDuration(), 480);
EXPECT_EQ(data.GetDuration(), 480 + 4800);
@ -316,12 +295,10 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
}
// Check the principals in pass-through mode.
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), true);
aip->SetPassThrough(graph, true);
{
AudioSegment output;
aip->Process(track, 0, 4800, &input, &output);
aip->Process(graph, 0, 4800, &input, &output);
EXPECT_EQ(input.GetDuration(), 4800);
EXPECT_EQ(output.GetDuration(), 4800);
@ -329,7 +306,7 @@ TEST(TestAudioInputProcessing, ProcessDataWithDifferentPrincipals)
}
aip->Stop(graph);
track->Destroy();
graph->Destroy();
}
TEST(TestAudioInputProcessing, Downmixing)
@ -338,7 +315,6 @@ TEST(TestAudioInputProcessing, Downmixing)
const uint32_t channels = 4;
auto graph = MakeRefPtr<NiceMock<MockGraph>>(rate);
graph->Init(channels);
RefPtr track = AudioProcessingTrack::Create(graph);
auto aip = MakeRefPtr<AudioInputProcessing>(channels);
@ -348,11 +324,7 @@ TEST(TestAudioInputProcessing, Downmixing)
GraphTime processedTime;
GraphTime nextTime;
MediaEnginePrefs settings;
settings.mChannels = channels;
settings.mAgcOn = true;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), false);
aip->SetPassThrough(graph, false);
aip->Start(graph);
processedTime = 0;
@ -372,7 +344,7 @@ TEST(TestAudioInputProcessing, Downmixing)
// downmix to mono, scaling the input by 1/4 in the process.
// We can't compare the input and output signal because the sine is going to
// be mangled
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime);
EXPECT_EQ(output.MaxChannelCount(), 1u);
@ -392,18 +364,15 @@ TEST(TestAudioInputProcessing, Downmixing)
}
}
// Now, repeat the test in pass-through mode, checking we get the unmodified
// 4 channels.
settings.mAgcOn = false;
aip->ApplySettings(graph, nullptr, settings);
EXPECT_EQ(aip->IsPassThrough(graph), true);
// Now, repeat the test, checking we get the unmodified 4 channels.
aip->SetPassThrough(graph, true);
AudioSegment input, output;
processedTime = nextTime;
nextTime += MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(frames);
generator.Generate(input, nextTime - processedTime);
aip->Process(track, processedTime, nextTime, &input, &output);
aip->Process(graph, processedTime, nextTime, &input, &output);
EXPECT_EQ(input.GetDuration(), nextTime - processedTime);
EXPECT_EQ(output.GetDuration(), nextTime - processedTime);
// This time, no downmix: 4 channels of input, 4 channels of output
@ -419,5 +388,5 @@ TEST(TestAudioInputProcessing, Downmixing)
}
aip->Stop(graph);
track->Destroy();
graph->Destroy();
}


@ -59,27 +59,39 @@ struct StopInputProcessing : public ControlMessage {
void Run() override { mInputProcessing->Stop(mTrack->Graph()); }
};
void QueueApplySettings(AudioProcessingTrack* aTrack,
AudioInputProcessing* aInputProcessing,
const MediaEnginePrefs& aSettings) {
aTrack->QueueControlMessageWithNoShutdown(
[inputProcessing = RefPtr{aInputProcessing}, aSettings,
// If the track is not connected to a device then the particular
// AudioDeviceID (nullptr) passed to ReevaluateInputDevice() is not
// important.
deviceId = aTrack->DeviceId().valueOr(nullptr),
graph = aTrack->Graph()] {
inputProcessing->ApplySettings(graph, deviceId, aSettings);
});
}
struct SetPassThrough : public ControlMessage {
const RefPtr<AudioInputProcessing> mInputProcessing;
const bool mPassThrough;
void QueueExpectIsPassThrough(AudioProcessingTrack* aTrack,
AudioInputProcessing* aInputProcessing) {
aTrack->QueueControlMessageWithNoShutdown(
[inputProcessing = RefPtr{aInputProcessing}, graph = aTrack->Graph()] {
EXPECT_EQ(inputProcessing->IsPassThrough(graph), true);
});
}
SetPassThrough(MediaTrack* aTrack, AudioInputProcessing* aInputProcessing,
bool aPassThrough)
: ControlMessage(aTrack),
mInputProcessing(aInputProcessing),
mPassThrough(aPassThrough) {}
void Run() override {
EXPECT_EQ(mInputProcessing->PassThrough(mTrack->Graph()), !mPassThrough);
mInputProcessing->SetPassThrough(mTrack->Graph(), mPassThrough);
}
};
struct SetRequestedInputChannelCount : public ControlMessage {
const CubebUtils::AudioDeviceID mDeviceId;
const RefPtr<AudioInputProcessing> mInputProcessing;
const uint32_t mChannelCount;
SetRequestedInputChannelCount(MediaTrack* aTrack,
CubebUtils::AudioDeviceID aDeviceId,
AudioInputProcessing* aInputProcessing,
uint32_t aChannelCount)
: ControlMessage(aTrack),
mDeviceId(aDeviceId),
mInputProcessing(aInputProcessing),
mChannelCount(aChannelCount) {}
void Run() override {
mInputProcessing->SetRequestedInputChannelCount(mTrack->Graph(), mDeviceId,
mChannelCount);
}
};
#endif // MOZ_WEBRTC
class GoFaster : public ControlMessage {
@ -1163,7 +1175,8 @@ TEST(TestAudioTrackGraph, ErrorCallback)
auto started = Invoke([&] {
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@ -1233,7 +1246,8 @@ TEST(TestAudioTrackGraph, AudioProcessingTrack)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@ -1321,22 +1335,12 @@ TEST(TestAudioTrackGraph, ReConnectDeviceInput)
outputTrack->QueueSetAutoend(false);
outputTrack->AddAudioOutput(reinterpret_cast<void*>(1), nullptr);
port = outputTrack->AllocateInputPort(processingTrack);
const int32_t channelCount = 2;
listener = new AudioInputProcessing(channelCount);
listener = new AudioInputProcessing(2);
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
MediaEnginePrefs settings;
settings.mChannels = channelCount;
settings.mAgcOn = true; // Turn off pass-through.
// AGC1 Mode 0 interferes with AudioVerifier's frequency estimation
// through zero-crossing counts.
settings.mAgc2Forced = true;
QueueApplySettings(processingTrack, listener, settings);
return graph->NotifyWhenDeviceStarted(nullptr);
});
@ -1489,7 +1493,8 @@ TEST(TestAudioTrackGraph, AudioProcessingTrackDisabling)
port = outputTrack->AllocateInputPort(processingTrack);
/* Primary graph: Open Audio Input through SourceMediaTrack */
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
processingTrack->SetInputProcessing(listener);
processingTrack->ConnectDeviceInput(deviceId, listener,
PRINCIPAL_HANDLE_NONE);
@ -1597,7 +1602,8 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(2);
track1->SetInputProcessing(listener1);
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@ -1618,7 +1624,8 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(1);
track2->SetInputProcessing(listener2);
QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track2, listener2, true));
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@ -1635,7 +1642,7 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack> aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
int32_t aChannelCount) {
uint32_t aChannelCount) {
bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
AbstractThread::GetCurrent(),
@ -1650,9 +1657,11 @@ TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)
newStream = aCreated;
});
MediaEnginePrefs settings;
settings.mChannels = aChannelCount;
QueueApplySettings(aTrack, aListener, settings);
DispatchFunction([&] {
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(aTrack, *aTrack->DeviceId(),
aListener, aChannelCount));
});
SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, SetRequestedInputChannelCount)"_ns,
@ -1724,12 +1733,14 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
auto setNewChannelCount = [&](const RefPtr<AudioProcessingTrack>& aTrack,
const RefPtr<AudioInputProcessing>& aListener,
RefPtr<SmartMockCubebStream>& aStream,
int32_t aChannelCount) {
uint32_t aChannelCount) {
ASSERT_TRUE(!!aTrack);
ASSERT_TRUE(!!aListener);
ASSERT_TRUE(!!aStream);
ASSERT_TRUE(aStream->mHasInput);
ASSERT_NE(aChannelCount, 0);
ASSERT_NE(aChannelCount, 0U);
const CubebUtils::AudioDeviceID device = *aTrack->DeviceId();
bool destroyed = false;
MediaEventListener destroyListener = cubeb->StreamDestroyEvent().Connect(
@ -1745,9 +1756,11 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
newStream = aCreated;
});
MediaEnginePrefs settings;
settings.mChannels = aChannelCount;
QueueApplySettings(aTrack, aListener, settings);
DispatchFunction([&] {
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(aTrack, device, aListener,
aChannelCount));
});
SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>(
"TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged) #1"_ns,
@ -1788,7 +1801,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
aTrack = AudioProcessingTrack::Create(graph);
aListener = new AudioInputProcessing(aChannelCount);
aTrack->SetInputProcessing(aListener);
QueueExpectIsPassThrough(aTrack, aListener);
aTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(aTrack, aListener, true));
aTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(aTrack, aListener));
@ -1822,7 +1836,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
track1 = AudioProcessingTrack::Create(graph);
listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(nativeDevice, listener1, PRINCIPAL_HANDLE_NONE);
@ -1865,7 +1880,8 @@ TEST(TestAudioTrackGraph, RestartAudioIfProcessingMaxChannelCountChanged)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track3, listener3, true));
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(nonNativeDevice, listener3,
@ -1983,13 +1999,12 @@ TEST(TestAudioTrackGraph, SetInputChannelCountBeforeAudioCallbackDriver)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(track, listener);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
track->SetInputProcessing(listener);
MediaEnginePrefs settings;
settings.mChannels = 1;
QueueApplySettings(track, listener, settings);
track->GraphImpl()->AppendMessage(
MakeUnique<SetRequestedInputChannelCount>(track, deviceId, listener,
1));
track->GraphImpl()->AppendMessage(
MakeUnique<GuardMessage>(track, std::move(h)));
});
@ -2050,7 +2065,8 @@ TEST(TestAudioTrackGraph, StartAudioDeviceBeforeStartingAudioProcessing)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(track, listener);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
track->SetInputProcessing(listener);
// Start audio device without starting audio processing.
track->ConnectDeviceInput(deviceId, listener, PRINCIPAL_HANDLE_NONE);
@ -2115,7 +2131,8 @@ TEST(TestAudioTrackGraph, StopAudioProcessingBeforeStoppingAudioDevice)
DispatchFunction([&] {
track = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(track, listener);
track->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track, listener, true));
track->SetInputProcessing(listener);
track->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track, listener));
@ -2250,7 +2267,8 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track1 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener1 = new AudioInputProcessing(1);
track1->SetInputProcessing(listener1);
QueueExpectIsPassThrough(track1, listener1);
track1->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track1, listener1, true));
track1->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track1, listener1));
track1->ConnectDeviceInput(device1, listener1, PRINCIPAL_HANDLE_NONE);
@ -2273,7 +2291,8 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track2 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener2 = new AudioInputProcessing(2);
track2->SetInputProcessing(listener2);
QueueExpectIsPassThrough(track2, listener2);
track2->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track2, listener2, true));
track2->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track2, listener2));
track2->ConnectDeviceInput(device2, listener2, PRINCIPAL_HANDLE_NONE);
@ -2292,7 +2311,8 @@ TEST(TestAudioTrackGraph, SwitchNativeAudioProcessingTrack)
RefPtr<AudioProcessingTrack> track3 = AudioProcessingTrack::Create(graph);
RefPtr<AudioInputProcessing> listener3 = new AudioInputProcessing(1);
track3->SetInputProcessing(listener3);
QueueExpectIsPassThrough(track3, listener3);
track3->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(track3, listener3, true));
track3->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(track3, listener3));
track3->ConnectDeviceInput(device3, listener3, PRINCIPAL_HANDLE_NONE);
@ -2397,7 +2417,8 @@ void TestCrossGraphPort(uint32_t aInputRate, uint32_t aOutputRate,
/* Primary graph: Create input track and open it */
processingTrack = AudioProcessingTrack::Create(primary);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));
@ -2618,7 +2639,8 @@ TEST(TestAudioTrackGraph, SecondaryOutputDevice)
/* Create an input track and connect it to a device */
processingTrack = AudioProcessingTrack::Create(graph);
listener = new AudioInputProcessing(2);
QueueExpectIsPassThrough(processingTrack, listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<SetPassThrough>(processingTrack, listener, true));
processingTrack->SetInputProcessing(listener);
processingTrack->GraphImpl()->AppendMessage(
MakeUnique<StartInputProcessing>(processingTrack, listener));


@ -398,35 +398,39 @@ void MediaEngineWebRTCMicrophoneSource::GetSettings(
}
AudioInputProcessing::AudioInputProcessing(uint32_t aMaxChannelCount)
: mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
: mAudioProcessing(AudioProcessingBuilder().Create().release()),
mRequestedInputChannelCount(aMaxChannelCount),
mSkipProcessing(false),
mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100),
mEnabled(false),
mEnded(false),
mPacketCount(0) {
mSettings.mChannels = static_cast<int32_t>(std::min<uint32_t>(
std::numeric_limits<int32_t>::max(), aMaxChannelCount));
}
mPacketCount(0) {}
void AudioInputProcessing::Disconnect(MediaTrackGraph* aGraph) {
// This method is just for asserts.
aGraph->AssertOnGraphThread();
}
bool AudioInputProcessing::IsPassThrough(MediaTrackGraph* aGraph) const {
bool AudioInputProcessing::PassThrough(MediaTrackGraph* aGraph) const {
aGraph->AssertOnGraphThread();
// The high-pass filter is not taken into account when activating the
// pass through, since it's not controllable from content.
return !(mSettings.mAecOn || mSettings.mAgcOn || mSettings.mNoiseOn);
return mSkipProcessing;
}
void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
void AudioInputProcessing::SetPassThrough(MediaTrackGraph* aGraph,
bool aPassThrough) {
aGraph->AssertOnGraphThread();
if (aPassThrough == mSkipProcessing) {
return;
}
mSkipProcessing = aPassThrough;
if (!mEnabled) {
MOZ_ASSERT(!mPacketizerInput);
return;
}
if (IsPassThrough(aGraph)) {
if (aPassThrough) {
// Switching to pass-through. Clear state so that it doesn't affect any
// future processing, if re-enabled.
ResetAudioProcessing(aGraph);
@ -438,11 +442,14 @@ void AudioInputProcessing::PassThroughChanged(MediaTrackGraph* aGraph) {
}
uint32_t AudioInputProcessing::GetRequestedInputChannelCount() {
return mSettings.mChannels;
return mRequestedInputChannelCount;
}
void AudioInputProcessing::RequestedInputChannelCountChanged(
MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId) {
void AudioInputProcessing::SetRequestedInputChannelCount(
MediaTrackGraph* aGraph, CubebUtils::AudioDeviceID aDeviceId,
uint32_t aRequestedInputChannelCount) {
mRequestedInputChannelCount = aRequestedInputChannelCount;
aGraph->ReevaluateInputDevice(aDeviceId);
}
@ -454,6 +461,10 @@ void AudioInputProcessing::Start(MediaTrackGraph* aGraph) {
}
mEnabled = true;
if (mSkipProcessing) {
return;
}
MOZ_ASSERT(!mPacketizerInput);
}
@ -466,7 +477,7 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
mEnabled = false;
if (IsPassThrough(aGraph)) {
if (mSkipProcessing) {
return;
}
@ -584,11 +595,10 @@ void AudioInputProcessing::Stop(MediaTrackGraph* aGraph) {
//
// The D(N) frames of data are just forwarded from input to output without any
// processing
void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
GraphTime aFrom, GraphTime aTo,
AudioSegment* aInput,
void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
GraphTime aTo, AudioSegment* aInput,
AudioSegment* aOutput) {
aTrack->AssertOnGraphThread();
aGraph->AssertOnGraphThread();
MOZ_ASSERT(aFrom <= aTo);
MOZ_ASSERT(!mEnded);
@ -597,11 +607,10 @@ void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
return;
}
MediaTrackGraph* graph = aTrack->Graph();
if (!mEnabled) {
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Filling %" PRId64
" frames of silence to output (disabled)",
graph, graph->CurrentDriver(), this, need);
aGraph, aGraph->CurrentDriver(), this, need);
aOutput->AppendNullData(need);
return;
}
@ -609,20 +618,20 @@ void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
MOZ_ASSERT(aInput->GetDuration() == need,
"Wrong data length from input port source");
if (IsPassThrough(graph)) {
if (PassThrough(aGraph)) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Forwarding %" PRId64
" frames of input data to output directly (PassThrough)",
graph, graph->CurrentDriver(), this, aInput->GetDuration());
aGraph, aGraph->CurrentDriver(), this, aInput->GetDuration());
aOutput->AppendSegment(aInput);
return;
}
// If the requested input channel count is updated, create a new
// packetizer. No need to change the pre-buffering since the rate is always
// the same. The frames left in the packetizer would be replaced by null
// data and then transferred to mSegment.
EnsurePacketizer(aTrack);
// If mRequestedInputChannelCount is updated, create a new packetizer. No
// need to change the pre-buffering since the rate is always the same. The
// frames left in the packetizer would be replaced by null data and then
// transferred to mSegment.
EnsurePacketizer(aGraph, mRequestedInputChannelCount);
// Preconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@ -634,10 +643,10 @@ void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
MOZ_ASSERT(mSegment.GetDuration() >= 1);
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
PacketizeAndProcess(aTrack, *aInput);
PacketizeAndProcess(aGraph, *aInput);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p Buffer has %" PRId64
" frames of data now, after packetizing and processing",
graph, graph->CurrentDriver(), this, mSegment.GetDuration());
aGraph, aGraph->CurrentDriver(), this, mSegment.GetDuration());
// By setting pre-buffering to the number of frames of one packet, and
// because the maximum number of frames stuck in the packetizer before
@ -648,7 +657,8 @@ void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
mSegment.RemoveLeading(need);
LOG_FRAME("(Graph %p, Driver %p) AudioInputProcessing %p moving %" PRId64
" frames of data to output, leaving %" PRId64 " frames in buffer",
graph, graph->CurrentDriver(), this, need, mSegment.GetDuration());
aGraph, aGraph->CurrentDriver(), this, need,
mSegment.GetDuration());
// Postconditions of the audio-processing logic.
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
@ -658,16 +668,16 @@ void AudioInputProcessing::Process(AudioProcessingTrack* aTrack,
MOZ_ASSERT(mSegment.GetDuration() <= mPacketizerInput->mPacketSize);
}
void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
const AudioChunk& aChunk) {
MOZ_ASSERT(aChunk.ChannelCount() > 0);
aTrack->AssertOnGraphThread();
aGraph->AssertOnGraphThread();
if (!mEnabled || IsPassThrough(aTrack->Graph())) {
if (!mEnabled || PassThrough(aGraph)) {
return;
}
TrackRate sampleRate = aTrack->mSampleRate;
TrackRate sampleRate = aGraph->GraphRate();
uint32_t framesPerPacket = GetPacketSize(sampleRate); // in frames
// Downmix from aChannels to MAX_CHANNELS if needed.
uint32_t channelCount =
@ -705,7 +715,6 @@ void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
if (mOutputBufferFrameCount == framesPerPacket) {
// Have a complete packet. Analyze it.
EnsureAudioProcessing(aTrack);
for (uint32_t channel = 0; channel < channelCount; channel++) {
channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
}
@ -722,15 +731,14 @@ void AudioInputProcessing::ProcessOutputData(AudioProcessingTrack* aTrack,
}
// Only called if we're not in passthrough mode
void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraph* aGraph,
const AudioSegment& aSegment) {
MediaTrackGraph* graph = aTrack->Graph();
MOZ_ASSERT(!IsPassThrough(graph),
MOZ_ASSERT(!PassThrough(aGraph),
"This should be bypassed when in PassThrough mode.");
MOZ_ASSERT(mEnabled);
MOZ_ASSERT(mPacketizerInput);
MOZ_ASSERT(mPacketizerInput->mPacketSize ==
GetPacketSize(aTrack->mSampleRate));
GetPacketSize(aGraph->GraphRate()));
// Calculate number of the pending frames in mChunksInPacketizer.
auto pendingFrames = [&]() {
@ -772,7 +780,7 @@ void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Packetizing %zu frames. "
"Packetizer has %u frames (enough for %u packets) now",
graph, graph->CurrentDriver(), this, frameCount,
aGraph, aGraph->CurrentDriver(), this, frameCount,
mPacketizerInput->FramesAvailable(),
mPacketizerInput->PacketsAvailable());
@ -830,10 +838,9 @@ void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
deinterleavedPacketizedInputDataChannelPointers.Elements());
}
StreamConfig inputConfig(aTrack->mSampleRate, channelCountInput);
StreamConfig inputConfig(aGraph->GraphRate(), channelCountInput);
StreamConfig outputConfig = inputConfig;
EnsureAudioProcessing(aTrack);
// Bug 1404965: Get the right delay here, it saves some work down the line.
mAudioProcessing->set_stream_delay_ms(0);
@ -939,7 +946,7 @@ void AudioInputProcessing::PacketizeAndProcess(AudioProcessingTrack* aTrack,
"(Graph %p, Driver %p) AudioInputProcessing %p Appending %u frames of "
"packetized audio, leaving %u frames in packetizer (%" PRId64
" frames in mChunksInPacketizer)",
graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize,
mPacketizerInput->FramesAvailable(), pendingFrames());
// Postcondition of the Principal-labelling logic.
@ -952,9 +959,7 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
// Reset some processing
if (mAudioProcessing) {
mAudioProcessing->Initialize();
}
mAudioProcessing->Initialize();
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p Reinitializing audio "
"processing",
@ -966,22 +971,13 @@ void AudioInputProcessing::ApplySettings(MediaTrackGraph* aGraph,
const MediaEnginePrefs& aSettings) {
TRACE("AudioInputProcessing::ApplySettings");
aGraph->AssertOnGraphThread();
// Read previous state from mSettings.
uint32_t oldChannelCount = GetRequestedInputChannelCount();
bool wasPassThrough = IsPassThrough(aGraph);
mSettings = aSettings;
if (mAudioProcessing) {
mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
}
if (oldChannelCount != GetRequestedInputChannelCount()) {
RequestedInputChannelCountChanged(aGraph, aDeviceID);
}
if (wasPassThrough != IsPassThrough(aGraph)) {
PassThroughChanged(aGraph);
}
mAudioProcessing->ApplyConfig(ConfigForPrefs(aSettings));
SetRequestedInputChannelCount(aGraph, aDeviceID, aSettings.mChannels);
// The high-pass filter is not taken into account when activating the
// pass through, since it's not controllable from content.
bool passThrough =
!(aSettings.mAecOn || aSettings.mAgcOn || aSettings.mNoiseOn);
SetPassThrough(aGraph, passThrough);
}
void AudioInputProcessing::End() {
@ -995,15 +991,14 @@ TrackTime AudioInputProcessing::NumBufferedFrames(
return mSegment.GetDuration();
}
void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
aTrack->AssertOnGraphThread();
void AudioInputProcessing::EnsurePacketizer(MediaTrackGraph* aGraph,
uint32_t aChannels) {
aGraph->AssertOnGraphThread();
MOZ_ASSERT(aChannels > 0);
MOZ_ASSERT(mEnabled);
MediaTrackGraph* graph = aTrack->Graph();
MOZ_ASSERT(!IsPassThrough(graph));
MOZ_ASSERT(!mSkipProcessing);
uint32_t channelCount = GetRequestedInputChannelCount();
MOZ_ASSERT(channelCount > 0);
if (mPacketizerInput && mPacketizerInput->mChannels == channelCount) {
if (mPacketizerInput && mPacketizerInput->mChannels == aChannels) {
return;
}
@ -1011,7 +1006,7 @@ void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
// need to change pre-buffering since the packet size is the same as the old
// one, since the rate is a constant.
MOZ_ASSERT_IF(mPacketizerInput, mPacketizerInput->mPacketSize ==
GetPacketSize(aTrack->mSampleRate));
GetPacketSize(aGraph->GraphRate()));
bool needPreBuffering = !mPacketizerInput;
if (mPacketizerInput) {
const TrackTime numBufferedFrames =
@ -1021,36 +1016,24 @@ void AudioInputProcessing::EnsurePacketizer(AudioProcessingTrack* aTrack) {
mChunksInPacketizer.clear();
}
mPacketizerInput.emplace(GetPacketSize(aTrack->mSampleRate), channelCount);
mPacketizerInput.emplace(GetPacketSize(aGraph->GraphRate()), aChannels);
if (needPreBuffering) {
LOG_FRAME(
"(Graph %p, Driver %p) AudioInputProcessing %p: Adding %u frames of "
"silence as pre-buffering",
graph, graph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
aGraph, aGraph->CurrentDriver(), this, mPacketizerInput->mPacketSize);
AudioSegment buffering;
buffering.AppendNullData(
static_cast<TrackTime>(mPacketizerInput->mPacketSize));
PacketizeAndProcess(aTrack, buffering);
}
}
void AudioInputProcessing::EnsureAudioProcessing(AudioProcessingTrack* aTrack) {
aTrack->AssertOnGraphThread();
if (!mAudioProcessing) {
TRACE("AudioProcessing creation");
mAudioProcessing.reset(AudioProcessingBuilder()
.SetConfig(ConfigForPrefs(mSettings))
.Create()
.release());
PacketizeAndProcess(aGraph, buffering);
}
}
void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
aGraph->AssertOnGraphThread();
MOZ_ASSERT(IsPassThrough(aGraph) || !mEnabled);
MOZ_ASSERT(mSkipProcessing || !mEnabled);
MOZ_ASSERT(mPacketizerInput);
LOG_FRAME(
@ -1060,9 +1043,7 @@ void AudioInputProcessing::ResetAudioProcessing(MediaTrackGraph* aGraph) {
// Reset AudioProcessing so that if we resume processing in the future it
// doesn't depend on old state.
if (mAudioProcessing) {
mAudioProcessing->Initialize();
}
mAudioProcessing->Initialize();
MOZ_ASSERT(static_cast<uint32_t>(mSegment.GetDuration()) +
mPacketizerInput->FramesAvailable() ==
@ -1140,7 +1121,7 @@ void AudioProcessingTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
MOZ_ASSERT(mInputs.Length() == 1);
AudioSegment data;
DeviceInputConsumerTrack::GetInputSourceData(data, aFrom, aTo);
mInputProcessing->Process(this, aFrom, aTo, &data,
mInputProcessing->Process(Graph(), aFrom, aTo, &data,
GetData<AudioSegment>());
}
MOZ_ASSERT(TrackTimeToGraphTime(GetEnd()) == aTo);
@ -1156,7 +1137,7 @@ void AudioProcessingTrack::NotifyOutputData(MediaTrackGraph* aGraph,
MOZ_ASSERT(mGraph == aGraph, "Cannot feed audio output to another graph");
AssertOnGraphThread();
if (mInputProcessing) {
mInputProcessing->ProcessOutputData(this, aChunk);
mInputProcessing->ProcessOutputData(aGraph, aChunk);
}
}


@ -108,16 +108,15 @@ class MediaEngineWebRTCMicrophoneSource : public MediaEngineSource {
class AudioInputProcessing : public AudioDataListener {
public:
explicit AudioInputProcessing(uint32_t aMaxChannelCount);
void Process(AudioProcessingTrack* aTrack, GraphTime aFrom, GraphTime aTo,
void Process(MediaTrackGraph* aGraph, GraphTime aFrom, GraphTime aTo,
AudioSegment* aInput, AudioSegment* aOutput);
void ProcessOutputData(AudioProcessingTrack* aTrack,
const AudioChunk& aChunk);
void ProcessOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
// If we're passing data directly without AEC or any other process, this
// means that all voice-processing has been disabled intentionaly. In this
// case, consider that the device is not used for voice input.
return !IsPassThrough(aGraph);
return !PassThrough(aGraph);
}
void Start(MediaTrackGraph* aGraph);
@ -131,13 +130,17 @@ class AudioInputProcessing : public AudioDataListener {
void Disconnect(MediaTrackGraph* aGraph) override;
void PacketizeAndProcess(AudioProcessingTrack* aTrack,
void PacketizeAndProcess(MediaTrackGraph* aGraph,
const AudioSegment& aSegment);
void SetPassThrough(MediaTrackGraph* aGraph, bool aPassThrough);
uint32_t GetRequestedInputChannelCount();
// This is true when all processing is disabled, in which case we can skip
void SetRequestedInputChannelCount(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceId,
uint32_t aRequestedInputChannelCount);
// This is true when all processing is disabled, we can skip
// packetization, resampling and other processing passes.
bool IsPassThrough(MediaTrackGraph* aGraph) const;
bool PassThrough(MediaTrackGraph* aGraph) const;
// This allows changing the APM options, enabling or disabling processing
// steps. The settings get applied the next time we're about to process input
@ -161,11 +164,7 @@ class AudioInputProcessing : public AudioDataListener {
~AudioInputProcessing() = default;
webrtc::AudioProcessing::Config ConfigForPrefs(
const MediaEnginePrefs& aPrefs);
void PassThroughChanged(MediaTrackGraph* aGraph);
void RequestedInputChannelCountChanged(MediaTrackGraph* aGraph,
CubebUtils::AudioDeviceID aDeviceId);
void EnsurePacketizer(AudioProcessingTrack* aTrack);
void EnsureAudioProcessing(AudioProcessingTrack* aTrack);
void EnsurePacketizer(MediaTrackGraph* aGraph, uint32_t aChannels);
void ResetAudioProcessing(MediaTrackGraph* aGraph);
PrincipalHandle GetCheckedPrincipal(const AudioSegment& aSegment);
// This implements the processing algorithm to apply to the input (e.g. a
@ -173,13 +172,17 @@ class AudioInputProcessing : public AudioDataListener {
// class only accepts audio chunks of 10ms. It has two inputs and one output:
// it is fed the speaker data and the microphone data. It outputs processed
// input data.
UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
// Packetizer to be able to feed 10ms packets to the input side of
// mAudioProcessing. Not used if the processing is bypassed.
Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
// The current settings from about:config preferences and content-provided
// constraints.
MediaEnginePrefs mSettings;
// The number of channels asked for by content, after clamping to the range of
// legal channel count for this particular device.
uint32_t mRequestedInputChannelCount;
// mSkipProcessing is true if none of the processing passes are enabled,
// because of prefs or constraints. This allows simply copying the audio into
// the MTG, skipping resampling and the whole webrtc.org code.
bool mSkipProcessing;
// Buffer for up to one 10ms packet of planar mixed audio output for the
// reverse-stream (speaker data) of mAudioProcessing AEC.
// Length is packet size * channel count, regardless of how many frames are