Bug 1543622 - Make number of channels out param of GetAudioFrame; r=pehrsons

The number of channels is available in mAudioFrame in GetAudioFrame, so there
is no reason to calculate it after the fact in MediaPipeline.

Differential Revision: https://phabricator.services.mozilla.com/D50934
parent 5d8c10a424
commit 1f3dd67cb9
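
In essence, the patch replaces a value the pipeline used to derive with one the conduit already knows. A minimal before/after sketch of the call site, using names taken from the diff below (buffer setup elided):

    // Before: channel count inferred from the size of the 10 ms packet.
    int samplesLength = scratchBufferLength;
    conduit->GetAudioFrame(scratchBuffer, mRate, 0, samplesLength);
    uint32_t channelCount = samplesLength / samplesPer10ms;

    // After: the conduit reports the channel count it already holds
    // in mAudioFrame, via an out parameter.
    size_t channelCount = 0;
    size_t samplesLength = scratchBufferLength;
    conduit->GetAudioFrame(scratchBuffer, mRate, 0, channelCount, samplesLength);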
@@ -197,7 +197,8 @@ void AudioSendAndReceive::GenerateAndReadSamples() {
   auto audioInput = mozilla::MakeUnique<int16_t[]>(PLAYOUT_SAMPLE_LENGTH);
   auto audioOutput = mozilla::MakeUnique<int16_t[]>(PLAYOUT_SAMPLE_LENGTH);
   short* inbuf;
-  int sampleLengthDecoded = 0;
+  size_t numChannels = 0;
+  size_t sampleLengthDecoded = 0;
   unsigned int SAMPLES = (PLAYOUT_SAMPLE_FREQUENCY / 100);  // 10 milliseconds
   int CHANNELS = 1;  // mono audio
   int sampleLengthInBytes = sizeof(int16_t) * PLAYOUT_SAMPLE_LENGTH;
@@ -246,7 +247,7 @@ void AudioSendAndReceive::GenerateAndReadSamples() {
 
     PR_Sleep(PR_MillisecondsToInterval(10));
     mOtherSession->GetAudioFrame(audioOutput.get(), PLAYOUT_SAMPLE_FREQUENCY,
-                                 10, sampleLengthDecoded);
+                                 10, numChannels, sampleLengthDecoded);
     if (sampleLengthDecoded == 0) {
       cerr << " Zero length Sample " << endl;
     }
@@ -583,7 +583,8 @@ MediaConduitErrorCode WebrtcAudioConduit::SendAudioFrame(
 MediaConduitErrorCode WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                                         int32_t samplingFreqHz,
                                                         int32_t capture_delay,
-                                                        int& lengthSamples) {
+                                                        size_t& numChannels,
+                                                        size_t& lengthSamples) {
   CSFLogDebug(LOGTAG, "%s ", __FUNCTION__);
 
   // validate params
@@ -614,16 +615,23 @@ MediaConduitErrorCode WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
     return kMediaConduitSessionNotInited;
   }
 
-  int lengthSamplesAllowed = lengthSamples;
+  size_t lengthSamplesAllowed = lengthSamples;
   lengthSamples = 0;  // output paramter
 
   mRecvChannelProxy->GetAudioFrameWithInfo(samplingFreqHz, &mAudioFrame);
+  numChannels = mAudioFrame.num_channels_;
+
+  if (numChannels == 0) {
+    CSFLogError(LOGTAG, "%s Audio frame has zero channels", __FUNCTION__);
+    return kMediaConduitPlayoutError;
+  }
+
   // XXX Annoying, have to copy to our buffers -- refactor?
   lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
   MOZ_RELEASE_ASSERT(lengthSamples <= lengthSamplesAllowed);
   PodCopy(speechData, mAudioFrame.data(), lengthSamples);
 
-  CSFLogDebug(LOGTAG, "%s GetAudioFrame:Got samples: length %d ", __FUNCTION__,
+  CSFLogDebug(LOGTAG, "%s GetAudioFrame:Got samples: length %zu ", __FUNCTION__,
               lengthSamples);
   return kMediaConduitNoError;
 }
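
An aside on the log-format fix in that hunk: once lengthSamples is a size_t, the matching printf-style conversion is %zu; keeping %d would pass a size_t where an int is expected, which is undefined behavior on platforms where the two types differ in width. A minimal standalone illustration (not Mozilla code):

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t lengthSamples = 480;  // e.g. 10 ms of mono audio at 48 kHz
      std::printf("length %zu\n", lengthSamples);  // %zu matches size_t
      return 0;
    }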
@@ -137,6 +137,8 @@ class WebrtcAudioConduit : public AudioSessionConduit,
    * @param capture_delay [in]: Estimated Time between reading of the samples
    *                            to rendering/playback
    * @param lengthSamples [in]: Contain maximum length of speechData array.
+   * @param numChannels [out]: Number of channels in the audio frame,
+   *                           guaranteed to be non-zero.
    * @param lengthSamples [out]: Will contain length of the audio frame in
    *                             samples at return.
    *                             Ex: A value of 160 implies 160 samples each of
@@ -151,7 +153,8 @@ class WebrtcAudioConduit : public AudioSessionConduit,
   MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
                                       int32_t samplingFreqHz,
                                       int32_t capture_delay,
-                                      int& lengthSamples) override;
+                                      size_t& numChannels,
+                                      size_t& lengthSamples) override;
 
   /**
    * Webrtc transport implementation to send and receive RTP packet.
@@ -536,6 +536,8 @@ class AudioSessionConduit : public MediaSessionConduit {
    *                             Hertz (16000, 32000,..)
    * @param capture_delay [in]: Estimated Time between reading of the samples
    *                            to rendering/playback
+   * @param numChannels [out]: Number of channels in the audio frame,
+   *                           guaranteed to be non-zero.
    * @param lengthSamples [out]: Will contain length of the audio frame in
    *                             samples at return.
    *                             Ex: A value of 160 implies 160 samples each of
@@ -549,7 +551,8 @@ class AudioSessionConduit : public MediaSessionConduit {
   virtual MediaConduitErrorCode GetAudioFrame(int16_t speechData[],
                                               int32_t samplingFreqHz,
                                               int32_t capture_delay,
-                                              int& lengthSamples) = 0;
+                                              size_t& numChannels,
+                                              size_t& lengthSamples) = 0;
 
   /**
    * Checks if given sampling frequency is supported
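
Putting the documented contract together: lengthSamples carries the buffer capacity on the way in and the frame length on the way out, and numChannels is guaranteed non-zero on success. A hedged usage sketch against this interface; the conduit pointer and the 48000 Hz rate are placeholders, not part of the patch:

    int16_t buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
    size_t numChannels = 0;
    size_t lengthSamples = sizeof(buffer) / sizeof(buffer[0]);  // capacity in
    MediaConduitErrorCode err =
        conduit->GetAudioFrame(buffer, 48000, 0, numChannels, lengthSamples);
    if (err == kMediaConduitNoError) {
      // Safe: the interface guarantees numChannels != 0 on success.
      size_t frames = lengthSamples / numChannels;
    }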
@@ -1320,11 +1320,12 @@ class MediaPipelineReceiveAudio::PipelineListener
     // in the graph rate.
 
     while (mPlayedTicks < aDesiredTime) {
-      const int scratchBufferLength =
+      constexpr size_t scratchBufferLength =
           AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t);
       int16_t scratchBuffer[scratchBufferLength];
 
-      int samplesLength = scratchBufferLength;
+      size_t channelCount = 0;
+      size_t samplesLength = scratchBufferLength;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
@@ -1332,7 +1333,7 @@ class MediaPipelineReceiveAudio::PipelineListener
               ->GetAudioFrame(scratchBuffer, mRate,
                               0,  // TODO(ekr@rtfm.com): better estimate of
                                   // "capture" (really playout) delay
-                              samplesLength);
+                              channelCount, samplesLength);
 
       if (err != kMediaConduitNoError) {
         // Insert silence on conduit/GIPS failure (extremely unlikely)
@@ -1341,6 +1342,7 @@ class MediaPipelineReceiveAudio::PipelineListener
                  " (desired %" PRId64 " -> %f)",
                  err, mPlayedTicks, aDesiredTime,
                  mSource->TrackTimeToSeconds(aDesiredTime)));
+        channelCount = 1;
         // if this is not enough we'll loop and provide more
         samplesLength = samplesPer10ms;
         PodArrayZero(scratchBuffer);
@@ -1349,16 +1351,12 @@ class MediaPipelineReceiveAudio::PipelineListener
       MOZ_RELEASE_ASSERT(samplesLength <= scratchBufferLength);
 
       MOZ_LOG(gMediaPipelineLog, LogLevel::Debug,
-              ("Audio conduit returned buffer of length %u", samplesLength));
+              ("Audio conduit returned buffer of length %zu", samplesLength));
 
       RefPtr<SharedBuffer> samples =
           SharedBuffer::Create(samplesLength * sizeof(uint16_t));
       int16_t* samplesData = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
-      // We derive the number of channels of the stream from the number of
-      // samples the AudioConduit gives us, considering it gives us packets of
-      // 10ms and we know the rate.
-      uint32_t channelCount = samplesLength / samplesPer10ms;
       AutoTArray<int16_t*, 2> channels;
       AutoTArray<const int16_t*, 2> outputChannels;
       size_t frames = samplesLength / channelCount;
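
For concreteness, assuming samplesPer10ms is one hundredth of the rate (as the "fetches 10ms of data" comment above implies), a stereo frame at mRate = 48000 works out as:

    // Worked example at mRate = 48000, stereo:
    //   samplesPer10ms = 48000 / 100                        = 480
    //   samplesLength  = 480 frames * 2 channels            = 960
    //   old: channelCount = samplesLength / samplesPer10ms  = 960 / 480 = 2
    //   new: channelCount arrives directly from the conduit = 2
    //   frames = samplesLength / channelCount               = 960 / 2 = 480

The old derivation was correct only because the conduit always hands back exactly 10 ms of audio; taking the count from mAudioFrame.num_channels_ removes that hidden coupling.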