Bug 901633 - Part 2 - Make AudioChannelFormat and AudioSegment more generic. r=roc

--HG--
extra : rebase_source : 2db81db6341466607917070eaf9a9a9d66a04059
Paul Adenot 2015-07-29 18:24:15 +02:00
parent a7ae94ef7e
commit c135b555a7
4 changed files with 213 additions and 235 deletions

AudioChannelFormat.cpp (View File)

@@ -15,79 +15,4 @@ GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2)
return std::max(aChannels1, aChannels2);
}
/**
* UpMixMatrix represents a conversion matrix by exploiting the fact that
* each output channel comes from at most one input channel.
*/
struct UpMixMatrix {
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
};
static const UpMixMatrix
gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Upmixes from mono
{ { 0, 0 } },
{ { 0, IGNORE, IGNORE } },
{ { 0, 0, IGNORE, IGNORE } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE } },
{ { IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE } },
// Upmixes from stereo
{ { 0, 1, IGNORE } },
{ { 0, 1, IGNORE, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, IGNORE, IGNORE } },
// Upmixes from 3-channel
{ { 0, 1, 2, IGNORE } },
{ { 0, 1, 2, IGNORE, IGNORE } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE } },
// Upmixes from quad
{ { 0, 1, 2, 3, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, 2, 3 } },
// Upmixes from 5-channel
{ { 0, 1, 2, 3, 4, IGNORE } }
};
void
AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
const void* aZeroChannel)
{
uint32_t inputChannelCount = aChannelArray->Length();
uint32_t outputChannelCount =
GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
NS_ASSERTION(outputChannelCount > inputChannelCount,
"No up-mix needed");
MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
aChannelArray->SetLength(outputChannelCount);
if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS &&
outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) {
const UpMixMatrix& m = gUpMixMatrices[
gMixingMatrixIndexByChannels[inputChannelCount - 1] +
outputChannelCount - inputChannelCount - 1];
const void* outputChannels[CUSTOM_CHANNEL_LAYOUTS];
for (uint32_t i = 0; i < outputChannelCount; ++i) {
uint8_t channelIndex = m.mInputDestination[i];
if (channelIndex == IGNORE) {
outputChannels[i] = aZeroChannel;
} else {
outputChannels[i] = aChannelArray->ElementAt(channelIndex);
}
}
for (uint32_t i = 0; i < outputChannelCount; ++i) {
aChannelArray->ElementAt(i) = outputChannels[i];
}
return;
}
for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) {
aChannelArray->ElementAt(i) = aZeroChannel;
}
}
} // namespace mozilla

AudioChannelFormat.h (View File)

@@ -58,24 +58,6 @@ const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
uint32_t
GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2);
/**
* Given an array of input channel data, and an output channel count,
* replaces the array with an array of upmixed channels.
* This shuffles the array and may set some channel buffers to aZeroChannel.
* Don't call this with input count >= output count.
* This may return *more* channels than requested. In that case, downmixing
is required to get to aOutputChannelCount. (This is how we handle
* odd cases like 3 -> 4 upmixing.)
* If aChannelArray.Length() was the input to one of a series of
* GetAudioChannelsSuperset calls resulting in aOutputChannelCount,
* no downmixing will be required.
*/
void
AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
const void* aZeroChannel);
/**
* DownMixMatrix represents a conversion matrix efficiently by exploiting the
* fact that each input channel contributes to at most one output channel,
@@ -124,19 +106,19 @@ gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
* input count <= output count.
*/
template<typename T>
void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
T** aOutputChannels,
uint32_t aOutputChannelCount,
uint32_t aDuration)
void AudioChannelsDownMix(const nsTArray<const T*>& aChannelArray,
T** aOutputChannels,
uint32_t aOutputChannelCount,
uint32_t aDuration)
{
uint32_t inputChannelCount = aChannelArray.Length();
const void* const* inputChannels = aChannelArray.Elements();
const T* const* inputChannels = aChannelArray.Elements();
NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
if (inputChannelCount > 6) {
// Just drop the unknown channels.
for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(T));
PodCopy(aOutputChannels[o], inputChannels[o], aDuration);
}
return;
}
@@ -153,8 +135,7 @@ void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
for (uint32_t s = 0; s < aDuration; ++s) {
// Reserve an extra junk channel at the end for the cases where we
// want an input channel to contribute to nothing
T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
memset(outputChannels, 0, sizeof(T)*(CUSTOM_CHANNEL_LAYOUTS));
T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1] = {0};
for (uint32_t c = 0; c < inputChannelCount; ++c) {
outputChannels[m.mInputDestination[c]] +=
m.mInputCoefficient[c]*(static_cast<const T*>(inputChannels[c]))[s];
@@ -171,6 +152,94 @@ void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
}
}
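For illustration, a minimal usage sketch (hypothetical helper and silent buffers, assuming this header and nsTArray are available): down-mixing six 5.1 channels to stereo.
static void ExampleDownMix51ToStereo()
{
  // Six silent 5.1 input channels, 4 frames each (hypothetical data).
  float ch[6][4] = {};
  nsAutoTArray<const float*, 6> surround;
  for (int i = 0; i < 6; ++i) {
    surround.AppendElement(ch[i]);
  }
  float left[4];
  float right[4];
  float* out[2] = { left, right };
  // T is deduced as float; 6 inputs > 2 outputs, so a down-mix matrix runs.
  AudioChannelsDownMix(surround, out, 2, 4);
}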
/**
* UpMixMatrix represents a conversion matrix by exploiting the fact that
* each output channel comes from at most one input channel.
*/
struct UpMixMatrix {
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
};
static const UpMixMatrix
gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Upmixes from mono
{ { 0, 0 } },
{ { 0, IGNORE, IGNORE } },
{ { 0, 0, IGNORE, IGNORE } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE } },
{ { IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE } },
// Upmixes from stereo
{ { 0, 1, IGNORE } },
{ { 0, 1, IGNORE, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, IGNORE, IGNORE } },
// Upmixes from 3-channel
{ { 0, 1, 2, IGNORE } },
{ { 0, 1, 2, IGNORE, IGNORE } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE } },
// Upmixes from quad
{ { 0, 1, 2, 3, IGNORE } },
{ { 0, 1, IGNORE, IGNORE, 2, 3 } },
// Upmixes from 5-channel
{ { 0, 1, 2, 3, 4, IGNORE } }
};
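A worked example of the index arithmetic used by AudioChannelsUpMix below; the group offsets are read off the table above (mono entries start at index 0, stereo at 5, 3-channel at 9, quad at 12, 5-channel at 14):
// index = gMixingMatrixIndexByChannels[inputChannelCount - 1]
//         + outputChannelCount - inputChannelCount - 1
// e.g. stereo -> quad: gMixingMatrixIndexByChannels[1] == 5, so
// index = 5 + (4 - 2 - 1) = 6, selecting { 0, 1, IGNORE, IGNORE }:
// both inputs pass through and the two extra outputs get aZeroChannel.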
/**
* Given an array of input channel data, and an output channel count,
* replaces the array with an array of upmixed channels.
* This shuffles the array and may set some channel buffers to aZeroChannel.
* Don't call this with input count >= output count.
* This may return *more* channels than requested. In that case, downmixing
is required to get to aOutputChannelCount. (This is how we handle
* odd cases like 3 -> 4 upmixing.)
* If aChannelArray.Length() was the input to one of a series of
* GetAudioChannelsSuperset calls resulting in aOutputChannelCount,
* no downmixing will be required.
*/
template<typename T>
void
AudioChannelsUpMix(nsTArray<const T*>* aChannelArray,
uint32_t aOutputChannelCount,
const T* aZeroChannel)
{
uint32_t inputChannelCount = aChannelArray->Length();
uint32_t outputChannelCount =
GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
NS_ASSERTION(outputChannelCount > inputChannelCount,
"No up-mix needed");
MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
aChannelArray->SetLength(outputChannelCount);
if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS &&
outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) {
const UpMixMatrix& m = gUpMixMatrices[
gMixingMatrixIndexByChannels[inputChannelCount - 1] +
outputChannelCount - inputChannelCount - 1];
const T* outputChannels[CUSTOM_CHANNEL_LAYOUTS];
for (uint32_t i = 0; i < outputChannelCount; ++i) {
uint8_t channelIndex = m.mInputDestination[i];
if (channelIndex == IGNORE) {
outputChannels[i] = aZeroChannel;
} else {
outputChannels[i] = aChannelArray->ElementAt(channelIndex);
}
}
for (uint32_t i = 0; i < outputChannelCount; ++i) {
aChannelArray->ElementAt(i) = outputChannels[i];
}
return;
}
for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) {
aChannelArray->ElementAt(i) = aZeroChannel;
}
}
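A hedged usage sketch (hypothetical helper and buffer; SilentChannel and GUESS_AUDIO_CHANNELS come from AudioSegment.h): up-mixing mono to stereo duplicates channel 0, per the first entry of gUpMixMatrices.
static void ExampleUpMixMonoToStereo()
{
  float mono[4] = { 0.1f, 0.2f, 0.3f, 0.4f };
  nsAutoTArray<const float*, GUESS_AUDIO_CHANNELS> channels;
  channels.AppendElement(mono);
  AudioChannelsUpMix(&channels, 2, SilentChannel::ZeroChannel<float>());
  // channels.Length() == 2 and both entries point at `mono`,
  // per the mono -> stereo matrix { { 0, 0 } }.
}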
} // namespace mozilla

AudioSegment.cpp (View File)

@@ -5,7 +5,6 @@
#include "AudioSegment.h"
#include "AudioStream.h"
#include "AudioMixer.h"
#include "AudioChannelFormat.h"
#include "Latency.h"
@@ -13,49 +12,18 @@
namespace mozilla {
template <class SrcT, class DestT>
static void
InterleaveAndConvertBuffer(const SrcT** aSourceChannels,
int32_t aLength, float aVolume,
int32_t aChannels,
DestT* aOutput)
const uint8_t SilentChannel::gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*SilentChannel::AUDIO_PROCESSING_FRAMES] = {0};
template<>
const float* SilentChannel::ZeroChannel<float>()
{
DestT* output = aOutput;
for (int32_t i = 0; i < aLength; ++i) {
for (int32_t channel = 0; channel < aChannels; ++channel) {
float v = AudioSampleToFloat(aSourceChannels[channel][i])*aVolume;
*output = FloatToAudioSample<DestT>(v);
++output;
}
}
return reinterpret_cast<const float*>(SilentChannel::gZeroChannel);
}
void
InterleaveAndConvertBuffer(const void** aSourceChannels,
AudioSampleFormat aSourceFormat,
int32_t aLength, float aVolume,
int32_t aChannels,
AudioDataValue* aOutput)
template<>
const int16_t* SilentChannel::ZeroChannel<int16_t>()
{
switch (aSourceFormat) {
case AUDIO_FORMAT_FLOAT32:
InterleaveAndConvertBuffer(reinterpret_cast<const float**>(aSourceChannels),
aLength,
aVolume,
aChannels,
aOutput);
break;
case AUDIO_FORMAT_S16:
InterleaveAndConvertBuffer(reinterpret_cast<const int16_t**>(aSourceChannels),
aLength,
aVolume,
aChannels,
aOutput);
break;
case AUDIO_FORMAT_SILENCE:
// nothing to do here.
break;
}
return reinterpret_cast<const int16_t*>(SilentChannel::gZeroChannel);
}
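Both specializations return views of the same zeroed storage; as the header comment notes, an all-zero bit pattern is valid silence in either sample format. A minimal sketch of that invariant (hypothetical helper):
static void ExampleZeroChannelViews()
{
  const float* silentFloats = SilentChannel::ZeroChannel<float>();
  const int16_t* silentInts = SilentChannel::ZeroChannel<int16_t>();
  // Both alias gZeroChannel: all-zero bits read as 0.0f and as 0.
  MOZ_ASSERT(silentFloats[0] == 0.0f && silentInts[0] == 0);
}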
void
@@ -66,54 +34,6 @@ AudioSegment::ApplyVolume(float aVolume)
}
}
static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48 kHz audio */
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES] = {0};
void
DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
AudioSampleFormat aSourceFormat, int32_t aDuration,
float aVolume, uint32_t aOutputChannels,
AudioDataValue* aOutput)
{
nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixConversionBuffer;
nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixOutputBuffer;
channelData.SetLength(aChannelData.Length());
if (aSourceFormat != AUDIO_FORMAT_FLOAT32) {
NS_ASSERTION(aSourceFormat == AUDIO_FORMAT_S16, "unknown format");
downmixConversionBuffer.SetLength(aDuration*aChannelData.Length());
for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
float* conversionBuf = downmixConversionBuffer.Elements() + (i*aDuration);
const int16_t* sourceBuf = static_cast<const int16_t*>(aChannelData[i]);
for (uint32_t j = 0; j < (uint32_t)aDuration; ++j) {
conversionBuf[j] = AudioSampleToFloat(sourceBuf[j]);
}
channelData[i] = conversionBuf;
}
} else {
for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
channelData[i] = aChannelData[i];
}
}
downmixOutputBuffer.SetLength(aDuration*aOutputChannels);
nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannelBuffers;
nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> outputChannelData;
outputChannelBuffers.SetLength(aOutputChannels);
outputChannelData.SetLength(aOutputChannels);
for (uint32_t i = 0; i < (uint32_t)aOutputChannels; ++i) {
outputChannelData[i] = outputChannelBuffers[i] =
downmixOutputBuffer.Elements() + aDuration*i;
}
if (channelData.Length() > aOutputChannels) {
AudioChannelsDownMix(channelData, outputChannelBuffers.Elements(),
aOutputChannels, aDuration);
}
InterleaveAndConvertBuffer(outputChannelData.Elements(), AUDIO_FORMAT_FLOAT32,
aDuration, aVolume, aOutputChannels, aOutput);
}
void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate)
{
if (mChunks.IsEmpty()) {
@@ -165,9 +85,9 @@ void
AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
uint32_t aSampleRate)
{
nsAutoTArray<AudioDataValue, AUDIO_PROCESSING_FRAMES* GUESS_AUDIO_CHANNELS>
nsAutoTArray<AudioDataValue, SilentChannel::AUDIO_PROCESSING_FRAMES* GUESS_AUDIO_CHANNELS>
buf;
nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channelData;
nsAutoTArray<const AudioDataValue*, GUESS_AUDIO_CHANNELS> channelData;
uint32_t offsetSamples = 0;
uint32_t duration = GetDuration();
@@ -197,11 +117,11 @@ AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
// desired input and output channels.
channelData.SetLength(c.mChannelData.Length());
for (uint32_t i = 0; i < channelData.Length(); ++i) {
channelData[i] = c.mChannelData[i];
channelData[i] = static_cast<const AudioDataValue*>(c.mChannelData[i]);
}
if (channelData.Length() < aOutputChannels) {
// Up-mix.
AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
AudioChannelsUpMix(&channelData, aOutputChannels, SilentChannel::ZeroChannel<AudioDataValue>());
for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
AudioDataValue* ptr =
PointerForOffsetInChannel(buf.Elements(), outBufferLength,
@@ -246,9 +166,8 @@ AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
void
AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
{
nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
// Offset in the buffer that will end up sent to the AudioStream, in samples.
nsAutoTArray<AudioDataValue,SilentChannel::AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
// Offset in the buffer that will be written to the mixer, in samples.
uint32_t offset = 0;
if (GetDuration() <= 0) {
@@ -262,39 +181,23 @@ AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels
for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
AudioChunk& c = *ci;
uint32_t frames = c.mDuration;
// If we have written data in the past, or we have real (non-silent) data
// to write, we can proceed. Otherwise, it means we just started the
// AudioStream, and we don't have real data to write to it (just silence).
// To avoid overbuffering in the AudioStream, we simply drop the silence
// here. The stream will underrun and output silence anyway.
if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
channelData.SetLength(c.mChannelData.Length());
for (uint32_t i = 0; i < channelData.Length(); ++i) {
channelData[i] = c.mChannelData[i];
}
if (channelData.Length() < aOutputChannels) {
// Up-mix. Note that this might actually make channelData have more
// than aOutputChannels temporarily.
AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
}
if (channelData.Length() > aOutputChannels) {
// Down-mix.
DownmixAndInterleave(channelData, c.mBufferFormat, frames,
c.mVolume, aOutputChannels, buf.Elements() + offset);
} else {
InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
frames, c.mVolume,
aOutputChannels,
buf.Elements() + offset);
}
} else {
// Assumes that a bit pattern of zeroes == 0.0f
memset(buf.Elements() + offset, 0, aOutputChannels * frames * sizeof(AudioDataValue));
switch (c.mBufferFormat) {
case AUDIO_FORMAT_S16:
WriteChunk<int16_t>(c, aOutputChannels, buf.Elements() + offset);
break;
case AUDIO_FORMAT_FLOAT32:
WriteChunk<float>(c, aOutputChannels, buf.Elements() + offset);
break;
case AUDIO_FORMAT_SILENCE:
// The mixer is expecting interleaved data, so this is ok.
PodZero(buf.Elements() + offset, c.mDuration * aOutputChannels);
break;
default:
MOZ_ASSERT(false, "Not handled");
}
offset += frames * aOutputChannels;
offset += c.mDuration * aOutputChannels;
#if !defined(MOZILLA_XPCOMRT_API)
if (!c.mTimeStamp.IsNull()) {

AudioSegment.h (View File)

@@ -8,6 +8,7 @@
#include "MediaSegment.h"
#include "AudioSampleFormat.h"
#include "AudioChannelFormat.h"
#include "SharedBuffer.h"
#include "WebAudioUtils.h"
#ifdef MOZILLA_INTERNAL_API
@@ -56,21 +57,66 @@ const int GUESS_AUDIO_CHANNELS = 2;
const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7;
const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS;
void InterleaveAndConvertBuffer(const void** aSourceChannels,
AudioSampleFormat aSourceFormat,
int32_t aLength, float aVolume,
int32_t aChannels,
AudioDataValue* aOutput);
template <class SrcT, class DestT>
static void
InterleaveAndConvertBuffer(const SrcT* const* aSourceChannels,
int32_t aLength, float aVolume,
int32_t aChannels,
DestT* aOutput)
{
DestT* output = aOutput;
for (int32_t i = 0; i < aLength; ++i) {
for (int32_t channel = 0; channel < aChannels; ++channel) {
float v = AudioSampleToFloat(aSourceChannels[channel][i])*aVolume;
*output = FloatToAudioSample<DestT>(v);
++output;
}
}
}
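A short sketch of this template's contract (hypothetical helper and data): output is frame-major, so two stereo frames interleave as L0 R0 L1 R1, each sample scaled by aVolume before FloatToAudioSample conversion.
static void ExampleInterleaveStereoToS16()
{
  const float left[2] = { 0.5f, -0.5f };
  const float right[2] = { 1.0f, -1.0f };
  const float* channels[2] = { left, right };
  int16_t out[4];
  // SrcT = float, DestT = int16_t; volume 0.5 halves every sample.
  InterleaveAndConvertBuffer(channels, 2, 0.5f, 2, out);
}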
class SilentChannel
{
public:
static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48 kHz audio */
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES];
// We take advantage of the fact that zero in float and zero in int have the
// same all-zeros bit layout.
template<typename T>
static const T* ZeroChannel();
};
/**
* Given an array of input channels (aChannelData), downmix to aOutputChannels
* and interleave the channel data. A total of aOutputChannels*aDuration
* interleaved samples will be written to aOutput.
*/
void DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
AudioSampleFormat aSourceFormat, int32_t aDuration,
float aVolume, uint32_t aOutputChannels,
AudioDataValue* aOutput);
template <typename SrcT, typename DestT>
void
DownmixAndInterleave(const nsTArray<const SrcT*>& aChannelData,
int32_t aDuration, float aVolume, uint32_t aOutputChannels,
DestT* aOutput)
{
if (aChannelData.Length() == aOutputChannels) {
InterleaveAndConvertBuffer(aChannelData.Elements(),
aDuration, aVolume, aOutputChannels, aOutput);
} else {
nsAutoTArray<SrcT*,GUESS_AUDIO_CHANNELS> outputChannelData;
nsAutoTArray<SrcT, SilentChannel::AUDIO_PROCESSING_FRAMES * GUESS_AUDIO_CHANNELS> outputBuffers;
outputChannelData.SetLength(aOutputChannels);
outputBuffers.SetLength(aDuration * aOutputChannels);
for (uint32_t i = 0; i < aOutputChannels; i++) {
outputChannelData[i] = outputBuffers.Elements() + aDuration * i;
}
AudioChannelsDownMix(aChannelData,
outputChannelData.Elements(),
aOutputChannels,
aDuration);
InterleaveAndConvertBuffer(outputChannelData.Elements(),
aDuration, aVolume, aOutputChannels, aOutput);
}
}
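A hedged usage sketch (hypothetical helper and silent buffers): quad float input to interleaved stereo int16_t, which exercises the down-mix branch.
static void ExampleDownmixAndInterleave()
{
  // Four silent quad channels, 8 frames each (hypothetical data).
  float quadData[4][8] = {};
  nsAutoTArray<const float*, 4> quad;
  for (int i = 0; i < 4; ++i) {
    quad.AppendElement(quadData[i]);
  }
  int16_t interleaved[2 * 8]; // stereo output, interleaved
  // 4 channels > 2 outputs, so AudioChannelsDownMix runs before interleaving.
  DownmixAndInterleave(quad, 8, 1.0f, 2, interleaved);
}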
/**
* An AudioChunk represents a multi-channel buffer of audio samples.
@@ -190,6 +236,13 @@ struct AudioChunk {
return amount;
}
template<typename T>
const nsTArray<const T*>& ChannelData()
{
MOZ_ASSERT(AudioSampleTypeToFormat<T>::Format == mBufferFormat);
return *reinterpret_cast<nsTArray<const T*>*>(&mChannelData);
}
StreamTime mDuration; // in frames within the buffer
nsRefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
@@ -356,6 +409,34 @@ public:
}
};
template<typename SrcT>
void WriteChunk(AudioChunk& aChunk,
uint32_t aOutputChannels,
AudioDataValue* aOutputBuffer)
{
nsAutoTArray<const SrcT*,GUESS_AUDIO_CHANNELS> channelData;
channelData = aChunk.ChannelData<SrcT>();
if (channelData.Length() < aOutputChannels) {
// Up-mix. Note that this might actually make channelData have more
// than aOutputChannels temporarily.
AudioChannelsUpMix(&channelData, aOutputChannels, SilentChannel::ZeroChannel<SrcT>());
}
if (channelData.Length() > aOutputChannels) {
// Down-mix.
DownmixAndInterleave(channelData, aChunk.mDuration,
aChunk.mVolume, aOutputChannels, aOutputBuffer);
} else {
InterleaveAndConvertBuffer(channelData.Elements(),
aChunk.mDuration, aChunk.mVolume,
aOutputChannels,
aOutputBuffer);
}
}
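AudioSegment::WriteTo above dispatches to this template on mBufferFormat; a hedged sketch of a direct call (hypothetical wrapper, stereo output assumed):
static void ExampleWriteFloatChunk(AudioChunk& aChunk, AudioDataValue* aOutput)
{
  MOZ_ASSERT(aChunk.mBufferFormat == AUDIO_FORMAT_FLOAT32);
  // aOutput must hold at least aChunk.mDuration * 2 samples.
  WriteChunk<float>(aChunk, 2, aOutput);
}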
} // namespace mozilla
#endif /* MOZILLA_AUDIOSEGMENT_H_ */