Bug 901633 - Part 3 - Fix TrackEncoder to use the new AudioChunk methods. r=jesup

--HG--
extra : rebase_source : 58be44dff12ef0168a09d7dde5a8e24061f21f3c
Paul Adenot 2015-07-29 18:25:34 +02:00
parent c135b555a7
commit cd7cf1f8ca
2 changed files with 45 additions and 16 deletions

TrackEncoder.cpp

@@ -123,9 +123,6 @@ AudioTrackEncoder::AppendAudioSegment(const AudioSegment& aSegment)
  return NS_OK;
}
static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48KHz audio */
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES] = {0};
/*static*/
void
AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
@@ -133,19 +130,29 @@ AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
                                       uint32_t aOutputChannels,
                                       AudioDataValue* aOutput)
{
  if (aChunk.mChannelData.Length() < aOutputChannels) {
    // Up-mix. This might make the mChannelData have more than aChannels.
    AudioChannelsUpMix(&aChunk.mChannelData, aOutputChannels, gZeroChannel);
  }
  if (aChunk.mChannelData.Length() > aOutputChannels) {
    DownmixAndInterleave(aChunk.mChannelData, aChunk.mBufferFormat, aDuration,
                         aChunk.mVolume, aOutputChannels, aOutput);
  } else {
    InterleaveAndConvertBuffer(aChunk.mChannelData.Elements(),
                               aChunk.mBufferFormat, aDuration, aChunk.mVolume,
                               aOutputChannels, aOutput);
  }
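  // Dispatch on the chunk's sample format and hand the typed channel pointers
  // to the templated InterleaveTrackData() overload.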
  switch (aChunk.mBufferFormat) {
    case AUDIO_FORMAT_S16: {
      nsAutoTArray<const int16_t*, 2> array;
      array.SetLength(aOutputChannels);
      for (uint32_t i = 0; i < array.Length(); i++) {
        array[i] = static_cast<const int16_t*>(aChunk.mChannelData[i]);
      }
      InterleaveTrackData(array, aDuration, aOutputChannels, aOutput, aChunk.mVolume);
      break;
    }
    case AUDIO_FORMAT_FLOAT32: {
      nsAutoTArray<const float*, 2> array;
      array.SetLength(aOutputChannels);
      for (uint32_t i = 0; i < array.Length(); i++) {
        array[i] = static_cast<const float*>(aChunk.mChannelData[i]);
      }
      InterleaveTrackData(array, aDuration, aOutputChannels, aOutput, aChunk.mVolume);
      break;
    }
    case AUDIO_FORMAT_SILENCE: {
      MOZ_ASSERT(false, "To implement.");
    }
  }
}
/*static*/

TrackEncoder.h

@@ -148,6 +148,28 @@ public:
uint32_t aTrackEvents,
const MediaSegment& aQueuedMedia) override;
  template<typename T>
  static
  void InterleaveTrackData(nsTArray<const T*>& aInput,
                           int32_t aDuration,
                           uint32_t aOutputChannels,
                           AudioDataValue* aOutput,
                           float aVolume)
  {
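    // Depending on how many channels the input has relative to aOutputChannels,
    // pad with silent channels (up-mix), fold channels down while interleaving
    // (down-mix), or interleave the data as-is.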
    if (aInput.Length() < aOutputChannels) {
      // Up-mix. This might make aInput have more channels than aOutputChannels.
      AudioChannelsUpMix(&aInput, aOutputChannels, SilentChannel::ZeroChannel<T>());
    }
    if (aInput.Length() > aOutputChannels) {
      DownmixAndInterleave(aInput, aDuration,
                           aVolume, aOutputChannels, aOutput);
    } else {
      InterleaveAndConvertBuffer(aInput.Elements(), aDuration, aVolume,
                                 aOutputChannels, aOutput);
    }
  }
  /**
   * Interleaves the track data and stores the result into aOutput. Might need
   * to up-mix or down-mix the channel data if the channel count of this chunk