Backed out changeset b83be2d0614b (bug 1264199)

--HG--
extra : rebase_source : f8962f950f2fa089ca7eb297aca3b8711c5586c5
Carsten "Tomcat" Book 2016-04-21 16:31:54 +02:00
parent 9d468452aa
commit baaa1f0ede
4 changed files with 27 additions and 121 deletions

View File

@@ -33,7 +33,19 @@ AudioConverter::AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut)
  MOZ_DIAGNOSTIC_ASSERT(aOut.Interleaved(), "planar audio format not supported");
  mIn.Layout().MappingTable(mOut.Layout(), mChannelOrderMap);
  if (aIn.Rate() != aOut.Rate()) {
    RecreateResampler();
    int error;
    mResampler = speex_resampler_init(aOut.Channels(),
                                      aIn.Rate(),
                                      aOut.Rate(),
                                      SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                      &error);
    if (error == RESAMPLER_ERR_SUCCESS) {
      speex_resampler_skip_zeros(mResampler);
    } else {
      NS_WARNING("Failed to initialize resampler.");
      mResampler = nullptr;
    }
  }
}
@@ -270,46 +282,6 @@ AudioConverter::ResampleAudio(void* aOut, const void* aIn, size_t aFrames)
  return outframes;
}

void
AudioConverter::RecreateResampler()
{
  if (mResampler) {
    speex_resampler_destroy(mResampler);
  }
  int error;
  mResampler = speex_resampler_init(mOut.Channels(),
                                    mIn.Rate(),
                                    mOut.Rate(),
                                    SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                    &error);
  if (error == RESAMPLER_ERR_SUCCESS) {
    speex_resampler_skip_zeros(mResampler);
  } else {
    NS_WARNING("Failed to initialize resampler.");
    mResampler = nullptr;
  }
}

size_t
AudioConverter::DrainResampler(void* aOut)
{
  if (!mResampler) {
    return 0;
  }
  int frames = speex_resampler_get_input_latency(mResampler);
  AlignedByteBuffer buffer(FramesOutToSamples(frames) *
                           AudioConfig::SampleSize(mOut.Format()));
  if (!buffer) {
    // OOM
    return 0;
  }
  frames = ResampleAudio(aOut, buffer.Data(), frames);
  // Tear down the resampler, as that is easier than handling follow-up calls.
  RecreateResampler();
  return frames;
}

size_t
AudioConverter::UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const
{
@@ -355,13 +327,7 @@ AudioConverter::UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const
size_t
AudioConverter::ResampleRecipientFrames(size_t aFrames) const
{
  if (!aFrames && mIn.Rate() != mOut.Rate()) {
    // The resampler will be drained; account for the frames currently
    // buffered in the resampler.
    return speex_resampler_get_output_latency(mResampler);
  } else {
    return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1;
  }
  return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1;
}

size_t
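
For context on the two helpers removed above: DrainResampler() flushes the frames the Speex resampler is still buffering by asking for its input latency, pushing that many frames of silence through ResampleAudio(), and then recreating the resampler state. The sketch below shows the same pattern written directly against the speexdsp C API; it is illustrative only (DrainSpeexResampler and its parameters are not Mozilla names), and it resets the state with speex_resampler_reset_mem() rather than destroying and re-initializing it as the removed code does.

// Illustrative sketch only -- not Mozilla code. Flush the frames still
// buffered inside a speexdsp resampler by pushing one input-latency's worth
// of silence through it, then reset the state for the next stream.
#include <vector>
#include <speex/speex_resampler.h>

static uint32_t
DrainSpeexResampler(SpeexResamplerState* aState, uint32_t aChannels,
                    float* aOut, uint32_t aOutCapacityFrames)
{
  if (!aState) {
    return 0;
  }
  // Number of input frames the resampler is still holding on to.
  uint32_t inFrames =
    static_cast<uint32_t>(speex_resampler_get_input_latency(aState));
  std::vector<float> silence(size_t(inFrames) * aChannels, 0.0f);
  uint32_t outFrames = aOutCapacityFrames;
  // Feeding zeros pushes the buffered audio out; the zeros themselves only
  // contribute a (near-)silent tail.
  speex_resampler_process_interleaved_float(aState, silence.data(), &inFrames,
                                            aOut, &outFrames);
  // Start from a clean state, as the removed code does by re-creating the
  // resampler.
  speex_resampler_reset_mem(aState);
  speex_resampler_skip_zeros(aState);
  return outFrames; // frames actually written to aOut
}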

View File

@@ -123,8 +123,6 @@ public:
  // Convert the AudioDataBuffer.
  // Conversion will be done in place if possible. Otherwise a new buffer will
  // be returned.
  // If an empty buffer is provided while resampling is in use, the resampler
  // will be drained.
  template <AudioConfig::SampleFormat Format, typename Value>
  AudioDataBuffer<Format, Value> Process(AudioDataBuffer<Format, Value>&& aBuffer)
  {
@@ -154,7 +152,7 @@ public:
      return AudioDataBuffer<Format, Value>(Move(temp1));
    }
    frames = ProcessInternal(temp1.Data(), aBuffer.Data(), frames);
    if (mIn.Rate() == mOut.Rate()) {
    if (!frames || mIn.Rate() == mOut.Rate()) {
      temp1.SetLength(FramesOutToSamples(frames));
      return AudioDataBuffer<Format, Value>(Move(temp1));
    }
@@ -163,17 +161,13 @@ public:
    // If we are downsampling we can re-use it.
    AlignedBuffer<Value>* outputBuffer = &temp1;
    AlignedBuffer<Value> temp2;
    if (!frames || mOut.Rate() > mIn.Rate()) {
      // We are upsampling or about to drain, so we can't work in place.
      // Allocate another temporary buffer where the upsampling will occur.
    if (mOut.Rate() > mIn.Rate()) {
      // We are upsampling, so we can't work in place. Allocate another
      // temporary buffer where the upsampling will occur.
      temp2.SetLength(FramesOutToSamples(ResampleRecipientFrames(frames)));
      outputBuffer = &temp2;
    }
    if (!frames) {
      frames = DrainResampler(outputBuffer->Data());
    } else {
      frames = ResampleAudio(outputBuffer->Data(), temp1.Data(), frames);
    }
    frames = ResampleAudio(outputBuffer->Data(), temp1.Data(), frames);
    outputBuffer->SetLength(FramesOutToSamples(frames));
    return AudioDataBuffer<Format, Value>(Move(*outputBuffer));
  }
@@ -229,8 +223,6 @@ private:
  SpeexResamplerState* mResampler;
  size_t ResampleAudio(void* aOut, const void* aIn, size_t aFrames);
  size_t ResampleRecipientFrames(size_t aFrames) const;
  void RecreateResampler();
  size_t DrainResampler(void* aOut);
};
} // namespace mozilla
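
The in-place versus temporary-buffer decision in Process() above follows directly from the sizing rule in ResampleRecipientFrames(): downsampling can never produce more frames than it consumes, so the output fits back into temp1, while upsampling can, so a second buffer sized by that formula is needed. A small worked example follows; none of these names are Mozilla APIs.

// Illustrative only: reproduces the ResampleRecipientFrames() formula and the
// numbers behind the in-place / temporary-buffer choice in Process().
#include <cstdint>
#include <cstdio>

static uint64_t
RecipientFrames(uint64_t aFrames, uint32_t aInRate, uint32_t aOutRate)
{
  // Worst-case output frame count, rounded up by one extra frame.
  return aFrames * aOutRate / aInRate + 1;
}

int main()
{
  // Downsampling 1024 frames from 48000Hz to 44100Hz:
  // 1024 * 44100 / 48000 + 1 = 941 output frames, which fits in the
  // 1024-frame input buffer, so the conversion can be done in place.
  std::printf("down: %llu\n",
              (unsigned long long)RecipientFrames(1024, 48000, 44100));
  // Upsampling 1024 frames from 44100Hz to 48000Hz:
  // 1024 * 48000 / 44100 + 1 = 1115 output frames, which does not fit,
  // so Process() allocates the separate temp2 buffer.
  std::printf("up:   %llu\n",
              (unsigned long long)RecipientFrames(1024, 44100, 48000));
  return 0;
}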

View File

@@ -323,12 +323,6 @@ DecodedAudioDataSink::NotifyAudioNeeded()
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
             "Not called from the owner's thread");

  if (AudioQueue().IsFinished() && !AudioQueue().GetSize()) {
    // We have reached the end of the data; drain the resampler.
    DrainConverter();
    return;
  }

  // Always ensure we have two processed frames pending to allow for processing
  // latency.
  while (AudioQueue().GetSize() && (mProcessedQueueLength < LOW_AUDIO_USECS ||
@@ -349,8 +343,6 @@ DecodedAudioDataSink::NotifyAudioNeeded()
                 mConverter ? mConverter->InputConfig().Rate() : 0,
                 data->mChannels, data->mRate);
      DrainConverter();
      // mFramesParsed indicates the current playtime in frames at the current
      // input sampling rate. Recalculate it per the new sampling rate.
      if (mFramesParsed) {
@@ -397,19 +389,14 @@ DecodedAudioDataSink::NotifyAudioNeeded()
      // time.
      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
      mFramesParsed += missingFrames.value();
      // We need to insert silence; first use drained frames, if any.
      missingFrames -= DrainConverter(missingFrames.value());
      // Inserting silence may still be needed.
      if (missingFrames.value()) {
        AlignedAudioBuffer silenceData(missingFrames.value() * mOutputChannels);
        if (!silenceData) {
          NS_WARNING("OOM in DecodedAudioDataSink");
          mErrored = true;
          return;
        }
        RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
        PushProcessedAudio(silence);
      }
      AlignedAudioBuffer silenceData(missingFrames.value() * mOutputChannels);
      if (!silenceData) {
        NS_WARNING("OOM in DecodedAudioDataSink");
        mErrored = true;
        return;
      }
      RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
      PushProcessedAudio(silence);
    }

    mLastEndTime = data->GetEndTime();
@@ -420,9 +407,7 @@ DecodedAudioDataSink::NotifyAudioNeeded()
        mConverter->Process(AudioSampleBuffer(Move(data->mAudioData))).Forget();
      data = CreateAudioFromBuffer(Move(convertedData), data);
    }
    if (PushProcessedAudio(data)) {
      mLastProcessedPacket = Some(data);
    }
    PushProcessedAudio(data);
  }
}
@@ -462,38 +447,5 @@ DecodedAudioDataSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
  return data.forget();
}

uint32_t
DecodedAudioDataSink::DrainConverter(uint32_t aMaxFrames)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

  if (!mConverter || !mLastProcessedPacket) {
    // Nothing to drain.
    return 0;
  }

  RefPtr<AudioData> lastPacket = mLastProcessedPacket.ref();
  mLastProcessedPacket.reset();

  // To drain we simply provide an empty packet to the audio converter.
  AlignedAudioBuffer convertedData =
    mConverter->Process(AudioSampleBuffer(AlignedAudioBuffer())).Forget();

  uint32_t frames = convertedData.Length() / mOutputChannels;
  if (!convertedData.SetLength(std::min(frames, aMaxFrames) * mOutputChannels)) {
    // This can never happen, as we are only ever reducing the length of
    // convertedData.
    mErrored = true;
    return 0;
  }

  RefPtr<AudioData> data =
    CreateAudioFromBuffer(Move(convertedData), lastPacket);
  if (!data) {
    return 0;
  }
  mProcessedQueue.Push(data);
  return data->mFrames;
}

} // namespace media
} // namespace mozilla
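
The removed DrainConverter() and the silence-insertion path above implement one gap-filling policy: when the next packet starts later than expected, the sink first counts whatever frames a converter drain can still produce against the gap, and only pads the remainder with interleaved silence. Below is a standalone, illustrative sketch of that ordering; the names here are not Mozilla APIs.

// Illustrative only -- not Mozilla code. Split a playback gap between frames
// recovered by draining the converter and zero-filled (silent) frames.
#include <algorithm>
#include <cstdint>
#include <vector>

struct GapFill {
  uint32_t drainedFrames;     // frames taken from the converter drain
  std::vector<float> silence; // interleaved zeros covering what is left
};

static GapFill
FillGap(uint32_t aMissingFrames, uint32_t aChannels,
        uint32_t aFramesAvailableFromDrain)
{
  GapFill result{};
  // Use drained frames first, capped at the size of the gap (this is the
  // role of the aMaxFrames cap in the removed DrainConverter(aMaxFrames)).
  result.drainedFrames = std::min(aMissingFrames, aFramesAvailableFromDrain);
  const uint32_t remaining = aMissingFrames - result.drainedFrames;
  // Whatever is still missing becomes silence: one zero per channel per frame.
  result.silence.assign(size_t(remaining) * aChannels, 0.0f);
  return result;
}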

View File

@@ -114,9 +114,6 @@ private:
  void OnAudioPopped(const RefPtr<MediaData>& aSample);
  void OnAudioPushed(const RefPtr<MediaData>& aSample);
  void NotifyAudioNeeded();
  // Drain the converter and add the output to the processed audio queue.
  // At most aMaxFrames frames will be added.
  uint32_t DrainConverter(uint32_t aMaxFrames = UINT32_MAX);
  already_AddRefed<AudioData> CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
                                                    AudioData* aReference);
  // Add data to the processed queue, update mProcessedQueueLength and
@@ -132,7 +129,6 @@ private:
  // the input stream. It indicates the time in frames since playback started
  // at the current input framerate.
  int64_t mFramesParsed;
  Maybe<RefPtr<AudioData>> mLastProcessedPacket;
  int64_t mLastEndTime;
  // Never modified after construction.
  uint32_t mOutputRate;