diff --git a/dom/media/AudioPacketizer.h b/dom/media/AudioPacketizer.h index 18adedf24248..3a202f78b553 100644 --- a/dom/media/AudioPacketizer.h +++ b/dom/media/AudioPacketizer.h @@ -24,10 +24,13 @@ namespace mozilla { * Input and output, as well as length units in the public interface are * interleaved frames. * - * Allocations of output buffer are performed by this class. Buffers can simply - * be delete-d. This is because packets are intended to be sent off to + * Allocations of output buffer can be performed by this class. Buffers can + * simply be delete-d. This is because packets are intended to be sent off to * non-gecko code using normal pointers/length pairs * + * Alternatively, consumers can pass in a buffer into which the output is copied. + * The buffer needs to be large enough to store a packet worth of audio. + * * The implementation uses a circular buffer using absolute virtual indices. */ template @@ -98,6 +101,15 @@ public: uint32_t samplesNeeded = mPacketSize * mChannels; OutputType* out = new OutputType[samplesNeeded]; + Output(out); + + return out; + } + + void Output(OutputType* aOutputBuffer) + { + uint32_t samplesNeeded = mPacketSize * mChannels; + // Under-run. Pad the end of the buffer with silence. 
if (AvailableSamples() < samplesNeeded) { #ifdef LOG_PACKETIZER_UNDERRUN @@ -108,26 +120,24 @@ public: NS_WARNING(buf); #endif uint32_t zeros = samplesNeeded - AvailableSamples(); - PodZero(out + AvailableSamples(), zeros); + PodZero(aOutputBuffer + AvailableSamples(), zeros); samplesNeeded -= zeros; } if (ReadIndex() + samplesNeeded <= mLength) { ConvertAudioSamples(mStorage.get() + ReadIndex(), - out, + aOutputBuffer, samplesNeeded); } else { uint32_t firstPartLength = mLength - ReadIndex(); uint32_t secondPartLength = samplesNeeded - firstPartLength; ConvertAudioSamples(mStorage.get() + ReadIndex(), - out, + aOutputBuffer, firstPartLength); ConvertAudioSamples(mStorage.get(), - out + firstPartLength, + aOutputBuffer + firstPartLength, secondPartLength); } mReadIndex += samplesNeeded; - - return out; } uint32_t PacketsAvailable() const { diff --git a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp index da5fed010fd0..14bcb2e976fe 100644 --- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp +++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp @@ -953,6 +953,8 @@ void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk( // channels (since the WebRTC.org code below makes the assumption that the // input audio is either mono or stereo). uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2; + // XXX[padenot] We could remove this allocation for the common case, or stick + // it on the object so it only happens once. 
nsAutoArrayPtr convertedSamples( new int16_t[chunk.mDuration * outputChannels]); @@ -998,8 +1000,16 @@ void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk( while (packetizer_->PacketsAvailable()) { uint32_t samplesPerPacket = packetizer_->PacketSize() * packetizer_->Channels(); - conduit->SendAudioFrame(packetizer_->Output(), - samplesPerPacket , + + // We know that webrtc.org's code is going to copy the samples down the line, + // so we can just use a stack buffer here instead of malloc-ing. + // Max size given stereo is 480*2*2 = 1920 (48KHz) + const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920; + int16_t packet[AUDIO_SAMPLE_BUFFER_MAX]; + + packetizer_->Output(packet); + conduit->SendAudioFrame(packet, + samplesPerPacket, rate, 0); } } @@ -1329,7 +1339,6 @@ NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) { // This comparison is done in total time to avoid accumulated roundoff errors. while (source_->TicksToTimeRoundDown(track_rate_, played_ticks_) < desired_time) { - // TODO(ekr@rtfm.com): Is there a way to avoid mallocating here? Or reduce the size? // Max size given stereo is 480*2*2 = 1920 (48KHz) const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920; MOZ_ASSERT((track_rate_/100)*sizeof(uint16_t) * 2 <= AUDIO_SAMPLE_BUFFER_MAX);