Mirror of https://github.com/mozilla/gecko-dev.git (synced 2025-04-03 13:02:57 +00:00)
Backed out 965c62289427:cb894b5d342f for perma-orange on b2g emulator M10 r=backout
This commit is contained in: parent b42cc30cf7, commit 2dfec0638c
@@ -1,85 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef MOZILLA_AUDIOMIXER_H_
-#define MOZILLA_AUDIOMIXER_H_
-
-#include "AudioSampleFormat.h"
-#include "nsTArray.h"
-#include "mozilla/PodOperations.h"
-
-namespace mozilla {
-typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer,
-                         AudioSampleFormat aFormat,
-                         uint32_t aChannels,
-                         uint32_t aFrames);
-
-/**
- * This class mixes multiple streams of audio together to output a single audio
- * stream.
- *
- * AudioMixer::Mix is to be called repeatedly with buffers that have the same
- * length, sample rate, sample format and channel count.
- *
- * When all the tracks have been mixed, calling FinishMixing will call back with
- * a buffer containing the mixed audio data.
- *
- * This class is not thread safe.
- */
-class AudioMixer
-{
-public:
-  AudioMixer(MixerFunc aCallback)
-    : mCallback(aCallback),
-      mFrames(0),
-      mChannels(0)
-  { }
-
-  /* Get the data from the mixer. This is supposed to be called when all the
-   * tracks have been mixed in. The caller should not hold onto the data. */
-  void FinishMixing() {
-    mCallback(mMixedAudio.Elements(),
-              AudioSampleTypeToFormat<AudioDataValue>::Format,
-              mChannels,
-              mFrames);
-    PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
-    mChannels = mFrames = 0;
-  }
-
-  /* Add a buffer to the mix. aSamples is interleaved. */
-  void Mix(AudioDataValue* aSamples, uint32_t aChannels, uint32_t aFrames) {
-    if (!mFrames && !mChannels) {
-      mFrames = aFrames;
-      mChannels = aChannels;
-      EnsureCapacityAndSilence();
-    }
-
-    MOZ_ASSERT(aFrames == mFrames);
-    MOZ_ASSERT(aChannels == mChannels);
-
-    for (uint32_t i = 0; i < aFrames * aChannels; i++) {
-      mMixedAudio[i] += aSamples[i];
-    }
-  }
-private:
-  void EnsureCapacityAndSilence() {
-    if (mFrames * mChannels > mMixedAudio.Length()) {
-      mMixedAudio.SetLength(mFrames * mChannels);
-    }
-    PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
-  }
-
-  /* Function that is called when the mixing is done. */
-  MixerFunc mCallback;
-  /* Number of frames for this mixing block. */
-  uint32_t mFrames;
-  /* Number of channels for this mixing block. */
-  uint32_t mChannels;
-  /* Buffer containing the mixed audio data. */
-  nsTArray<AudioDataValue> mMixedAudio;
-};
-}
-
-#endif // MOZILLA_AUDIOMIXER_H_
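The deleted header's comment spells out the AudioMixer contract: between two
FinishMixing() calls, every Mix() call must supply the same frame count,
channel count, and sample format. A minimal usage sketch of that removed API
(the callback and function names here are illustrative, not from the patch):

  #include "AudioMixer.h"

  // Illustrative callback: AudioMixer invokes it from FinishMixing() with
  // the interleaved sum of every buffer passed to Mix() for this block.
  static void MixedAudioReady(mozilla::AudioDataValue* aMixed,
                              mozilla::AudioSampleFormat aFormat,
                              uint32_t aChannels, uint32_t aFrames)
  {
    // Hand the mixed block to the audio output here.
  }

  void MixTwoTracks(mozilla::AudioDataValue* aTrackA,
                    mozilla::AudioDataValue* aTrackB,
                    uint32_t aChannels, uint32_t aFrames)
  {
    mozilla::AudioMixer mixer(MixedAudioReady);
    mixer.Mix(aTrackA, aChannels, aFrames); // same frames/channels each call
    mixer.Mix(aTrackB, aChannels, aFrames);
    mixer.FinishMixing(); // fires the callback, then resets for the next block
  }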
@@ -107,6 +107,15 @@ ResampleChannelBuffer(SpeexResamplerState* aResampler, uint32_t aChannel,
   }
 }
 
+class SharedChannelArrayBuffer : public ThreadSharedObject {
+public:
+  SharedChannelArrayBuffer(nsTArray<nsTArray<float> >* aBuffers)
+  {
+    mBuffers.SwapElements(*aBuffers);
+  }
+  nsTArray<nsTArray<float> > mBuffers;
+};
+
 void
 AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<const void*>& aBuffers,
                                                               uint32_t aInputDuration,
@@ -169,7 +178,7 @@ AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<con
   }
 
   uint32_t length = resampledBuffers[0].Length();
-  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer<float>(&resampledBuffers);
+  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer(&resampledBuffers);
   mResampledData.AppendFrames(buf.forget(), bufferPtrs, length);
 }
 
@@ -49,19 +49,8 @@ public:
 
 typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue;
 
-template<typename T> class AudioSampleTypeToFormat;
-
-template <> class AudioSampleTypeToFormat<float> {
-public:
-  static const AudioSampleFormat Format = AUDIO_FORMAT_FLOAT32;
-};
-
-template <> class AudioSampleTypeToFormat<short> {
-public:
-  static const AudioSampleFormat Format = AUDIO_FORMAT_S16;
-};
-
 // Single-sample conversion
 
 /*
  * Use "2^N" conversion since it's simple, fast, "bit transparent", used by
  * many other libraries and apparently behaves reasonably.
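The "2^N" comment retained above refers to scaling by a power of two when
converting between floating-point and integer samples. As a sketch of what
such a conversion typically looks like (illustrative code, not from this
patch; 16-bit samples scale by 2^15):

  #include <cstdint>

  inline int16_t FloatToS16(float aValue)
  {
    float scaled = aValue * 32768.0f;           // 2^15
    if (scaled > 32767.0f)  scaled = 32767.0f;  // clamp at INT16_MAX
    if (scaled < -32768.0f) scaled = -32768.0f; // clamp at INT16_MIN
    return static_cast<int16_t>(scaled);
  }

  inline float S16ToFloat(int16_t aValue)
  {
    return aValue / 32768.0f; // dividing by the same 2^15 is "bit transparent"
  }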
@@ -6,10 +6,8 @@
 #include "AudioSegment.h"
 
 #include "AudioStream.h"
-#include "AudioMixer.h"
 #include "AudioChannelFormat.h"
 #include "Latency.h"
-#include "speex/speex_resampler.h"
 
 namespace mozilla {
 
@@ -111,98 +109,70 @@ DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
                      aDuration, aVolume, aOutputChannels, aOutput);
 }
 
-void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler)
-{
-  uint32_t inRate, outRate;
-
-  if (mChunks.IsEmpty()) {
-    return;
-  }
-
-  speex_resampler_get_rate(aResampler, &inRate, &outRate);
-
-  switch (mChunks[0].mBufferFormat) {
-  case AUDIO_FORMAT_FLOAT32:
-    Resample<float>(aResampler, inRate, outRate);
-    break;
-  case AUDIO_FORMAT_S16:
-    Resample<int16_t>(aResampler, inRate, outRate);
-    break;
-  default:
-    MOZ_ASSERT(false);
-    break;
-  }
-}
-
 void
-AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
+AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput)
 {
   uint32_t outputChannels = aOutput->GetChannels();
   nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
   nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
 
-  if (!GetDuration()) {
-    return;
-  }
-
-  uint32_t outBufferLength = GetDuration() * outputChannels;
-  buf.SetLength(outBufferLength);
-
-  // Offset in the buffer that will end up sent to the AudioStream.
-  uint32_t offset = 0;
-
   for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
     AudioChunk& c = *ci;
-    uint32_t frames = c.mDuration;
-    // If we have written data in the past, or we have real (non-silent) data
-    // to write, we can proceed. Otherwise, it means we just started the
-    // AudioStream, and we don't have real data to write to it (just silence).
-    // To avoid overbuffering in the AudioStream, we simply drop the silence,
-    // here. The stream will underrun and output silence anyways.
-    if (c.mBuffer || aOutput->GetWritten()) {
-      if (c.mBuffer) {
-        channelData.SetLength(c.mChannelData.Length());
-        for (uint32_t i = 0; i < channelData.Length(); ++i) {
-          channelData[i] = c.mChannelData[i];
-        }
-
-        if (channelData.Length() < outputChannels) {
-          // Up-mix. Note that this might actually make channelData have more
-          // than outputChannels temporarily.
-          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
-        }
-
-        if (channelData.Length() > outputChannels) {
-          // Down-mix.
-          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
-                               c.mVolume, outputChannels, buf.Elements() + offset);
-        } else {
-          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
-                                     frames, c.mVolume,
-                                     outputChannels,
-                                     buf.Elements() + offset);
-        }
-      } else {
-        // Assumes that a bit pattern of zeroes == 0.0f
-        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
-      }
-    }
-
-    offset += frames * outputChannels;
-
-    if (!c.mTimeStamp.IsNull()) {
-      TimeStamp now = TimeStamp::Now();
-      // would be more efficient to c.mTimeStamp to ms on create time then pass here
-      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
-              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
-    }
-  }
-
-  aOutput->Write(buf.Elements(), GetDuration(), &(mChunks[mChunks.Length() - 1].mTimeStamp));
-
-  if (aMixer) {
-    aMixer->Mix(buf.Elements(), outputChannels, GetDuration());
-  }
+    TrackTicks offset = 0;
+    while (offset < c.mDuration) {
+      TrackTicks durationTicks =
+        std::min<TrackTicks>(c.mDuration - offset, AUDIO_PROCESSING_FRAMES);
+      if (uint64_t(outputChannels)*durationTicks > INT32_MAX || offset > INT32_MAX) {
+        NS_ERROR("Buffer overflow");
+        return;
+      }
+
+      uint32_t duration = uint32_t(durationTicks);
+
+      // If we have written data in the past, or we have real (non-silent) data
+      // to write, we can proceed. Otherwise, it means we just started the
+      // AudioStream, and we don't have real data to write to it (just silence).
+      // To avoid overbuffering in the AudioStream, we simply drop the silence,
+      // here. The stream will underrun and output silence anyways.
+      if (c.mBuffer || aOutput->GetWritten()) {
+        buf.SetLength(outputChannels*duration);
+        if (c.mBuffer) {
+          channelData.SetLength(c.mChannelData.Length());
+          for (uint32_t i = 0; i < channelData.Length(); ++i) {
+            channelData[i] =
+              AddAudioSampleOffset(c.mChannelData[i], c.mBufferFormat, int32_t(offset));
+          }
+
+          if (channelData.Length() < outputChannels) {
+            // Up-mix. Note that this might actually make channelData have more
+            // than outputChannels temporarily.
+            AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
+          }
+
+          if (channelData.Length() > outputChannels) {
+            // Down-mix.
+            DownmixAndInterleave(channelData, c.mBufferFormat, duration,
+                                 c.mVolume, outputChannels, buf.Elements());
+          } else {
+            InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
+                                       duration, c.mVolume,
+                                       outputChannels,
+                                       buf.Elements());
+          }
+        } else {
+          // Assumes that a bit pattern of zeroes == 0.0f
+          memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue));
+        }
+        aOutput->Write(buf.Elements(), int32_t(duration), &(c.mTimeStamp));
+      }
+      if(!c.mTimeStamp.IsNull()) {
+        TimeStamp now = TimeStamp::Now();
+        // would be more efficient to c.mTimeStamp to ms on create time then pass here
+        LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
+                (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
+      }
+      offset += duration;
+    }
   }
   aOutput->Start();
 }
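Note the structural difference restored here: instead of accumulating one
GetDuration()-sized buffer (and optionally feeding it to the mixer), the
restored WriteTo() slices each chunk into pieces of at most
AUDIO_PROCESSING_FRAMES ticks and writes each slice immediately. A standalone
sketch of just that slicing arithmetic (constants illustrative):

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  typedef int64_t TrackTicks;

  int main()
  {
    const TrackTicks kProcessingFrames = 640; // stand-in for AUDIO_PROCESSING_FRAMES
    TrackTicks chunkDuration = 1500;          // one chunk's length in ticks

    // Mirrors the while-loop in the restored WriteTo().
    for (TrackTicks offset = 0; offset < chunkDuration; ) {
      TrackTicks slice = std::min<TrackTicks>(chunkDuration - offset, kProcessingFrames);
      printf("write %lld ticks at offset %lld\n", (long long)slice, (long long)offset);
      offset += slice; // yields slices of 640, 640, 220
    }
    return 0;
  }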
@@ -9,25 +9,13 @@
 #include "MediaSegment.h"
 #include "AudioSampleFormat.h"
 #include "SharedBuffer.h"
-#include "WebAudioUtils.h"
 #ifdef MOZILLA_INTERNAL_API
 #include "mozilla/TimeStamp.h"
 #endif
 
 namespace mozilla {
 
-template<typename T>
-class SharedChannelArrayBuffer : public ThreadSharedObject {
-public:
-  SharedChannelArrayBuffer(nsTArray<nsTArray<T>>* aBuffers)
-  {
-    mBuffers.SwapElements(*aBuffers);
-  }
-  nsTArray<nsTArray<T>> mBuffers;
-};
-
 class AudioStream;
-class AudioMixer;
 
 /**
  * For auto-arrays etc, guess this as the common number of channels.
@@ -123,7 +111,6 @@ struct AudioChunk {
 #endif
 };
 
-
 /**
  * A list of audio samples consisting of a sequence of slices of SharedBuffers.
  * The audio rate is determined by the track, not stored in this class.
@@ -134,43 +121,6 @@ public:
 
   AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO) {}
 
-  // Resample the whole segment in place.
-  template<typename T>
-  void Resample(SpeexResamplerState* aResampler, uint32_t aInRate, uint32_t aOutRate)
-  {
-    mDuration = 0;
-
-    for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
-      nsAutoTArray<nsTArray<T>, GUESS_AUDIO_CHANNELS> output;
-      nsAutoTArray<const T*, GUESS_AUDIO_CHANNELS> bufferPtrs;
-      AudioChunk& c = *ci;
-      uint32_t channels = c.mChannelData.Length();
-      output.SetLength(channels);
-      bufferPtrs.SetLength(channels);
-      uint32_t inFrames = c.mDuration,
-               outFrames = c.mDuration * aOutRate / aInRate;
-      for (uint32_t i = 0; i < channels; i++) {
-        const T* in = static_cast<const T*>(c.mChannelData[i]);
-        T* out = output[i].AppendElements(outFrames);
-
-        dom::WebAudioUtils::SpeexResamplerProcess(aResampler, i,
-                                                  in, &inFrames,
-                                                  out, &outFrames);
-
-        bufferPtrs[i] = out;
-        output[i].SetLength(outFrames);
-      }
-      c.mBuffer = new mozilla::SharedChannelArrayBuffer<T>(&output);
-      for (uint32_t i = 0; i < channels; i++) {
-        c.mChannelData[i] = bufferPtrs[i];
-      }
-      c.mDuration = outFrames;
-      mDuration += c.mDuration;
-    }
-  }
-
-  void ResampleChunks(SpeexResamplerState* aResampler);
-
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const float*>& aChannelData,
                     int32_t aDuration)
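In the removed Resample<T> above, the expected output length is estimated from
the rate ratio (outFrames = duration * aOutRate / aInRate) before Speex
reports how many frames it actually produced. A worked example of that
estimate (values illustrative):

  #include <cstdint>
  #include <cstdio>

  int main()
  {
    uint32_t inRate = 44100, outRate = 48000;
    uint32_t inFrames = 441;                          // a 10 ms block at 44.1 kHz
    uint32_t outFrames = inFrames * outRate / inRate; // 441 * 48000 / 44100 = 480
    printf("%u frames in -> about %u frames out\n", inFrames, outFrames);
    return 0;
  }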
@@ -216,13 +166,7 @@ public:
     return chunk;
   }
   void ApplyVolume(float aVolume);
-  void WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer = nullptr);
+  void WriteTo(uint64_t aID, AudioStream* aOutput);
 
-  int ChannelCount() {
-    NS_WARN_IF_FALSE(!mChunks.IsEmpty(),
-                     "Cannot query channel count on a AudioSegment with no chunks.");
-    return mChunks.IsEmpty() ? 0 : mChunks[0].mChannelData.Length();
-  }
-
   static Type StaticType() { return AUDIO; }
 };
@@ -267,8 +267,9 @@ protected:
   void AppendSliceInternal(const MediaSegmentBase<C, Chunk>& aSource,
                            TrackTicks aStart, TrackTicks aEnd)
   {
-    MOZ_ASSERT(aStart <= aEnd, "Endpoints inverted");
-    MOZ_ASSERT(aStart >= 0 && aEnd <= aSource.mDuration, "Slice out of range");
+    NS_ASSERTION(aStart <= aEnd, "Endpoints inverted");
+    NS_WARN_IF_FALSE(aStart >= 0 && aEnd <= aSource.mDuration,
+                     "Slice out of range");
     mDuration += aEnd - aStart;
     TrackTicks offset = 0;
     for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) {
@@ -26,8 +26,6 @@
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "mozilla/unused.h"
-#include "speex/speex_resampler.h"
-#include "AudioOutputObserver.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
@@ -174,16 +172,15 @@ MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
       MediaStreamListener* l = aStream->mListeners[j];
       TrackTicks offset = (data->mCommands & SourceMediaStream::TRACK_CREATE)
           ? data->mStart : aStream->mBuffer.FindTrack(data->mID)->GetSegment()->GetDuration();
-      l->NotifyQueuedTrackChanges(this, data->mID, data->mOutputRate,
+      l->NotifyQueuedTrackChanges(this, data->mID, data->mRate,
                                   offset, data->mCommands, *data->mData);
     }
     if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
       MediaSegment* segment = data->mData.forget();
       STREAM_LOG(PR_LOG_DEBUG, ("SourceMediaStream %p creating track %d, rate %d, start %lld, initial end %lld",
-                                aStream, data->mID, data->mOutputRate, int64_t(data->mStart),
+                                aStream, data->mID, data->mRate, int64_t(data->mStart),
                                 int64_t(segment->GetDuration())));
-      aStream->mBuffer.AddTrack(data->mID, data->mOutputRate, data->mStart, segment);
+      aStream->mBuffer.AddTrack(data->mID, data->mRate, data->mStart, segment);
       // The track has taken ownership of data->mData, so let's replace
       // data->mData with an empty clone.
       data->mData = segment->CreateEmptyClone();
@@ -335,7 +332,7 @@ MediaStreamGraphImpl::GetAudioPosition(MediaStream* aStream)
     return mCurrentTime;
   }
   return aStream->mAudioOutputStreams[0].mAudioPlaybackStartTime +
-      TicksToTimeRoundDown(IdealAudioRate(),
+      TicksToTimeRoundDown(aStream->mAudioOutputStreams[0].mStream->GetRate(),
                            positionInFrames);
 }
 
@@ -578,37 +575,17 @@ MediaStreamGraphImpl::UpdateStreamOrderForStream(mozilla::LinkedList<MediaStream
   *mStreams.AppendElement() = stream.forget();
 }
 
-static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
-                               AudioSampleFormat aFormat,
-                               uint32_t aChannels,
-                               uint32_t aFrames)
-{
-  // Need an api to register mixer callbacks, bug 989921
-  if (aFrames > 0 && aChannels > 0) {
-    // XXX need Observer base class and registration API
-    if (gFarendObserver) {
-      gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
-                                    IdealAudioRate(), aChannels, aFormat);
-    }
-  }
-}
-
 void
 MediaStreamGraphImpl::UpdateStreamOrder()
 {
   mOldStreams.SwapElements(mStreams);
   mStreams.ClearAndRetainStorage();
-  bool shouldMix = false;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     MediaStream* stream = mOldStreams[i];
     stream->mHasBeenOrdered = false;
     stream->mIsConsumed = false;
     stream->mIsOnOrderingStack = false;
     stream->mInBlockingSet = false;
-    if (stream->AsSourceStream() &&
-        stream->AsSourceStream()->NeedsMixing()) {
-      shouldMix = true;
-    }
     ProcessedMediaStream* ps = stream->AsProcessedStream();
     if (ps) {
       ps->mInCycle = false;
@@ -619,12 +596,6 @@ MediaStreamGraphImpl::UpdateStreamOrder()
     }
   }
 
-  if (!mMixer && shouldMix) {
-    mMixer = new AudioMixer(AudioMixerCallback);
-  } else if (mMixer && !shouldMix) {
-    mMixer = nullptr;
-  }
-
   mozilla::LinkedList<MediaStream> stack;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     nsRefPtr<MediaStream>& s = mOldStreams[i];
@@ -837,11 +808,10 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim
       aStream->mAudioOutputStreams.AppendElement();
     audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
     audioOutputStream->mBlockedAudioTime = 0;
-    audioOutputStream->mLastTickWritten = 0;
     audioOutputStream->mStream = new AudioStream();
     // XXX for now, allocate stereo output. But we need to fix this to
     // match the system's ideal channel configuration.
-    audioOutputStream->mStream->Init(2, IdealAudioRate(), AUDIO_CHANNEL_NORMAL, AudioStream::LowLatency);
+    audioOutputStream->mStream->Init(2, tracks->GetRate(), AUDIO_CHANNEL_NORMAL, AudioStream::LowLatency);
     audioOutputStream->mTrackID = tracks->GetID();
 
     LogLatency(AsyncLatencyLogger::AudioStreamCreate,
@@ -859,22 +829,14 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim
   }
 }
 
-TrackTicks
+void
 MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
                                 GraphTime aFrom, GraphTime aTo)
 {
   MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");
 
-  TrackTicks ticksWritten = 0;
-  // We compute the number of needed ticks by converting a difference of graph
-  // time rather than by substracting two converted stream time to ensure that
-  // the rounding between {Graph,Stream}Time and track ticks is not dependant
-  // on the absolute value of the {Graph,Stream}Time, and so that number of
-  // ticks to play is the same for each cycle.
-  TrackTicks ticksNeeded = TimeToTicksRoundDown(IdealAudioRate(), aTo) - TimeToTicksRoundDown(IdealAudioRate(), aFrom);
-
   if (aStream->mAudioOutputStreams.IsEmpty()) {
-    return 0;
+    return;
   }
 
   // When we're playing multiple copies of this stream at the same time, they're
@@ -888,25 +850,6 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
     MediaStream::AudioOutputStream& audioOutput = aStream->mAudioOutputStreams[i];
     StreamBuffer::Track* track = aStream->mBuffer.FindTrack(audioOutput.mTrackID);
     AudioSegment* audio = track->Get<AudioSegment>();
-    AudioSegment output;
-    MOZ_ASSERT(track->GetRate() == IdealAudioRate());
-
-    // offset and audioOutput.mLastTickWritten can differ by at most one sample,
-    // because of the rounding issue. We track that to ensure we don't skip a
-    // sample, or play a sample twice.
-    TrackTicks offset = track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, aFrom));
-    if (!audioOutput.mLastTickWritten) {
-      audioOutput.mLastTickWritten = offset;
-    }
-    if (audioOutput.mLastTickWritten != offset) {
-      // If there is a global underrun of the MSG, this property won't hold, and
-      // we reset the sample count tracking.
-      if (std::abs(audioOutput.mLastTickWritten - offset) != 1) {
-        audioOutput.mLastTickWritten = offset;
-      } else {
-        offset = audioOutput.mLastTickWritten;
-      }
-    }
 
     // We don't update aStream->mBufferStartTime here to account for
     // time spent blocked. Instead, we'll update it in UpdateCurrentTime after the
@@ -914,59 +857,54 @@ MediaStreamGraphImpl::PlayAudio(MediaStream* aStream,
     // right offsets in the stream buffer, even if we've already written silence for
     // some amount of blocked time after the current time.
     GraphTime t = aFrom;
-    while (ticksNeeded) {
+    while (t < aTo) {
       GraphTime end;
       bool blocked = aStream->mBlocked.GetAt(t, &end);
      end = std::min(end, aTo);
 
-      // Check how many ticks of sound we can provide if we are blocked some
-      // time in the middle of this cycle.
-      TrackTicks toWrite = 0;
-      if (end >= aTo) {
-        toWrite = ticksNeeded;
-      } else {
-        toWrite = TimeToTicksRoundDown(IdealAudioRate(), end - aFrom);
-      }
-
+      AudioSegment output;
       if (blocked) {
-        output.InsertNullDataAtStart(toWrite);
-        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld blocking-silence samples for %f to %f (%ld to %ld)\n",
-                                    aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
-                                    offset, offset + toWrite));
-        ticksNeeded -= toWrite;
-      } else {
-        TrackTicks endTicksNeeded = offset + toWrite;
-        TrackTicks endTicksAvailable = audio->GetDuration();
-        if (endTicksNeeded <= endTicksAvailable) {
-          output.AppendSlice(*audio, offset, endTicksNeeded);
-        } else {
-          MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not ended.");
-          // If we are at the end of the track, maybe write the remaining
-          // samples, and pad with/output silence.
-          if (endTicksNeeded > endTicksAvailable &&
-              offset < endTicksAvailable) {
-            output.AppendSlice(*audio, offset, endTicksAvailable);
-            ticksNeeded -= endTicksAvailable - offset;
-            toWrite -= endTicksAvailable - offset;
-          }
-          output.AppendNullData(toWrite);
-        }
-        output.ApplyVolume(volume);
-        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing %ld samples for %f to %f (samples %ld to %ld)\n",
-                                    aStream, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
-                                    offset, endTicksNeeded));
-        ticksNeeded -= toWrite;
-      }
-      t = end;
-      offset += toWrite;
-      audioOutput.mLastTickWritten += toWrite;
-    }
-
-    // Need unique id for stream & track - and we want it to match the inserter
-    output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
-                   audioOutput.mStream, mMixer);
+        // Track total blocked time in aStream->mBlockedAudioTime so that
+        // the amount of silent samples we've inserted for blocking never gets
+        // more than one sample away from the ideal amount.
+        TrackTicks startTicks =
+          TimeToTicksRoundDown(track->GetRate(), audioOutput.mBlockedAudioTime);
+        audioOutput.mBlockedAudioTime += end - t;
+        TrackTicks endTicks =
+          TimeToTicksRoundDown(track->GetRate(), audioOutput.mBlockedAudioTime);
+
+        output.InsertNullDataAtStart(endTicks - startTicks);
+        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing blocking-silence samples for %f to %f",
+                                    aStream, MediaTimeToSeconds(t), MediaTimeToSeconds(end)));
+      } else {
+        TrackTicks startTicks =
+          track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, t));
+        TrackTicks endTicks =
+          track->TimeToTicksRoundDown(GraphTimeToStreamTime(aStream, end));
+
+        // If startTicks is before the track start, then that part of 'audio'
+        // will just be silence, which is fine here. But if endTicks is after
+        // the track end, then 'audio' won't be long enough, so we'll need
+        // to explicitly play silence.
+        TrackTicks sliceEnd = std::min(endTicks, audio->GetDuration());
+        if (sliceEnd > startTicks) {
+          output.AppendSlice(*audio, startTicks, sliceEnd);
+        }
+        // Play silence where the track has ended
+        output.AppendNullData(endTicks - sliceEnd);
+        NS_ASSERTION(endTicks == sliceEnd || track->IsEnded(),
+                     "Ran out of data but track not ended?");
+        output.ApplyVolume(volume);
+        STREAM_LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing samples for %f to %f (samples %lld to %lld)",
+                                    aStream, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
+                                    startTicks, endTicks));
+      }
+      // Need unique id for stream & track - and we want it to match the inserter
+      output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
+                     audioOutput.mStream);
+      t = end;
+    }
   }
-  return ticksWritten;
 }
 
 static void
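The restored blocked branch converts the running total of blocked time to
ticks before and after adding each new interval, so the inserted silence can
never drift more than one sample from the ideal amount. A standalone sketch of
that rounding trick (the rate, time units and values are illustrative, not
the graph's actual types):

  #include <cstdint>
  #include <cstdio>

  typedef int64_t TrackTicks;
  typedef int64_t MediaTime; // microseconds, for this sketch only

  static TrackTicks TimeToTicksRoundDown(int64_t aRate, MediaTime aTime)
  {
    return aTime * aRate / 1000000; // floor division
  }

  int main()
  {
    const int64_t rate = 44100;
    MediaTime blockedTime = 0;
    MediaTime intervals[] = { 3333, 3333, 3333 }; // three blocked spans

    for (MediaTime interval : intervals) {
      // Ticks come from the running total, not from each interval alone,
      // so the floor-rounding error never accumulates.
      TrackTicks startTicks = TimeToTicksRoundDown(rate, blockedTime);
      blockedTime += interval;
      TrackTicks endTicks = TimeToTicksRoundDown(rate, blockedTime);
      printf("insert %lld ticks of silence\n", (long long)(endTicks - startTicks));
      // Prints 146, 147, 147: total 440 = floor(9999 us at 44.1 kHz), whereas
      // flooring each 3333 us span on its own would give 146 * 3 = 438.
    }
    return 0;
  }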
@@ -1301,9 +1239,6 @@ MediaStreamGraphImpl::RunThread()
     bool allBlockedForever = true;
     // True when we've done ProcessInput for all processed streams.
     bool doneAllProducing = false;
-    // This is the number of frame that are written to the AudioStreams, for
-    // this cycle.
-    TrackTicks ticksPlayed = 0;
     // Figure out what each stream wants to do
     for (uint32_t i = 0; i < mStreams.Length(); ++i) {
       MediaStream* stream = mStreams[i];
@@ -1340,13 +1275,7 @@ MediaStreamGraphImpl::RunThread()
       if (mRealtime) {
         // Only playback audio and video in real-time mode
         CreateOrDestroyAudioStreams(prevComputedTime, stream);
-        TrackTicks ticksPlayedForThisStream = PlayAudio(stream, prevComputedTime, mStateComputedTime);
-        if (!ticksPlayed) {
-          ticksPlayed = ticksPlayedForThisStream;
-        } else {
-          MOZ_ASSERT(!ticksPlayedForThisStream || ticksPlayedForThisStream == ticksPlayed,
-                     "Each stream should have the same number of frame.");
-        }
+        PlayAudio(stream, prevComputedTime, mStateComputedTime);
         PlayVideo(stream);
       }
       SourceMediaStream* is = stream->AsSourceStream();
@@ -1358,11 +1287,6 @@ MediaStreamGraphImpl::RunThread()
         allBlockedForever = false;
       }
     }
 
-    if (mMixer) {
-      mMixer->FinishMixing();
-    }
-
     if (ensureNextIteration || !allBlockedForever) {
       EnsureNextIteration();
     }
@@ -1468,6 +1392,12 @@ MediaStreamGraphImpl::ForceShutDown()
   }
 }
 
+void
+MediaStreamGraphImpl::Init()
+{
+  AudioStream::InitPreferredSampleRate();
+}
+
 namespace {
 
 class MediaStreamGraphInitThreadRunnable : public nsRunnable {
@@ -1480,6 +1410,7 @@ public:
   {
     char aLocal;
     profiler_register_thread("MediaStreamGraph", &aLocal);
+    mGraph->Init();
     mGraph->RunThread();
     return NS_OK;
   }
@@ -1851,7 +1782,7 @@ MediaStream::EnsureTrack(TrackID aTrackId, TrackRate aSampleRate)
       nsAutoPtr<MediaSegment> segment(new AudioSegment());
       for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
-        l->NotifyQueuedTrackChanges(Graph(), aTrackId, IdealAudioRate(), 0,
+        l->NotifyQueuedTrackChanges(Graph(), aTrackId, aSampleRate, 0,
                                     MediaStreamListener::TRACK_EVENT_CREATED,
                                     *segment);
       }
@@ -2198,10 +2129,7 @@ SourceMediaStream::AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
   MutexAutoLock lock(mMutex);
   TrackData* data = mUpdateTracks.AppendElement();
   data->mID = aID;
-  data->mInputRate = aRate;
-  // We resample all audio input tracks to the sample rate of the audio mixer.
-  data->mOutputRate = aSegment->GetType() == MediaSegment::AUDIO ?
-                      IdealAudioRate() : aRate;
+  data->mRate = aRate;
   data->mStart = aStart;
   data->mCommands = TRACK_CREATE;
   data->mData = aSegment;
@@ -2211,28 +2139,6 @@ SourceMediaStream::AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
   }
 }
 
-void
-SourceMediaStream::ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment)
-{
-  if (aSegment->GetType() != MediaSegment::AUDIO ||
-      aTrackData->mInputRate == IdealAudioRate()) {
-    return;
-  }
-  AudioSegment* segment = static_cast<AudioSegment*>(aSegment);
-  if (!aTrackData->mResampler) {
-    int channels = segment->ChannelCount();
-    SpeexResamplerState* state = speex_resampler_init(channels,
-                                                      aTrackData->mInputRate,
-                                                      IdealAudioRate(),
-                                                      SPEEX_RESAMPLER_QUALITY_DEFAULT,
-                                                      nullptr);
-    if (state) {
-      aTrackData->mResampler.own(state);
-    }
-  }
-  segment->ResampleChunks(aTrackData->mResampler);
-}
-
 bool
 SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegment *aRawSegment)
 {
@@ -2252,8 +2158,6 @@ SourceMediaStream::AppendToTrack(TrackID aID, MediaSegment* aSegment, MediaSegme
     // or inserting into the graph
     ApplyTrackDisabling(aID, aSegment, aRawSegment);
 
-    ResampleAudioToGraphSampleRate(track, aSegment);
-
     // Must notify first, since AppendFrom() will empty out aSegment
     NotifyDirectConsumers(track, aRawSegment ? aRawSegment : aSegment);
     track->mData->AppendFrom(aSegment); // note: aSegment is now dead
@@ -2278,7 +2182,7 @@ SourceMediaStream::NotifyDirectConsumers(TrackData *aTrack,
   for (uint32_t j = 0; j < mDirectListeners.Length(); ++j) {
     MediaStreamDirectListener* l = mDirectListeners[j];
     TrackTicks offset = 0; // FIX! need a separate TrackTicks.... or the end of the internal buffer
-    l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, aTrack->mOutputRate,
+    l->NotifyRealtimeData(static_cast<MediaStreamGraph*>(GraphImpl()), aTrack->mID, aTrack->mRate,
                           offset, aTrack->mCommands, *aSegment);
   }
 }
@@ -2391,20 +2295,6 @@ SourceMediaStream::GetBufferedTicks(TrackID aID)
   return 0;
 }
 
-void
-SourceMediaStream::RegisterForAudioMixing()
-{
-  MutexAutoLock lock(mMutex);
-  mNeedsMixing = true;
-}
-
-bool
-SourceMediaStream::NeedsMixing()
-{
-  MutexAutoLock lock(mMutex);
-  return mNeedsMixing;
-}
-
 void
 MediaInputPort::Init()
 {
@@ -2589,7 +2479,6 @@ MediaStreamGraphImpl::MediaStreamGraphImpl(bool aRealtime)
   , mNonRealtimeProcessing(false)
   , mStreamOrderDirty(false)
   , mLatencyLog(AsyncLatencyLogger::Get())
-  , mMixer(nullptr)
 {
 #ifdef PR_LOGGING
   if (!gMediaStreamGraphLog) {
@@ -2632,8 +2521,6 @@ MediaStreamGraph::GetInstance()
 
     gGraph = new MediaStreamGraphImpl(true);
     STREAM_LOG(PR_LOG_DEBUG, ("Starting up MediaStreamGraph %p", gGraph));
-
-    AudioStream::InitPreferredSampleRate();
   }
 
   return gGraph;
@@ -16,19 +16,9 @@
 #include "VideoFrameContainer.h"
 #include "VideoSegment.h"
 #include "MainThreadUtils.h"
-#include "nsAutoRef.h"
-#include "speex/speex_resampler.h"
-#include "AudioMixer.h"
 
 class nsIRunnable;
 
-template <>
-class nsAutoRefTraits<SpeexResamplerState> : public nsPointerRefTraits<SpeexResamplerState>
-{
-public:
-  static void Release(SpeexResamplerState* aState) { speex_resampler_destroy(aState); }
-};
-
 namespace mozilla {
 
 class DOMMediaStream;
@@ -573,8 +563,6 @@ protected:
     // Amount of time that we've wanted to play silence because of the stream
     // blocking.
     MediaTime mBlockedAudioTime;
-    // Last tick written to the audio output.
-    TrackTicks mLastTickWritten;
     nsAutoPtr<AudioStream> mStream;
     TrackID mTrackID;
   };
@@ -674,9 +662,6 @@ public:
    */
   void AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart,
                 MediaSegment* aSegment);
 
-  struct TrackData;
-  void ResampleAudioToGraphSampleRate(TrackData* aTrackData, MediaSegment* aSegment);
-
   /**
    * Append media data to a track. Ownership of aSegment remains with the caller,
    * but aSegment is emptied.
@@ -767,13 +752,7 @@ public:
    */
   struct TrackData {
     TrackID mID;
-    // Sample rate of the input data.
-    TrackRate mInputRate;
-    // Sample rate of the output data, always equal to IdealAudioRate()
-    TrackRate mOutputRate;
-    // Resampler if the rate of the input track does not match the
-    // MediaStreamGraph's.
-    nsAutoRef<SpeexResamplerState> mResampler;
+    TrackRate mRate;
     TrackTicks mStart;
     // Each time the track updates are flushed to the media graph thread,
     // this is cleared.
@@ -785,9 +764,6 @@ public:
     bool mHaveEnough;
   };
 
-  void RegisterForAudioMixing();
-  bool NeedsMixing();
-
 protected:
   TrackData* FindDataForTrack(TrackID aID)
   {
@@ -821,7 +797,6 @@ protected:
   bool mPullEnabled;
   bool mUpdateFinished;
   bool mDestroyed;
-  bool mNeedsMixing;
 };
 
 /**
@@ -1028,7 +1003,7 @@ protected:
   bool mInCycle;
 };
 
-// Returns ideal audio rate for processing.
+// Returns ideal audio rate for processing
 inline TrackRate IdealAudioRate() { return AudioStream::PreferredSampleRate(); }
 
 /**
@@ -13,15 +13,12 @@
 #include "nsIThread.h"
 #include "nsIRunnable.h"
 #include "Latency.h"
-#include "mozilla/WeakPtr.h"
 
 namespace mozilla {
 
 template <typename T>
 class LinkedList;
 
-class AudioMixer;
-
 /**
  * Assume we can run an iteration of the MediaStreamGraph loop in this much time
  * or less.
@@ -326,9 +323,9 @@ public:
                            MediaStream* aStream);
   /**
    * Queue audio (mix of stream audio and silence for blocked intervals)
-   * to the audio output stream. Returns the number of frames played.
+   * to the audio output stream.
    */
-  TrackTicks PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
+  void PlayAudio(MediaStream* aStream, GraphTime aFrom, GraphTime aTo);
   /**
    * Set the correct current video frame for stream aStream.
   */
@@ -574,10 +571,6 @@ public:
    * Hold a ref to the Latency logger
   */
   nsRefPtr<AsyncLatencyLogger> mLatencyLog;
-  /**
-   * If this is not null, all the audio output for the MSG will be mixed down.
-   */
-  nsAutoPtr<AudioMixer> mMixer;
 };
 
 }
@@ -1,155 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "AudioMixer.h"
-#include <assert.h>
-
-using mozilla::AudioDataValue;
-using mozilla::AudioSampleFormat;
-
-/* In this test, the different audio stream and channels are always created to
- * cancel each other. */
-void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames)
-{
-  bool silent = true;
-  for (uint32_t i = 0; i < aChannels * aFrames; i++) {
-    if (aData[i] != 0.0) {
-      if (aFormat == mozilla::AUDIO_FORMAT_S16) {
-        fprintf(stderr, "Sample at %d is not silent: %d\n", i, (short)aData[i]);
-      } else {
-        fprintf(stderr, "Sample at %d is not silent: %f\n", i, (float)aData[i]);
-      }
-      silent = false;
-    }
-  }
-  if (!silent) {
-    MOZ_CRASH();
-  }
-}
-
-/* Helper function to give us the maximum and minimum value that don't clip,
- * for a given sample format (integer or floating-point). */
-template<typename T>
-T GetLowValue();
-
-template<typename T>
-T GetHighValue();
-
-template<>
-float GetLowValue<float>() {
-  return -1.0;
-}
-
-template<>
-short GetLowValue<short>() {
-  return -INT16_MAX;
-}
-
-template<>
-float GetHighValue<float>() {
-  return 1.0;
-}
-
-template<>
-short GetHighValue<short>() {
-  return INT16_MAX;
-}
-
-void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue)
-{
-  AudioDataValue* end = aBuffer + aLength;
-  while (aBuffer != end) {
-    *aBuffer++ = aValue;
-  }
-}
-
-int main(int argc, char* argv[]) {
-  const uint32_t CHANNEL_LENGTH = 256;
-  AudioDataValue a[CHANNEL_LENGTH * 2];
-  AudioDataValue b[CHANNEL_LENGTH * 2];
-  FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-  FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-  FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-  FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-
-  {
-    int iterations = 2;
-    mozilla::AudioMixer mixer(MixingDone);
-
-    fprintf(stderr, "Test AudioMixer constant buffer length.\n");
-
-    while (iterations--) {
-      mixer.Mix(a, 2, CHANNEL_LENGTH);
-      mixer.Mix(b, 2, CHANNEL_LENGTH);
-      mixer.FinishMixing();
-    }
-  }
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-
-    fprintf(stderr, "Test AudioMixer variable buffer length.\n");
-
-    FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
-    mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
-    mixer.FinishMixing();
-    FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    FillBuffer(a, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(a + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetLowValue<AudioDataValue>());
-    FillBuffer(b, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    FillBuffer(b + CHANNEL_LENGTH / 2, CHANNEL_LENGTH / 2, GetHighValue<AudioDataValue>());
-    mixer.Mix(a, 2, CHANNEL_LENGTH / 2);
-    mixer.Mix(b, 2, CHANNEL_LENGTH / 2);
-    mixer.FinishMixing();
-  }
-
-  FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
-  FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-    fprintf(stderr, "Test AudioMixer variable channel count.\n");
-
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 1, CHANNEL_LENGTH);
-    mixer.Mix(b, 1, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-  }
-
-  {
-    mozilla::AudioMixer mixer(MixingDone);
-    fprintf(stderr, "Test AudioMixer variable stream count.\n");
-
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-    mixer.Mix(a, 2, CHANNEL_LENGTH);
-    mixer.Mix(b, 2, CHANNEL_LENGTH);
-    mixer.FinishMixing();
-  }
-
-  return 0;
-}
@@ -1,16 +0,0 @@
-# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-CPP_UNIT_TESTS += [
-    'TestAudioMixer.cpp',
-]
-
-FAIL_ON_WARNINGS = True
-
-LOCAL_INCLUDES += [
-    '..',
-]
-
@@ -12,8 +12,6 @@ PARALLEL_DIRS += [
     'webvtt'
 ]
 
-TEST_TOOL_DIRS += ['compiledtest']
-
 if CONFIG['MOZ_RAW']:
     PARALLEL_DIRS += ['raw']
 
@@ -60,7 +58,6 @@ EXPORTS += [
     'AudioChannelFormat.h',
     'AudioCompactor.h',
     'AudioEventTimeline.h',
-    'AudioMixer.h',
     'AudioNodeEngine.h',
     'AudioNodeExternalInputStream.h',
     'AudioNodeStream.h',
@@ -90,25 +90,5 @@ WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
 #endif
 }
 
-int
-WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
-                                     uint32_t aChannel,
-                                     const int16_t* aIn, uint32_t* aInLen,
-                                     int16_t* aOut, uint32_t* aOutLen)
-{
-#ifdef MOZ_SAMPLE_TYPE_S16
-  return speex_resampler_process_int(aResampler, aChannel, aIn, aInLen, aOut, aOutLen);
-#else
-  nsAutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp1;
-  nsAutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp2;
-  tmp1.SetLength(*aInLen);
-  tmp2.SetLength(*aOutLen);
-  ConvertAudioSamples(aIn, tmp1.Elements(), *aInLen);
-  int result = speex_resampler_process_float(aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen);
-  ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen);
-  return result;
-#endif
-}
-
 }
 }
@@ -19,6 +19,7 @@ typedef struct SpeexResamplerState_ SpeexResamplerState;
 namespace mozilla {
 
 class AudioNodeStream;
+class MediaStream;
 
 namespace dom {
 
@@ -209,13 +210,7 @@ struct WebAudioUtils {
                         uint32_t aChannel,
                         const int16_t* aIn, uint32_t* aInLen,
                         float* aOut, uint32_t* aOutLen);
-
-  static int
-  SpeexResamplerProcess(SpeexResamplerState* aResampler,
-                        uint32_t aChannel,
-                        const int16_t* aIn, uint32_t* aInLen,
-                        int16_t* aOut, uint32_t* aOutLen);
 };
 
 }
 }
@@ -1,55 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef AUDIOOUTPUTOBSERVER_H_
-#define AUDIOOUTPUTOBSERVER_H_
-
-#include "mozilla/StaticPtr.h"
-
-namespace webrtc {
-class SingleRwFifo;
-}
-
-namespace mozilla {
-
-typedef struct FarEndAudioChunk_ {
-  uint16_t mSamples;
-  bool mOverrun;
-  int16_t mData[1]; // variable-length
-} FarEndAudioChunk;
-
-// XXX Really a singleton currently
-class AudioOutputObserver // : public MSGOutputObserver
-{
-public:
-  AudioOutputObserver();
-  virtual ~AudioOutputObserver();
-
-  void Clear();
-  void InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
-                    int aFreq, int aChannels, AudioSampleFormat aFormat);
-  uint32_t PlayoutFrequency() { return mPlayoutFreq; }
-  uint32_t PlayoutChannels() { return mPlayoutChannels; }
-
-  FarEndAudioChunk *Pop();
-  uint32_t Size();
-
-private:
-  uint32_t mPlayoutFreq;
-  uint32_t mPlayoutChannels;
-
-  nsAutoPtr<webrtc::SingleRwFifo> mPlayoutFifo;
-  uint32_t mChunkSize;
-
-  // chunking to 10ms support
-  nsAutoPtr<FarEndAudioChunk> mSaved;
-  uint32_t mSamplesSaved;
-};
-
-// XXX until there's a registration API in MSG
-extern StaticAutoPtr<AudioOutputObserver> gFarendObserver;
-
-}
-
-#endif
@@ -101,8 +101,7 @@ public:
   /* Change device configuration. */
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) = 0;
+                          bool aNoiseOn, uint32_t aNoise) = 0;
 
   /* Returns true if a source represents a fake capture device and
    * false otherwise
@@ -48,8 +48,7 @@ public:
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
@@ -101,8 +100,7 @@ public:
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
@@ -279,7 +279,7 @@ MediaEngineTabVideoSource::Stop(mozilla::SourceMediaStream*, mozilla::TrackID)
 }
 
 nsresult
-MediaEngineTabVideoSource::Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t)
+MediaEngineTabVideoSource::Config(bool, uint32_t, bool, uint32_t, bool, uint32_t)
 {
   return NS_OK;
 }
@@ -26,7 +26,7 @@ class MediaEngineTabVideoSource : public MediaEngineVideoSource, nsIDOMEventList
   virtual nsresult Snapshot(uint32_t, nsIDOMFile**);
   virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
   virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
-  virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);
+  virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t);
   virtual bool IsFake();
   void Draw();
 
@@ -60,8 +60,6 @@ MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
 #else
   AsyncLatencyLogger::Get()->AddRef();
 #endif
-  // XXX
-  gFarendObserver = new AudioOutputObserver();
 }
 
 void
@@ -40,7 +40,6 @@
 #include "webrtc/voice_engine/include/voe_volume_control.h"
 #include "webrtc/voice_engine/include/voe_external_media.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_call_report.h"
 
 // Video Engine
 #include "webrtc/video_engine/include/vie_base.h"
@@ -57,7 +56,6 @@
 #endif
 
 #include "NullTransport.h"
-#include "AudioOutputObserver.h"
 
 namespace mozilla {
 
@@ -149,8 +147,7 @@ public:
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay) { return NS_OK; };
+                          bool aNoiseOn, uint32_t aNoise) { return NS_OK; };
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
                           TrackID aId,
@@ -261,13 +258,10 @@ public:
     , mCapIndex(aIndex)
     , mChannel(-1)
     , mInitDone(false)
-    , mStarted(false)
-    , mSamples(0)
     , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
     , mEchoCancel(webrtc::kEcDefault)
     , mAGC(webrtc::kAgcDefault)
     , mNoiseSuppress(webrtc::kNsDefault)
-    , mPlayoutDelay(0)
     , mNullTransport(nullptr) {
     MOZ_ASSERT(aVoiceEnginePtr);
     mState = kReleased;
@@ -287,8 +281,7 @@ public:
   virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
   virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
                           bool aAgcOn, uint32_t aAGC,
-                          bool aNoiseOn, uint32_t aNoise,
-                          int32_t aPlayoutDelay);
+                          bool aNoiseOn, uint32_t aNoise);
 
   virtual void NotifyPull(MediaStreamGraph* aGraph,
                           SourceMediaStream *aSource,
@@ -319,7 +312,6 @@ private:
   ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
   ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
   ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
-  ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
 
   // mMonitor protects mSources[] access/changes, and transitions of mState
   // from kStarted to kStopped (which are combined with EndTrack()).
@@ -331,8 +323,6 @@ private:
   int mChannel;
   TrackID mTrackID;
   bool mInitDone;
-  bool mStarted;
-  int mSamples; // int to avoid conversions when comparing/etc to samplingFreq & length
 
   nsString mDeviceName;
   nsString mDeviceUUID;
@@ -341,7 +331,6 @@ private:
   webrtc::EcModes mEchoCancel;
   webrtc::AgcModes mAGC;
   webrtc::NsModes mNoiseSuppress;
-  int32_t mPlayoutDelay;
 
   NullTransport *mNullTransport;
 };
@@ -355,8 +344,6 @@ public:
 #ifdef MOZ_B2G_CAMERA
     AsyncLatencyLogger::Get()->Release();
 #endif
-    // XXX
-    gFarendObserver = nullptr;
   }
 
   // Clients should ensure to clean-up sources video/audio sources
@@ -3,15 +3,6 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaEngineWebRTC.h"
-#include <stdio.h>
-#include <algorithm>
-#include "mozilla/Assertions.h"
-
-// scoped_ptr.h uses FF
-#ifdef FF
-#undef FF
-#endif
-#include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 
 #define CHANNELS 1
 #define ENCODING "L16"
@@ -21,13 +12,6 @@
 #define SAMPLE_FREQUENCY 16000
 #define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000)
 
-// These are restrictions from the webrtc.org code
-#define MAX_CHANNELS 2
-#define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100
-
-#define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10
-static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH");
-
 namespace mozilla {
 
 #ifdef LOG
@@ -46,117 +30,6 @@ extern PRLogModuleInfo* GetMediaManagerLog();
 */
 NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
 
-// XXX temp until MSG supports registration
-StaticAutoPtr<AudioOutputObserver> gFarendObserver;
-
-AudioOutputObserver::AudioOutputObserver()
-  : mPlayoutFreq(0)
-  , mPlayoutChannels(0)
-  , mChunkSize(0)
-  , mSamplesSaved(0)
-{
-  // Buffers of 10ms chunks
-  mPlayoutFifo = new webrtc::SingleRwFifo(MAX_AEC_FIFO_DEPTH/10);
-}
-
-AudioOutputObserver::~AudioOutputObserver()
-{
-}
-
-void
-AudioOutputObserver::Clear()
-{
-  while (mPlayoutFifo->size() > 0) {
-    (void) mPlayoutFifo->Pop();
-  }
-}
-
-FarEndAudioChunk *
-AudioOutputObserver::Pop()
-{
-  return (FarEndAudioChunk *) mPlayoutFifo->Pop();
-}
-
-uint32_t
-AudioOutputObserver::Size()
-{
-  return mPlayoutFifo->size();
-}
-
-// static
-void
-AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aSamples, bool aOverran,
-                                  int aFreq, int aChannels, AudioSampleFormat aFormat)
-{
-  if (mPlayoutChannels != 0) {
-    if (mPlayoutChannels != static_cast<uint32_t>(aChannels)) {
-      MOZ_CRASH();
-    }
-  } else {
-    MOZ_ASSERT(aChannels <= MAX_CHANNELS);
-    mPlayoutChannels = static_cast<uint32_t>(aChannels);
-  }
-  if (mPlayoutFreq != 0) {
-    if (mPlayoutFreq != static_cast<uint32_t>(aFreq)) {
-      MOZ_CRASH();
-    }
-  } else {
-    MOZ_ASSERT(aFreq <= MAX_SAMPLING_FREQ);
-    MOZ_ASSERT(!(aFreq % 100), "Sampling rate for far end data should be multiple of 100.");
-    mPlayoutFreq = aFreq;
-    mChunkSize = aFreq/100; // 10ms
-  }
-
-#ifdef LOG_FAREND_INSERTION
-  static FILE *fp = fopen("insertfarend.pcm","wb");
-#endif
-
-  if (mSaved) {
-    // flag overrun as soon as possible, and only once
-    mSaved->mOverrun = aOverran;
-    aOverran = false;
-  }
-  // Rechunk to 10ms.
-  // The AnalyzeReverseStream() and WebRtcAec_BufferFarend() functions insist on 10ms
-  // samples per call. Annoying...
-  while (aSamples) {
-    if (!mSaved) {
-      mSaved = (FarEndAudioChunk *) moz_xmalloc(sizeof(FarEndAudioChunk) +
-                                                (mChunkSize * aChannels - 1)*sizeof(int16_t));
-      mSaved->mSamples = mChunkSize;
-      mSaved->mOverrun = aOverran;
-      aOverran = false;
-    }
-    uint32_t to_copy = mChunkSize - mSamplesSaved;
-    if (to_copy > aSamples) {
-      to_copy = aSamples;
-    }
-
-    int16_t *dest = &(mSaved->mData[mSamplesSaved * aChannels]);
-    ConvertAudioSamples(aBuffer, dest, to_copy * aChannels);
-
-#ifdef LOG_FAREND_INSERTION
-    if (fp) {
-      fwrite(&(mSaved->mData[mSamplesSaved * aChannels]), to_copy * aChannels, sizeof(int16_t), fp);
-    }
-#endif
-    aSamples -= to_copy;
-    mSamplesSaved += to_copy;
-
-    if (mSamplesSaved >= mChunkSize) {
-      int free_slots = mPlayoutFifo->capacity() - mPlayoutFifo->size();
-      if (free_slots <= 0) {
-        // XXX We should flag an overrun for the reader. We can't drop data from it due to
-        // thread safety issues.
-        break;
-      } else {
-        mPlayoutFifo->Push((int8_t *) mSaved.forget()); // takes ownership
-        mSamplesSaved = 0;
-      }
-    }
-  }
-}
-
 void
 MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
 {
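The InsertFarEnd() implementation removed above is a generic rechunking loop: arbitrary-length far-end audio is accumulated into fixed 10 ms chunks (aFreq/100 frames each) before being pushed to a single-reader/single-writer FIFO, because the AEC far-end entry points only accept 10 ms blocks. A simplified sketch of the same accumulation idea, using standard containers instead of the WebRTC types (all names here are illustrative, not part of the tree):

    #include <cstdint>
    #include <queue>
    #include <vector>

    struct Chunk { std::vector<int16_t> samples; }; // stand-in for FarEndAudioChunk

    class Rechunker {
    public:
      Rechunker(uint32_t aRateHz, uint32_t aChannels)
        : mChunkFrames(aRateHz / 100), mChannels(aChannels) {} // 10 ms = rate/100 frames

      void Insert(const int16_t* aBuf, uint32_t aFrames) {
        while (aFrames) {
          uint32_t room = mChunkFrames - mSavedFrames;   // space left in the pending chunk
          uint32_t toCopy = room < aFrames ? room : aFrames;
          mSaved.insert(mSaved.end(), aBuf, aBuf + toCopy * mChannels);
          aBuf += toCopy * mChannels;
          aFrames -= toCopy;
          mSavedFrames += toCopy;
          if (mSavedFrames == mChunkFrames) {            // full 10 ms chunk: hand it off
            mFifo.push(Chunk{std::move(mSaved)});
            mSaved.clear();
            mSavedFrames = 0;
          }
        }
      }

      std::queue<Chunk> mFifo; // stand-in for the SingleRwFifo
    private:
      uint32_t mChunkFrames;
      uint32_t mChannels;
      uint32_t mSavedFrames = 0;
      std::vector<int16_t> mSaved;
    };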
@@ -180,27 +53,18 @@ MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID)
 nsresult
 MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
                                      bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise,
-                                     int32_t aPlayoutDelay)
+                                     bool aNoiseOn, uint32_t aNoise)
 {
   LOG(("Audio config: aec: %d, agc: %d, noise: %d",
        aEchoOn ? aEcho : -1,
        aAgcOn ? aAGC : -1,
        aNoiseOn ? aNoise : -1));
 
-  bool update_echo = (mEchoOn != aEchoOn);
-  bool update_agc = (mAgcOn != aAgcOn);
-  bool update_noise = (mNoiseOn != aNoiseOn);
-  mEchoOn = aEchoOn;
+  bool update_agc = (mAgcOn == aAgcOn);
+  bool update_noise = (mNoiseOn == aNoiseOn);
   mAgcOn = aAgcOn;
   mNoiseOn = aNoiseOn;
 
-  if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged) {
-    if (mEchoCancel != (webrtc::EcModes) aEcho) {
-      update_echo = true;
-      mEchoCancel = (webrtc::EcModes) aEcho;
-    }
-  }
   if ((webrtc::AgcModes) aAGC != webrtc::kAgcUnchanged) {
     if (mAGC != (webrtc::AgcModes) aAGC) {
       update_agc = true;
@@ -213,21 +77,21 @@ MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
       mNoiseSuppress = (webrtc::NsModes) aNoise;
     }
   }
-  mPlayoutDelay = aPlayoutDelay;
 
   if (mInitDone) {
     int error;
+#if 0
+    // Until we can support feeding our full output audio from the browser
+    // through the MediaStream, this won't work. Or we need to move AEC to
+    // below audio input and output, perhaps invoked from here.
+    mEchoOn = aEchoOn;
+    if ((webrtc::EcModes) aEcho != webrtc::kEcUnchanged)
+      mEchoCancel = (webrtc::EcModes) aEcho;
+    mVoEProcessing->SetEcStatus(mEchoOn, aEcho);
+#else
+    (void) aEcho; (void) aEchoOn; (void) mEchoCancel; // suppress warnings
+#endif
 
-    if (update_echo &&
-      0 != (error = mVoEProcessing->SetEcStatus(mEchoOn, (webrtc::EcModes) aEcho))) {
-      LOG(("%s Error setting Echo Status: %d ",__FUNCTION__, error));
-      // Overhead of capturing all the time is very low (<0.1% of an audio only call)
-      if (mEchoOn) {
-        if (0 != (error = mVoEProcessing->SetEcMetricsStatus(true))) {
-          LOG(("%s Error setting Echo Metrics: %d ",__FUNCTION__, error));
-        }
-      }
-    }
     if (update_agc &&
       0 != (error = mVoEProcessing->SetAgcStatus(mAgcOn, (webrtc::AgcModes) aAGC))) {
       LOG(("%s Error setting AGC Status: %d ",__FUNCTION__, error));
@@ -294,8 +158,6 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
   AudioSegment* segment = new AudioSegment();
   aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-  // XXX Make this based on the pref.
-  aStream->RegisterForAudioMixing();
   LOG(("Start audio for stream %p", aStream));
 
   if (mState == kStarted) {
@@ -308,16 +170,10 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
   // Make sure logger starts before capture
   AsyncLatencyLogger::Get(true);
 
-  // Register output observer
-  // XXX
-  MOZ_ASSERT(gFarendObserver);
-  gFarendObserver->Clear();
-
   // Configure audio processing in webrtc code
   Config(mEchoOn, webrtc::kEcUnchanged,
          mAgcOn, webrtc::kAgcUnchanged,
-         mNoiseOn, webrtc::kNsUnchanged,
-         mPlayoutDelay);
+         mNoiseOn, webrtc::kNsUnchanged);
 
   if (mVoEBase->StartReceive(mChannel)) {
     return NS_ERROR_FAILURE;
@@ -410,11 +266,6 @@ MediaEngineWebRTCAudioSource::Init()
     return;
   }
 
-  mVoECallReport = webrtc::VoECallReport::GetInterface(mVoiceEngine);
-  if (!mVoECallReport) {
-    return;
-  }
-
   mChannel = mVoEBase->CreateChannel();
   if (mChannel < 0) {
     return;
@@ -511,50 +362,6 @@ MediaEngineWebRTCAudioSource::Process(int channel,
                                       webrtc::ProcessingTypes type, sample* audio10ms,
                                       int length, int samplingFreq, bool isStereo)
 {
-  // On initial capture, throw away all far-end data except the most recent sample
-  // since it's already irrelevant and we want to keep avoid confusing the AEC far-end
-  // input code with "old" audio.
-  if (!mStarted) {
-    mStarted = true;
-    while (gFarendObserver->Size() > 1) {
-      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
-      free(buffer);
-    }
-  }
-
-  while (gFarendObserver->Size() > 0) {
-    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
-    if (buffer) {
-      int length = buffer->mSamples;
-      if (mVoERender->ExternalPlayoutData(buffer->mData,
-                                          gFarendObserver->PlayoutFrequency(),
-                                          gFarendObserver->PlayoutChannels(),
-                                          mPlayoutDelay,
-                                          length) == -1) {
-        return;
-      }
-    }
-    free(buffer);
-  }
-
-#ifdef PR_LOGGING
-  mSamples += length;
-  if (mSamples > samplingFreq) {
-    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
-    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
-      webrtc::EchoStatistics echo;
-
-      mVoECallReport->GetEchoMetricSummary(echo);
-#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
-      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
-           DUMP_STATVAL(echo.erl),
-           DUMP_STATVAL(echo.erle),
-           DUMP_STATVAL(echo.rerl),
-           DUMP_STATVAL(echo.a_nlp)));
-    }
-  }
-#endif
-
   MonitorAutoLock lock(mMonitor);
   if (mState != kStarted)
     return;
|
@ -12,8 +12,7 @@ EXPORTS += [
|
|||||||
]
|
]
|
||||||
|
|
||||||
if CONFIG['MOZ_WEBRTC']:
|
if CONFIG['MOZ_WEBRTC']:
|
||||||
EXPORTS += ['AudioOutputObserver.h',
|
EXPORTS += ['LoadManager.h',
|
||||||
'LoadManager.h',
|
|
||||||
'LoadManagerFactory.h',
|
'LoadManagerFactory.h',
|
||||||
'LoadMonitor.h',
|
'LoadMonitor.h',
|
||||||
'MediaEngineWebRTC.h']
|
'MediaEngineWebRTC.h']
|
||||||
|
@@ -400,30 +400,13 @@ class nsDOMUserMediaStream : public DOMLocalMediaStream
 {
 public:
   static already_AddRefed<nsDOMUserMediaStream>
-  CreateTrackUnionStream(nsIDOMWindow* aWindow,
-                         MediaEngineSource *aAudioSource,
-                         MediaEngineSource *aVideoSource)
+  CreateTrackUnionStream(nsIDOMWindow* aWindow, uint32_t aHintContents)
   {
-    DOMMediaStream::TrackTypeHints hints =
-      (aAudioSource ? DOMMediaStream::HINT_CONTENTS_AUDIO : 0) |
-      (aVideoSource ? DOMMediaStream::HINT_CONTENTS_VIDEO : 0);
-
-    nsRefPtr<nsDOMUserMediaStream> stream = new nsDOMUserMediaStream(aAudioSource);
-    stream->InitTrackUnionStream(aWindow, hints);
+    nsRefPtr<nsDOMUserMediaStream> stream = new nsDOMUserMediaStream();
+    stream->InitTrackUnionStream(aWindow, aHintContents);
     return stream.forget();
   }
 
-  nsDOMUserMediaStream(MediaEngineSource *aAudioSource) :
-    mAudioSource(aAudioSource),
-    mEchoOn(true),
-    mAgcOn(false),
-    mNoiseOn(true),
-    mEcho(webrtc::kEcDefault),
-    mAgc(webrtc::kAgcDefault),
-    mNoise(webrtc::kNsDefault),
-    mPlayoutDelay(20)
-  {}
-
   virtual ~nsDOMUserMediaStream()
   {
     Stop();
@@ -453,21 +436,6 @@ public:
     return false;
   }
 
-  virtual void
-  AudioConfig(bool aEchoOn, uint32_t aEcho,
-              bool aAgcOn, uint32_t aAgc,
-              bool aNoiseOn, uint32_t aNoise,
-              int32_t aPlayoutDelay)
-  {
-    mEchoOn = aEchoOn;
-    mEcho = aEcho;
-    mAgcOn = aAgcOn;
-    mAgc = aAgc;
-    mNoiseOn = aNoiseOn;
-    mNoise = aNoise;
-    mPlayoutDelay = aPlayoutDelay;
-  }
-
   virtual void RemoveDirectListener(MediaStreamDirectListener *aListener) MOZ_OVERRIDE
   {
     if (mSourceStream) {
@@ -490,14 +458,6 @@ public:
   // explicitly destroyed too.
   nsRefPtr<SourceMediaStream> mSourceStream;
   nsRefPtr<MediaInputPort> mPort;
-  nsRefPtr<MediaEngineSource> mAudioSource; // so we can turn on AEC
-  bool mEchoOn;
-  bool mAgcOn;
-  bool mNoiseOn;
-  uint32_t mEcho;
-  uint32_t mAgc;
-  uint32_t mNoise;
-  uint32_t mPlayoutDelay;
 };
 
 /**
@@ -578,12 +538,6 @@ public:
   NS_IMETHOD
   Run()
   {
-    int32_t aec = (int32_t) webrtc::kEcUnchanged;
-    int32_t agc = (int32_t) webrtc::kAgcUnchanged;
-    int32_t noise = (int32_t) webrtc::kNsUnchanged;
-    bool aec_on = false, agc_on = false, noise_on = false;
-    int32_t playout_delay = 0;
-
     NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
     nsPIDOMWindow *window = static_cast<nsPIDOMWindow*>
       (nsGlobalWindow::GetInnerWindowWithId(mWindowID));
@@ -596,39 +550,19 @@ public:
       return NS_OK;
     }
 
-#ifdef MOZ_WEBRTC
-    // Right now these configs are only of use if webrtc is available
-    nsresult rv;
-    nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
-    if (NS_SUCCEEDED(rv)) {
-      nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
-
-      if (branch) {
-        branch->GetBoolPref("media.getusermedia.aec_enabled", &aec_on);
-        branch->GetIntPref("media.getusermedia.aec", &aec);
-        branch->GetBoolPref("media.getusermedia.agc_enabled", &agc_on);
-        branch->GetIntPref("media.getusermedia.agc", &agc);
-        branch->GetBoolPref("media.getusermedia.noise_enabled", &noise_on);
-        branch->GetIntPref("media.getusermedia.noise", &noise);
-        branch->GetIntPref("media.getusermedia.playout_delay", &playout_delay);
-      }
-    }
-#endif
     // Create a media stream.
+    DOMMediaStream::TrackTypeHints hints =
+      (mAudioSource ? DOMMediaStream::HINT_CONTENTS_AUDIO : 0) |
+      (mVideoSource ? DOMMediaStream::HINT_CONTENTS_VIDEO : 0);
+
     nsRefPtr<nsDOMUserMediaStream> trackunion =
-      nsDOMUserMediaStream::CreateTrackUnionStream(window, mAudioSource,
-                                                   mVideoSource);
+      nsDOMUserMediaStream::CreateTrackUnionStream(window, hints);
     if (!trackunion) {
       nsCOMPtr<nsIDOMGetUserMediaErrorCallback> error = mError.forget();
       LOG(("Returning error for getUserMedia() - no stream"));
      error->OnError(NS_LITERAL_STRING("NO_STREAM"));
       return NS_OK;
     }
-    trackunion->AudioConfig(aec_on, (uint32_t) aec,
-                            agc_on, (uint32_t) agc,
-                            noise_on, (uint32_t) noise,
-                            playout_delay);
-
 
     MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
     nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
@@ -658,13 +592,6 @@ public:
     TracksAvailableCallback* tracksAvailableCallback =
       new TracksAvailableCallback(mManager, mSuccess, mWindowID, trackunion);
 
-#ifdef MOZ_WEBRTC
-    mListener->AudioConfig(aec_on, (uint32_t) aec,
-                           agc_on, (uint32_t) agc,
-                           noise_on, (uint32_t) noise,
-                           playout_delay);
-#endif
-
     // Dispatch to the media thread to ask it to start the sources,
     // because that can take a while.
     // Pass ownership of trackunion to the MediaOperationRunnable
@@ -677,6 +604,33 @@ public:
                                mError.forget()));
     mediaThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
 
+#ifdef MOZ_WEBRTC
+    // Right now these configs are only of use if webrtc is available
+    nsresult rv;
+    nsCOMPtr<nsIPrefService> prefs = do_GetService("@mozilla.org/preferences-service;1", &rv);
+    if (NS_SUCCEEDED(rv)) {
+      nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
+
+      if (branch) {
+        int32_t aec = (int32_t) webrtc::kEcUnchanged;
+        int32_t agc = (int32_t) webrtc::kAgcUnchanged;
+        int32_t noise = (int32_t) webrtc::kNsUnchanged;
+        bool aec_on = false, agc_on = false, noise_on = false;
+
+        branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
+        branch->GetIntPref("media.peerconnection.aec", &aec);
+        branch->GetBoolPref("media.peerconnection.agc_enabled", &agc_on);
+        branch->GetIntPref("media.peerconnection.agc", &agc);
+        branch->GetBoolPref("media.peerconnection.noise_enabled", &noise_on);
+        branch->GetIntPref("media.peerconnection.noise", &noise);
+
+        mListener->AudioConfig(aec_on, (uint32_t) aec,
+                               agc_on, (uint32_t) agc,
+                               noise_on, (uint32_t) noise);
+      }
+    }
+#endif
+
     // We won't need mError now.
     mError = nullptr;
     return NS_OK;
@@ -127,8 +127,7 @@ public:
   void
   AudioConfig(bool aEchoOn, uint32_t aEcho,
               bool aAgcOn, uint32_t aAGC,
-              bool aNoiseOn, uint32_t aNoise,
-              int32_t aPlayoutDelay)
+              bool aNoiseOn, uint32_t aNoise)
   {
     if (mAudioSource) {
 #ifdef MOZ_WEBRTC
@@ -136,7 +135,7 @@ public:
       RUN_ON_THREAD(mMediaThread,
                     WrapRunnable(nsRefPtr<MediaEngineSource>(mAudioSource), // threadsafe
                                  &MediaEngineSource::Config,
-                                 aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise, aPlayoutDelay),
+                                 aEchoOn, aEcho, aAgcOn, aAGC, aNoiseOn, aNoise),
                     NS_DISPATCH_NORMAL);
 #endif
     }
@@ -14,11 +14,8 @@ webrtc_non_unified_sources = [
    'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c', # Because of name clash in the kDampFilter variable
    'trunk/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c', # Because of name clash in the kDampFilter variable
    'trunk/webrtc/modules/audio_coding/neteq4/audio_vector.cc', # Because of explicit template specializations
-   'trunk/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc', # Because of LATE()
-   'trunk/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc',# Because of LATE()
    'trunk/webrtc/modules/audio_device/opensl/opensles_input.cc', # Because of name clash in the kOption variable
    'trunk/webrtc/modules/audio_device/opensl/opensles_output.cc', # Because of name clash in the kOption variable
-   'trunk/webrtc/modules/audio_device/opensl/single_rw_fifo.cc', # Because of name clash with #define FF
    'trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc', # Because of ordering assumptions in strsafe.h
    'trunk/webrtc/modules/audio_processing/aec/aec_core.c', # Because of name clash in the ComfortNoise function
    'trunk/webrtc/modules/audio_processing/aecm/aecm_core.c', # Because of name clash in the ComfortNoise function
@@ -413,11 +413,27 @@ WebrtcAudioConduit::ConfigureSendMediaCodec(const AudioCodecConfig* codecConfig)
     nsCOMPtr<nsIPrefBranch> branch = do_QueryInterface(prefs);
 
     if (branch) {
+      int32_t aec = 0; // 0 == unchanged
+      bool aec_on = false;
+
+      branch->GetBoolPref("media.peerconnection.aec_enabled", &aec_on);
+      branch->GetIntPref("media.peerconnection.aec", &aec);
+
+      CSFLogDebug(logTag,"Audio config: aec: %d", aec_on ? aec : -1);
+      mEchoOn = aec_on;
+      if (static_cast<webrtc::EcModes>(aec) != webrtc::kEcUnchanged)
+        mEchoCancel = static_cast<webrtc::EcModes>(aec);
+
       branch->GetIntPref("media.peerconnection.capture_delay", &mCaptureDelay);
     }
   }
 #endif
 
+  if (0 != (error = mPtrVoEProcessing->SetEcStatus(mEchoOn, mEchoCancel))) {
+    CSFLogError(logTag,"%s Error setting EVStatus: %d ",__FUNCTION__, error);
+    return kMediaConduitUnknownError;
+  }
+
   //Let's Send Transport State-machine on the Engine
   if(mPtrVoEBase->StartSend(mChannel) == -1)
   {
@@ -911,7 +927,7 @@ WebrtcAudioConduit::GetNum10msSamplesForFrequency(int samplingFreqHz) const
   {
     case 16000: return 160; //160 samples
     case 32000: return 320; //320 samples
-    case 44100: return 441; //441 samples
+    case 44000: return 440; //440 samples
    case 48000: return 480; //480 samples
    default: return 0; // invalid or unsupported
   }
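GetNum10msSamplesForFrequency() above is just the 10 ms block-size rule: one 10 ms block holds samplingFreqHz / 100 frames, so 16000 → 160, 32000 → 320, 44100 → 441, 48000 → 480. A table-free equivalent, assuming the rate is a multiple of 100 (the helper name is illustrative, not from the tree):

    // Frames in one 10 ms block at the given rate (rate must be a multiple of 100).
    inline int Num10msSamples(int samplingFreqHz) {
      return samplingFreqHz / 100;
    }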
@@ -162,6 +162,8 @@ public:
                       mChannel(-1),
                       mCurSendCodecConfig(nullptr),
                       mCaptureDelay(150),
+                      mEchoOn(true),
+                      mEchoCancel(webrtc::kEcAec),
 #ifdef MOZILLA_INTERNAL_API
                       mLastTimestamp(0),
 #endif // MOZILLA_INTERNAL_API
@@ -262,6 +264,9 @@ private:
   // Current "capture" delay (really output plus input delay)
   int32_t mCaptureDelay;
 
+  bool mEchoOn;
+  webrtc::EcModes mEchoCancel;
+
 #ifdef MOZILLA_INTERNAL_API
   uint32_t mLastTimestamp;
 #endif // MOZILLA_INTERNAL_API
@@ -9,9 +9,6 @@
  */
 
 #include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-#if defined(_MSC_VER)
-#include <windows.h>
-#endif
 
 static int UpdatePos(int pos, int capacity) {
   return (pos + 1) % capacity;
@@ -22,11 +19,7 @@ namespace webrtc {
 namespace subtle {
 
 inline void MemoryBarrier() {
-#if defined(_MSC_VER)
-  ::MemoryBarrier();
-#else
   __sync_synchronize();
-#endif
 }
 
 }  // namespace subtle
@@ -123,19 +123,16 @@
             'win/audio_device_utility_win.h',
             'win/audio_mixer_manager_win.cc',
             'win/audio_mixer_manager_win.h',
-            # used externally for getUserMedia
-            'opensl/single_rw_fifo.cc',
-            'opensl/single_rw_fifo.h',
           ],
           'conditions': [
             ['OS=="android"', {
               'sources': [
                 'opensl/audio_manager_jni.cc',
                 'opensl/audio_manager_jni.h',
                 'android/audio_device_jni_android.cc',
                 'android/audio_device_jni_android.h',
               ],
             }],
            ['OS=="android" or moz_widget_toolkit_gonk==1', {
              'link_settings': {
                'libraries': [
@@ -157,15 +154,17 @@
                 'opensl/opensles_input.cc',
                 'opensl/opensles_input.h',
                 'opensl/opensles_output.h',
+                'opensl/single_rw_fifo.cc',
+                'opensl/single_rw_fifo.h',
                 'shared/audio_device_utility_shared.cc',
                 'shared/audio_device_utility_shared.h',
               ],
             }, {
               'sources': [
                 'shared/audio_device_utility_shared.cc',
                 'shared/audio_device_utility_shared.h',
                 'android/audio_device_jni_android.cc',
                 'android/audio_device_jni_android.h',
               ],
             }],
             ['enable_android_opensl_output==1', {
@@ -43,9 +43,6 @@ class FakeVoEExternalMedia : public VoEExternalMedia {
   WEBRTC_STUB(ExternalPlayoutGetData,
               (int16_t speechData10ms[], int samplingFreqHz,
                int current_delay_ms, int& lengthSamples));
-  WEBRTC_STUB(ExternalPlayoutData,
-              (int16_t speechData10ms[], int samplingFreqHz,
-               int num_channels, int current_delay_ms, int& lengthSamples));
   WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz,
                               AudioFrame* frame));
   WEBRTC_STUB(SetExternalMixing, (int channel, bool enable));
@@ -97,18 +97,10 @@ public:
         int samplingFreqHz, int current_delay_ms) = 0;
 
 
-    // This function inserts audio written to the OS audio drivers for use
-    // as the far-end signal for AEC processing. The length of the block
-    // must be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or
-    // 48000 kHz sampling rates respectively).
-    virtual int ExternalPlayoutData(
-        int16_t speechData10ms[], int samplingFreqHz, int num_channels,
-        int current_delay_ms, int& lengthSamples) = 0;
-
     // This function gets audio for an external playout sink.
     // During transmission, this function should be called every ~10 ms
     // to obtain a new 10 ms frame of audio. The length of the block will
-    // be 160, 320, 441 or 480 samples (for 16000, 32000, 44100 or 48000
+    // be 160, 320, 440 or 480 samples (for 16000, 32000, 44100 or 48000
     // kHz sampling rates respectively).
     virtual int ExternalPlayoutGetData(
         int16_t speechData10ms[], int samplingFreqHz,
@@ -566,7 +566,7 @@ OutputMixer::DoOperationsOnCombinedSignal()
 
     // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
 
-    APMAnalyzeReverseStream(_audioFrame);
+    APMAnalyzeReverseStream();
 
     // --- External media processing
 
@@ -592,13 +592,17 @@ OutputMixer::DoOperationsOnCombinedSignal()
     return 0;
 }
 
-void OutputMixer::APMAnalyzeReverseStream(AudioFrame &audioFrame) {
+// ----------------------------------------------------------------------------
+// Private methods
+// ----------------------------------------------------------------------------
+
+void OutputMixer::APMAnalyzeReverseStream() {
     // Convert from mixing to AudioProcessing sample rate, determined by the send
     // side. Downmix to mono.
     AudioFrame frame;
     frame.num_channels_ = 1;
     frame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
-    if (RemixAndResample(audioFrame, &audioproc_resampler_, &frame) == -1)
+    if (RemixAndResample(_audioFrame, &audioproc_resampler_, &frame) == -1)
         return;
 
     if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) {
@@ -607,10 +611,6 @@ void OutputMixer::APMAnalyzeReverseStream(AudioFrame &audioFrame) {
     }
 }
 
-// ----------------------------------------------------------------------------
-// Private methods
-// ----------------------------------------------------------------------------
-
 int
 OutputMixer::InsertInbandDtmfTone()
 {
@@ -118,11 +118,9 @@ public:
     void PlayFileEnded(int32_t id);
     void RecordFileEnded(int32_t id);
 
-    // so ExternalPlayoutData() can insert far-end audio from the audio drivers
-    void APMAnalyzeReverseStream(AudioFrame &audioFrame);
-
 private:
     OutputMixer(uint32_t instanceId);
+    void APMAnalyzeReverseStream();
     int InsertInbandDtmfTone();
 
     // uses
@@ -280,68 +280,6 @@ int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
 #endif
 }
 
-// This inserts a copy of the raw audio sent to the output drivers to use
-// as the "far end" signal for the AEC. Currently only 10ms chunks are
-// supported unfortunately. Since we have to rechunk to 10ms to call this,
-// thre isn't much gained by allowing N*10ms here; external code can loop
-// if needed.
-int VoEExternalMediaImpl::ExternalPlayoutData(
-    int16_t speechData10ms[],
-    int samplingFreqHz,
-    int num_channels,
-    int current_delay_ms,
-    int& lengthSamples)
-{
-    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
-                 "ExternalPlayoutData(speechData10ms=0x%x,"
-                 " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
-                 &speechData10ms[0], lengthSamples, samplingFreqHz,
-                 current_delay_ms);
-
-#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
-    if (!shared_->statistics().Initialized())
-    {
-        shared_->SetLastError(VE_NOT_INITED, kTraceError);
-        return -1;
-    }
-    // FIX(jesup) - check if this is enabled?
-    if (shared_->NumOfSendingChannels() == 0)
-    {
-        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
-            "SetExternalRecordingStatus() no channel is sending");
-        return -1;
-    }
-    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
-        (48000 != samplingFreqHz) && (44100 != samplingFreqHz))
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetExternalRecordingStatus() invalid sample rate");
-        return -1;
-    }
-    if (current_delay_ms < 0)
-    {
-        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
-            "SetExternalRecordingStatus() invalid delay)");
-        return -1;
-    }
-
-    // Far-end data is inserted without going through neteq/etc.
-    // Only supports 10ms chunks; AnalyzeReverseStream() enforces that
-    // lower down.
-    AudioFrame audioFrame;
-    audioFrame.UpdateFrame(-1, 0xFFFFFFFF,
-                           speechData10ms,
-                           lengthSamples,
-                           samplingFreqHz,
-                           AudioFrame::kNormalSpeech,
-                           AudioFrame::kVadUnknown,
-                           num_channels);
-
-    shared_->output_mixer()->APMAnalyzeReverseStream(audioFrame);
-#endif
-    return 0;
-}
-
 int VoEExternalMediaImpl::ExternalPlayoutGetData(
     int16_t speechData10ms[],
     int samplingFreqHz,
@@ -39,14 +39,6 @@ public:
         int samplingFreqHz,
         int current_delay_ms);
 
-    // Insertion of far-end data as actually played out to the OS audio driver
-    virtual int ExternalPlayoutData(
-        int16_t speechData10ms[],
-        int samplingFreqHz,
-        int num_channels,
-        int current_delay_ms,
-        int& lengthSamples);
-
     virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
                                        int samplingFreqHz,
                                        int current_delay_ms,
|
@ -264,33 +264,27 @@ pref("media.peerconnection.identity.timeout", 5000);
|
|||||||
// kXxxUnchanged = 0, kXxxDefault = 1, and higher values are specific to each
|
// kXxxUnchanged = 0, kXxxDefault = 1, and higher values are specific to each
|
||||||
// setting (for Xxx = Ec, Agc, or Ns). Defaults are all set to kXxxDefault here.
|
// setting (for Xxx = Ec, Agc, or Ns). Defaults are all set to kXxxDefault here.
|
||||||
pref("media.peerconnection.turn.disable", false);
|
pref("media.peerconnection.turn.disable", false);
|
||||||
pref("media.getusermedia.aec_enabled", true);
|
pref("media.peerconnection.aec_enabled", true);
|
||||||
pref("media.getusermedia.aec", 1);
|
pref("media.peerconnection.aec", 1);
|
||||||
pref("media.getusermedia.agc_enabled", false);
|
pref("media.peerconnection.agc_enabled", false);
|
||||||
pref("media.getusermedia.agc", 1);
|
pref("media.peerconnection.agc", 1);
|
||||||
pref("media.getusermedia.noise_enabled", true);
|
pref("media.peerconnection.noise_enabled", false);
|
||||||
pref("media.getusermedia.noise", 1);
|
pref("media.peerconnection.noise", 1);
|
||||||
// Adjustments for OS-specific input delay (lower bound)
|
// Adjustments for OS mediastream+output+OS+input delay (lower bound)
|
||||||
// Adjustments for OS-specific AudioStream+cubeb+output delay (lower bound)
|
|
||||||
#if defined(XP_MACOSX)
|
#if defined(XP_MACOSX)
|
||||||
pref("media.peerconnection.capture_delay", 50);
|
pref("media.peerconnection.capture_delay", 50);
|
||||||
pref("media.getusermedia.playout_delay", 10);
|
|
||||||
#elif defined(XP_WIN)
|
#elif defined(XP_WIN)
|
||||||
pref("media.peerconnection.capture_delay", 50);
|
pref("media.peerconnection.capture_delay", 50);
|
||||||
pref("media.getusermedia.playout_delay", 40);
|
|
||||||
#elif defined(ANDROID)
|
#elif defined(ANDROID)
|
||||||
pref("media.peerconnection.capture_delay", 100);
|
pref("media.peerconnection.capture_delay", 100);
|
||||||
pref("media.getusermedia.playout_delay", 100);
|
|
||||||
// Whether to enable Webrtc Hardware acceleration support
|
// Whether to enable Webrtc Hardware acceleration support
|
||||||
pref("media.navigator.hardware.vp8_encode.acceleration_enabled", false);
|
pref("media.navigator.hardware.vp8_encode.acceleration_enabled", false);
|
||||||
pref("media.navigator.hardware.vp8_decode.acceleration_enabled", false);
|
pref("media.navigator.hardware.vp8_decode.acceleration_enabled", false);
|
||||||
#elif defined(XP_LINUX)
|
#elif defined(XP_LINUX)
|
||||||
pref("media.peerconnection.capture_delay", 70);
|
pref("media.peerconnection.capture_delay", 70);
|
||||||
pref("media.getusermedia.playout_delay", 50);
|
|
||||||
#else
|
#else
|
||||||
// *BSD, others - merely a guess for now
|
// *BSD, others - merely a guess for now
|
||||||
pref("media.peerconnection.capture_delay", 50);
|
pref("media.peerconnection.capture_delay", 50);
|
||||||
pref("media.getusermedia.playout_delay", 50);
|
|
||||||
#endif
|
#endif
|
||||||
#else
|
#else
|
||||||
#ifdef ANDROID
|
#ifdef ANDROID
|
||||||