Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-26 06:11:37 +00:00
Bug 853721 - Part 2: Hook up DelayNode to the media streams graph and implement delaying of incoming audio; r=roc
This commit is contained in:
parent 0d62ef2574
commit dd2894d770
@@ -191,9 +191,6 @@ AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
       continue;
     }
     AudioChunk* chunk = &a->mLastChunk;
-    // XXX when we implement DelayNode, this will no longer be true and we'll
-    // need to treat a null chunk (when the DelayNode hasn't had a chance
-    // to produce data yet) as silence here.
     MOZ_ASSERT(chunk);
     if (chunk->IsNull()) {
       continue;
@@ -253,10 +253,6 @@ public:
   * Produce data for all streams >= aStreamIndex for the given time interval.
   * Advances block by block, each iteration producing data for all streams
   * for a single block.
   * This is needed if there are WebAudio delay nodes, whose output for a block
   * may depend on the output of any other node (including itself) for the
   * previous block. This is probably also more performant due to better memory
   * locality.
   * This is called whenever we have an AudioNodeStream in the graph.
   */
  void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
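The comment above explains why the graph must advance every AudioNodeStream one block at a time once a DelayNode is present: a delay line's output for a block may depend on what any node (including itself) produced for the previous block, so no stream can run ahead of the others. A minimal sketch of that iteration order follows; the remaining parameters, the Stream/Graph types, and the mStreams/ProduceOutput members are placeholders for illustration, not the real MediaStreamGraph API.

#include <cstdint>
#include <vector>

static const uint32_t WEBAUDIO_BLOCK_SIZE = 128;
typedef int64_t TrackTicks;

// Hypothetical stand-in for a stream that can render one block on demand.
struct Stream {
  void ProduceOutput(TrackTicks /*aFrom*/, TrackTicks /*aTo*/) { /* render one block */ }
};

struct Graph {
  std::vector<Stream> mStreams;

  // Outer loop over blocks, inner loop over streams: every stream finishes
  // block [t, t + WEBAUDIO_BLOCK_SIZE) before any stream starts the next one,
  // so a delay node can read what other nodes produced for the previous block.
  void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
                                         TrackTicks aFrom, TrackTicks aTo)
  {
    for (TrackTicks t = aFrom; t < aTo; t += WEBAUDIO_BLOCK_SIZE) {
      for (uint32_t i = aStreamIndex; i < mStreams.size(); ++i) {
        mStreams[i].ProduceOutput(t, t + WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
};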
@@ -6,6 +6,10 @@
#include "DelayNode.h"
#include "mozilla/dom/DelayNodeBinding.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioDestinationNode.h"
#include "WebAudioUtils.h"

namespace mozilla {
namespace dom {
@@ -19,10 +23,191 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(DelayNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(DelayNode, AudioNode)

class DelayNodeEngine : public AudioNodeEngine
{
public:
  explicit DelayNodeEngine(AudioDestinationNode* aDestination)
    : mSource(nullptr)
    , mDestination(static_cast<AudioNodeStream*> (aDestination->Stream()))
    // Keep the default value in sync with the default value in DelayNode::DelayNode.
    , mDelay(0.f)
    , mMaxDelay(0.)
    , mWriteIndex(0)
    , mCurrentDelayTime(0.)
  {
  }

  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  enum Parameters {
    DELAY,
    MAX_DELAY
  };
  void SetTimelineParameter(uint32_t aIndex, const AudioParamTimeline& aValue) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case DELAY:
      MOZ_ASSERT(mSource && mDestination);
      mDelay = aValue;
      WebAudioUtils::ConvertAudioParamToTicks(mDelay, mSource, mDestination);
      break;
    default:
      NS_ERROR("Bad DelayNodeEngine TimelineParameter");
    }
  }
  void SetDoubleParameter(uint32_t aIndex, double aValue) MOZ_OVERRIDE
  {
    switch (aIndex) {
    case MAX_DELAY: mMaxDelay = aValue; break;
    default:
      NS_ERROR("Bad DelayNodeEngine DoubleParameter");
    }
  }

  bool EnsureBuffer(uint32_t aNumberOfChannels)
  {
    if (aNumberOfChannels == 0) {
      return false;
    }
    if (mBuffer.Length() == 0) {
      if (!mBuffer.SetLength(aNumberOfChannels)) {
        return false;
      }
      const int32_t numFrames = NS_lround(mMaxDelay) * IdealAudioRate();
      for (uint32_t channel = 0; channel < aNumberOfChannels; ++channel) {
        if (!mBuffer[channel].SetLength(numFrames)) {
          return false;
        }
        memset(mBuffer[channel].Elements(), 0, numFrames * sizeof(float));
      }
    } else if (mBuffer.Length() != aNumberOfChannels) {
      // TODO: Handle changes in the channel count
      return false;
    }
    return true;
  }

  virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                 const AudioChunk& aInput,
                                 AudioChunk* aOutput,
                                 bool* aFinished)
  {
    MOZ_ASSERT(mSource == aStream, "Invalid source stream");

    const bool firstTime = !!!mBuffer.Length();
    const uint32_t numChannels = aInput.mChannelData.Length();

    if (!EnsureBuffer(numChannels)) {
      aOutput->SetNull(0);
      return;
    }

    AllocateAudioBlock(numChannels, aOutput);

    double delayTime = 0;
    float computedDelay[WEBAUDIO_BLOCK_SIZE];
    // Use a smoothing range of 20ms
    const double smoothingRate = WebAudioUtils::ComputeSmoothingRate(0.02, IdealAudioRate());

    if (mDelay.HasSimpleValue()) {
      delayTime = std::max(0.0, std::min(mMaxDelay, double(mDelay.GetValue())));
      if (firstTime) {
        // Initialize this only the first time to make sure that mCurrentDelayTime
        // has a valid value when we try to change the delay time further below.
        mCurrentDelayTime = delayTime;
      }
    } else {
      // Compute the delay values for the duration of the input AudioChunk
      TrackTicks tick = aStream->GetCurrentPosition();
      for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
        computedDelay[counter] = std::max(0.0, std::min(mMaxDelay,
                                   double(mDelay.GetValueAtTime<TrackTicks>(tick + counter))));
      }
    }

    for (uint32_t channel = 0; channel < numChannels; ++channel) {
      double currentDelayTime = mCurrentDelayTime;
      uint32_t writeIndex = mWriteIndex;

      float* buffer = mBuffer[channel].Elements();
      const uint32_t bufferLength = mBuffer[channel].Length();
      const float* input = static_cast<const float*>(aInput.mChannelData[channel]);
      float* output = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[channel]));

      for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
        if (mDelay.HasSimpleValue()) {
          // If the simple value has changed, smoothly approach it
          currentDelayTime += (delayTime - currentDelayTime) * smoothingRate;
        } else {
          currentDelayTime = computedDelay[i];
        }

        // Write the input sample to the correct location in our buffer
        buffer[writeIndex] = input[i];

        // Now, determine the correct read position. We adjust the read position to be
        // from currentDelayTime seconds in the past. We also interpolate the two input
        // frames in case the read position does not match an integer index.
        double readPosition = writeIndex + bufferLength -
                              (currentDelayTime * IdealAudioRate());
        if (readPosition >= bufferLength) {
          readPosition -= bufferLength;
        }
        MOZ_ASSERT(readPosition >= 0.0, "Why are we reading before the beginning of the buffer?");

        // Here is the reason why readIndex1 and readIndex2 will never be out
        // of bounds. The maximum value for bufferLength is 180 * 48000 (see
        // AudioContext::CreateDelay). The maximum value for mCurrentDelayTime is
        // 180.0, so initially readPosition cannot be more than bufferLength +
        // a fraction less than 1. Then we take care of that case by
        // subtracting bufferLength from it if needed. So, if
        // |bufferLength-readPosition<1.0|, readIndex1 will end up being zero.
        // If |1.0<=bufferLength-readPosition<2.0|, readIndex1 will be
        // bufferLength-1 and readIndex2 will be 0.
        int readIndex1 = int(readPosition);
        int readIndex2 = (readIndex1 + 1) % bufferLength;
        double interpolationFactor = readPosition - readIndex1;

        output[i] = (1.0 - interpolationFactor) * buffer[readIndex1] +
                    interpolationFactor * buffer[readIndex2];
        writeIndex = (writeIndex + 1) % bufferLength;
      }

      // Remember currentDelayTime and writeIndex for the next ProduceAudioBlock
      // call when processing the last channel.
      if (channel == numChannels - 1) {
        mCurrentDelayTime = currentDelayTime;
        mWriteIndex = writeIndex;
      }
    }
  }

  AudioNodeStream* mSource;
  AudioNodeStream* mDestination;
  AudioParamTimeline mDelay;
  // Maximum delay time in seconds
  double mMaxDelay;
  // Circular buffer for capturing delayed samples.
  AutoFallibleTArray<FallibleTArray<float>, 2> mBuffer;
  // Write index for the buffer, to write the frames to the correct index of the buffer
  // given the current delay.
  uint32_t mWriteIndex;
  // Current delay time, in seconds
  double mCurrentDelayTime;
};

DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
  : AudioNode(aContext)
-  , mDelay(new AudioParam(this, Callback, 0.0f, 0.0f, float(aMaxDelay)))
+  , mDelay(new AudioParam(this, SendDelayToStream, 0.0f, 0.0f, float(aMaxDelay)))
{
  DelayNodeEngine* engine = new DelayNodeEngine(aContext->Destination());
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  engine->SetSourceStream(static_cast<AudioNodeStream*> (mStream.get()));
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
}

JSObject*
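The heart of ProduceAudioBlock above is the per-sample write/read on the circular buffer: each input sample is written at writeIndex, and the output is read from a position currentDelayTime seconds (delay times the sample rate, in frames) behind it, linearly interpolating between the two neighbouring samples whenever that position is fractional. A stripped-down, self-contained sketch of just that step is shown below; the DelayLine type and its members are illustrative names, not Gecko code, and the delay is expressed directly in frames for a single channel.

#include <cstdint>
#include <vector>

// Sketch of the fractional-delay read/write used by DelayNodeEngine above.
struct DelayLine {
  std::vector<float> buffer;  // circular buffer sized to the maximum delay
  uint32_t writeIndex;

  explicit DelayLine(uint32_t aMaxFrames) : buffer(aMaxFrames, 0.0f), writeIndex(0) {}

  float Process(float aInput, double aDelayFrames) {
    // Write the newest sample at the write position.
    buffer[writeIndex] = aInput;

    // Read |aDelayFrames| behind the write position, wrapping around the buffer.
    double readPosition = writeIndex + buffer.size() - aDelayFrames;
    if (readPosition >= buffer.size()) {
      readPosition -= buffer.size();
    }

    // Linear interpolation between the two frames straddling readPosition.
    uint32_t readIndex1 = uint32_t(readPosition);
    uint32_t readIndex2 = (readIndex1 + 1) % buffer.size();
    double frac = readPosition - readIndex1;
    float out = float((1.0 - frac) * buffer[readIndex1] + frac * buffer[readIndex2]);

    writeIndex = (writeIndex + 1) % buffer.size();
    return out;
  }
};

At a 48000 Hz rate a delay of 0.5 seconds corresponds to aDelayFrames = 24000; DelayNodeEngine additionally clamps the delay to [0, mMaxDelay] and smooths changes to it per sample before converting it to frames.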
@@ -31,6 +216,13 @@ DelayNode::WrapObject(JSContext* aCx, JSObject* aScope)
  return DelayNodeBinding::Wrap(aCx, aScope, this);
}

void
DelayNode::SendDelayToStream(AudioNode* aNode)
{
  DelayNode* This = static_cast<DelayNode*>(aNode);
  SendTimelineParameterToStream(This, DelayNodeEngine::DELAY, *This->mDelay);
}

}
}
@@ -30,6 +30,14 @@ public:
    return mDelay;
  }

  virtual bool SupportsMediaStreams() const MOZ_OVERRIDE
  {
    return true;
  }

private:
  static void SendDelayToStream(AudioNode* aNode);

private:
  nsRefPtr<AudioParam> mDelay;
};
@@ -28,6 +28,15 @@ struct WebAudioUtils {
    return fabs(v1 - v2) < 1e-7;
  }

  /**
   * Computes an exponential smoothing rate for a time-based variable
   * over aDuration seconds.
   */
  static double ComputeSmoothingRate(double aDuration, double aSampleRate)
  {
    return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate));
  }

  /**
   * Converts AudioParamTimeline floating point time values to tick values
   * with respect to a source and a destination AudioNodeStream.
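ComputeSmoothingRate above supplies the per-sample coefficient that DelayNodeEngine applies as currentDelayTime += (delayTime - currentDelayTime) * smoothingRate, i.e. a one-pole exponential approach whose time constant is aDuration. A small standalone check of that behaviour (illustrative only, assuming the 20 ms range used by DelayNodeEngine and a 48000 Hz rate):

#include <cmath>
#include <cstdio>

int main() {
  const double sampleRate = 48000.0;
  const double duration = 0.02;  // 20 ms smoothing range
  const double rate = 1.0 - std::exp(-1.0 / (duration * sampleRate));

  double current = 0.0;
  const double target = 1.0;
  // After one time constant (duration * sampleRate frames) the value has
  // covered roughly 1 - 1/e, i.e. about 63% of the distance to the target.
  for (int i = 0; i < int(duration * sampleRate); ++i) {
    current += (target - current) * rate;
  }
  std::printf("after 20 ms: %f\n", current);  // ~0.63
  return 0;
}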