Bug 775319 - Determine the sample format at compile time for all media code. r=kinetik

Paul Adenot 2012-08-16 18:10:36 -07:00
parent 967b36e8ae
commit ee3a886b03
8 changed files with 118 additions and 133 deletions
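The core of the patch is small: instead of threading a runtime SampleFormat through every nsAudioStream::Init() call, configure defines exactly one of MOZ_SAMPLE_TYPE_S16LE or MOZ_SAMPLE_TYPE_FLOAT32, and the media code keys its sample type off that macro. A minimal sketch of the pattern (not the literal patch; the BytesPerFrame helper is illustrative only):

#include <stddef.h>

// Exactly one of these is AC_DEFINE'd by configure: S16 on ARM targets,
// float32 everywhere else (see the configure.in hunks below).
#ifdef MOZ_SAMPLE_TYPE_S16LE
typedef short SampleType;   // fixed-point audio path
#else
typedef float SampleType;   // floating-point audio path
#endif

// With the format fixed at compile time, per-frame sizes and conversions no
// longer need a switch on a runtime format value.
static size_t BytesPerFrame(int aChannels)
{
  return sizeof(SampleType) * aChannels;
}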

View File

@@ -4144,6 +4144,8 @@ MOZ_CUBEB=
 MOZ_VORBIS=
 MOZ_TREMOR=
 MOZ_WAVE=1
+MOZ_SAMPLE_TYPE_FLOAT32=
+MOZ_SAMPLE_TYPE_S16LE=
 MOZ_MEDIA=
 MOZ_OPUS=1
 MOZ_WEBM=1
@@ -5192,6 +5194,19 @@ fi
 AC_SUBST(MOZ_WEBRTC)
 
+case "$target_cpu" in
+arm*)
+  MOZ_SAMPLE_TYPE_S16LE=1
+  AC_DEFINE(MOZ_SAMPLE_TYPE_S16LE)
+  AC_SUBST(MOZ_SAMPLE_TYPE_S16LE)
+;;
+*)
+  MOZ_SAMPLE_TYPE_FLOAT32=1
+  AC_DEFINE(MOZ_SAMPLE_TYPE_FLOAT32)
+  AC_SUBST(MOZ_SAMPLE_TYPE_FLOAT32)
+;;
+esac
+
 dnl ========================================================
 dnl = Enable Raw Codecs
 dnl ========================================================
@@ -5220,14 +5235,6 @@ if test -n "$MOZ_OGG"; then
     MOZ_SYDNEYAUDIO=1
     MOZ_CUBEB=1
     MOZ_MEDIA=1
-    case "$target_cpu" in
-    arm*)
-        MOZ_TREMOR=1
-    ;;
-    *)
-        MOZ_VORBIS=1
-    ;;
-    esac
 
 dnl Checks for __attribute__(aligned()) directive
 AC_CACHE_CHECK([__attribute__ ((aligned ())) support],
@@ -5367,14 +5374,11 @@ if test "$MOZ_WEBM"; then
     MOZ_SYDNEYAUDIO=1
     MOZ_CUBEB=1
     MOZ_MEDIA=1
-    case "$target_cpu" in
-    arm*)
-        MOZ_TREMOR=1
-    ;;
-    *)
-        MOZ_VORBIS=1
-    ;;
-    esac
+    if test "$MOZ_SAMPLE_TYPE_FLOAT32"; then
+        MOZ_VORBIS=1
+    else
+        MOZ_TREMOR=1
+    fi
 fi
 
 if test -n "$MOZ_VP8" -a -z "$MOZ_NATIVE_LIBVPX"; then

View File

@@ -116,8 +116,7 @@ nsHTMLAudioElement::MozSetup(PRUint32 aChannels, PRUint32 aRate)
   }
 
   mAudioStream = nsAudioStream::AllocateStream();
-  nsresult rv = mAudioStream->Init(aChannels, aRate,
-                                   nsAudioStream::FORMAT_FLOAT32);
+  nsresult rv = mAudioStream->Init(aChannels, aRate);
   if (NS_FAILED(rv)) {
     mAudioStream->Shutdown();
     mAudioStream = nullptr;
@@ -169,7 +168,30 @@ nsHTMLAudioElement::MozWriteAudio(const JS::Value& aData, JSContext* aCx, PRUint
   // Don't write more than can be written without blocking.
   PRUint32 writeLen = NS_MIN(mAudioStream->Available(), dataLength / mChannels);
 
-  nsresult rv = mAudioStream->Write(JS_GetFloat32ArrayData(tsrc, aCx), writeLen);
+  float* frames = JS_GetFloat32ArrayData(tsrc, aCx);
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+  // Convert the samples back to integers as we are using fixed point audio in
+  // the nsAudioStream.
+  nsAutoArrayPtr<short> shortsArray(new short[writeLen * mChannels]);
+  // Hard clip the samples.
+  for (PRUint32 i = 0; i < writeLen * mChannels; ++i) {
+    float scaled_value = floorf(0.5 + 32768 * frames[i]);
+    if (frames[i] < 0.0) {
+      shortsArray[i] = (scaled_value < -32768.0) ?
+        -32768 :
+        short(scaled_value);
+    } else {
+      shortsArray[i] = (scaled_value > 32767.0) ?
+        32767 :
+        short(scaled_value);
+    }
+  }
+  nsresult rv = mAudioStream->Write(shortsArray, writeLen);
+#else
+  nsresult rv = mAudioStream->Write(frames, writeLen);
+#endif
   if (NS_FAILED(rv)) {
     return rv;
   }
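The same hard-clip conversion reappears in nsNativeAudioStream::Write further down. Pulled out on its own, the rounding and clamping rule looks like this (standalone sketch; the FloatToS16 name is ours, not part of the patch):

#include <math.h>

// Map a nominally [-1.0, 1.0] float sample to S16, rounding to nearest and
// hard-clipping anything outside the representable range.
static short FloatToS16(float aSample)
{
  float scaled = floorf(0.5f + 32768.0f * aSample);
  if (aSample < 0.0f) {
    return scaled < -32768.0f ? -32768 : short(scaled);
  }
  return scaled > 32767.0f ? 32767 : short(scaled);
}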

View File

@@ -1150,8 +1150,7 @@ MediaStreamGraphImpl::CreateOrDestroyAudioStreams(GraphTime aAudioOutputStartTim
       audioOutputStream->mBlockedAudioTime = 0;
       audioOutputStream->mStream = nsAudioStream::AllocateStream();
       audioOutputStream->mStream->Init(audio->GetChannels(),
-                                       tracks->GetRate(),
-                                       audio->GetFirstFrameFormat());
+                                       tracks->GetRate());
       audioOutputStream->mTrackID = tracks->GetID();
     }
   }

View File

@@ -60,7 +60,7 @@ class nsNativeAudioStream : public nsAudioStream
   ~nsNativeAudioStream();
   nsNativeAudioStream();
 
-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
@@ -95,7 +95,7 @@ class nsRemotedAudioStream : public nsAudioStream
   nsRemotedAudioStream();
   ~nsRemotedAudioStream();
 
-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
@@ -422,11 +422,11 @@ nsNativeAudioStream::~nsNativeAudioStream()
 NS_IMPL_THREADSAFE_ISUPPORTS0(nsNativeAudioStream)
 
-nsresult nsNativeAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
+nsresult nsNativeAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate)
 {
   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
+  mFormat = MOZ_AUDIO_DATA_FORMAT;
 
   if (sa_stream_create_pcm(reinterpret_cast<sa_stream_t**>(&mAudioHandle),
                            NULL,
@@ -474,53 +474,40 @@ nsresult nsNativeAudioStream::Write(const void* aBuf, PRUint32 aFrames)
   if (s_data) {
     double scaled_volume = GetVolumeScale() * mVolume;
-    switch (mFormat) {
-      case FORMAT_U8: {
-        const PRUint8* buf = static_cast<const PRUint8*>(aBuf);
-        PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          s_data[i] = short(((PRInt32(buf[i]) - 128) * volume) >> 8);
-        }
-        break;
-      }
-      case FORMAT_S16_LE: {
-        const short* buf = static_cast<const short*>(aBuf);
-        PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          short s = buf[i];
-#if defined(IS_BIG_ENDIAN)
-          s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
-#endif
-          s_data[i] = short((PRInt32(s) * volume) >> 16);
-        }
-        break;
-      }
-      case FORMAT_FLOAT32: {
-        const float* buf = static_cast<const float*>(aBuf);
-        for (PRUint32 i = 0; i < samples; ++i) {
-          float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
-          if (buf[i] < 0.0) {
-            s_data[i] = (scaled_value < -32768.0) ?
-              -32768 :
-              short(scaled_value);
-          } else {
-            s_data[i] = (scaled_value > 32767.0) ?
-              32767 :
-              short(scaled_value);
-          }
-        }
-        break;
-      }
-    }
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+    const short* buf = static_cast<const short*>(aBuf);
+    PRInt32 volume = PRInt32((1 << 16) * scaled_volume);
+    for (PRUint32 i = 0; i < samples; ++i) {
+      short s = buf[i];
+#if defined(IS_BIG_ENDIAN)
+      s = ((s & 0x00ff) << 8) | ((s & 0xff00) >> 8);
+#endif
+      s_data[i] = short((PRInt32(s) * volume) >> 16);
+    }
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+    const SampleType* buf = static_cast<const SampleType*>(aBuf);
+    for (PRUint32 i = 0; i < samples; ++i) {
+      float scaled_value = floorf(0.5 + 32768 * buf[i] * scaled_volume);
+      if (buf[i] < 0.0) {
+        s_data[i] = (scaled_value < -32768.0) ?
+          -32768 :
+          short(scaled_value);
+      } else {
+        s_data[i] = (scaled_value > 32767.0) ?
+          32767 :
+          short(scaled_value);
+      }
+    }
+#endif
 
     if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
                         s_data.get(),
                         samples * sizeof(short)) != SA_SUCCESS)
     {
       PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
       mInError = true;
       return NS_ERROR_FAILURE;
     }
   }
   return NS_OK;
 }
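The integer paths above (and the DataCallback hunk further down) apply volume in 16.16 fixed point rather than multiplying each sample by a float. In isolation the operation is (sketch only; the helper name is ours):

// Scale an S16 sample by a volume in [0.0, 1.0] using 16.16 fixed point:
// one integer multiply and a shift per sample, no per-sample float math.
static short ScaleS16(short aSample, double aVolume)
{
  int volume = int((1 << 16) * aVolume);      // volume as Q16.16
  return short((int(aSample) * volume) >> 16); // product scaled back to S16
}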
@@ -644,26 +631,12 @@ NS_IMPL_THREADSAFE_ISUPPORTS0(nsRemotedAudioStream)
 nsresult
 nsRemotedAudioStream::Init(PRInt32 aNumChannels,
-                           PRInt32 aRate,
-                           SampleFormat aFormat)
+                           PRInt32 aRate)
 {
   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
-
-  switch (mFormat) {
-    case FORMAT_U8: {
-      mBytesPerFrame = sizeof(PRUint8) * mChannels;
-      break;
-    }
-    case FORMAT_S16_LE: {
-      mBytesPerFrame = sizeof(short) * mChannels;
-      break;
-    }
-    case FORMAT_FLOAT32: {
-      mBytesPerFrame = sizeof(float) * mChannels;
-    }
-  }
+  mFormat = MOZ_AUDIO_DATA_FORMAT;
+  mBytesPerFrame = sizeof(SampleType) * mChannels;
 
   nsCOMPtr<nsIRunnable> event = new AudioInitEvent(this);
   NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
@@ -861,7 +834,7 @@ class nsBufferedAudioStream : public nsAudioStream
   nsBufferedAudioStream();
   ~nsBufferedAudioStream();
 
-  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
+  nsresult Init(PRInt32 aNumChannels, PRInt32 aRate);
   void Shutdown();
   nsresult Write(const void* aBuf, PRUint32 aFrames);
   PRUint32 Available();
@@ -963,7 +936,7 @@ nsBufferedAudioStream::~nsBufferedAudioStream()
 NS_IMPL_THREADSAFE_ISUPPORTS0(nsBufferedAudioStream)
 
 nsresult
-nsBufferedAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
+nsBufferedAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate)
 {
   cubeb* cubebContext = GetCubebContext();
@@ -973,23 +946,17 @@ nsBufferedAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aF
   mRate = aRate;
   mChannels = aNumChannels;
-  mFormat = aFormat;
+  mFormat = MOZ_AUDIO_DATA_FORMAT;
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = aNumChannels;
-  switch (aFormat) {
-  case FORMAT_S16_LE:
-    params.format = CUBEB_SAMPLE_S16LE;
-    mBytesPerFrame = sizeof(short) * aNumChannels;
-    break;
-  case FORMAT_FLOAT32:
-    params.format = CUBEB_SAMPLE_FLOAT32NE;
-    mBytesPerFrame = sizeof(float) * aNumChannels;
-    break;
-  default:
-    return NS_ERROR_FAILURE;
-  }
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+  params.format = CUBEB_SAMPLE_S16NE;
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+  params.format = CUBEB_SAMPLE_FLOAT32NE;
+#endif
+  mBytesPerFrame = sizeof(float) * aNumChannels;
 
   {
     cubeb_stream* stream;
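Seen in isolation, the cubeb side of this hunk is just a preprocessor branch over the stream parameters instead of a runtime switch. A standalone sketch (the MakeStreamParams helper and its include path are ours; newer cubeb versions add more fields to cubeb_stream_params, which a real caller would also fill in):

#include "cubeb/cubeb.h"  // assumed include path for the cubeb header

// Fill out stream parameters for the compile-time sample format.
// CUBEB_SAMPLE_S16NE / CUBEB_SAMPLE_FLOAT32NE are cubeb's native-endian
// 16-bit and float32 sample formats.
static cubeb_stream_params MakeStreamParams(int aRate, int aChannels)
{
  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = aChannels;
#ifdef MOZ_SAMPLE_TYPE_S16LE
  params.format = CUBEB_SAMPLE_S16NE;
#else
  params.format = CUBEB_SAMPLE_FLOAT32NE;
#endif
  return params;
}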
@@ -1221,30 +1188,22 @@ nsBufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
         output += input_size[i];
       } else {
         // Adjust volume as each sample is copied out.
-        switch (mFormat) {
-          case FORMAT_S16_LE: {
-            PRInt32 volume = PRInt32(1 << 16) * scaled_volume;
-            const short* src = static_cast<const short*>(input[i]);
-            short* dst = reinterpret_cast<short*>(output);
-            for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-              dst[j] = short((PRInt32(src[j]) * volume) >> 16);
-            }
-            output += input_size[i];
-            break;
-          }
-          case FORMAT_FLOAT32: {
-            const float* src = static_cast<const float*>(input[i]);
-            float* dst = reinterpret_cast<float*>(output);
-            for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
-              dst[j] = src[j] * scaled_volume;
-            }
-            output += input_size[i];
-            break;
-          }
-          default:
-            return -1;
-        }
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+        PRInt32 volume = PRInt32(1 << 16) * scaled_volume;
+        const short* src = static_cast<const short*>(input[i]);
+        short* dst = reinterpret_cast<short*>(output);
+        for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
+          dst[j] = short((PRInt32(src[j]) * volume) >> 16);
+        }
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
+        const float* src = static_cast<const float*>(input[i]);
+        float* dst = reinterpret_cast<float*>(output);
+        for (PRUint32 j = 0; j < input_size[i] / (mBytesPerFrame / mChannels); ++j) {
+          dst[j] = src[j] * scaled_volume;
+        }
+#endif
+        output += input_size[i];
       }
     }

View File

@@ -11,6 +11,14 @@
 #include "nsIThread.h"
 #include "nsAutoPtr.h"
 
+#ifdef MOZ_SAMPLE_TYPE_S16LE
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
+typedef short SampleType;
+#else
+#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
+typedef float SampleType;
+#endif
+
 // Access to a single instance of this class must be synchronized by
 // callers, or made from a single thread. One exception is that access to
 // GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels,Format}
@@ -28,8 +36,7 @@ public:
   nsAudioStream()
     : mRate(0),
-      mChannels(0),
-      mFormat(FORMAT_S16_LE)
+      mChannels(0)
   {}
 
   virtual ~nsAudioStream();
@@ -56,7 +63,7 @@ public:
   // (22050Hz, 44100Hz, etc).
   // Unsafe to call with a monitor held due to synchronous event execution
   // on the main thread, which may attempt to acquire any held monitor.
-  virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat) = 0;
+  virtual nsresult Init(PRInt32 aNumChannels, PRInt32 aRate) = 0;
 
   // Closes the stream. All future use of the stream is an error.
   // Unsafe to call with a monitor held due to synchronous event execution
@@ -106,7 +113,7 @@ public:
   int GetRate() { return mRate; }
   int GetChannels() { return mChannels; }
-  SampleFormat GetFormat() { return mFormat; }
+  SampleFormat GetFormat() { return MOZ_AUDIO_DATA_FORMAT; }
 
 protected:
   nsCOMPtr<nsIThread> mAudioPlaybackThread;

View File

@@ -53,29 +53,25 @@ public:
   bool mHasVideo;
 };
 
-#ifdef MOZ_TREMOR
+#ifdef MOZ_SAMPLE_TYPE_S16LE
 #include <ogg/os_types.h>
 typedef ogg_int32_t VorbisPCMValue;
 typedef short AudioDataValue;
-#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_S16_LE)
 #define MOZ_CLIP_TO_15(x) ((x)<-32768?-32768:(x)<=32767?(x):32767)
 // Convert the output of vorbis_synthesis_pcmout to a AudioDataValue
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) \
  (static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x)>>9)))
 // Convert a AudioDataValue to a float for the Audio API
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) ((x)*(1.F/32768))
-#define MOZ_SAMPLE_TYPE_S16LE 1
 
-#else /*MOZ_VORBIS*/
+#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
 typedef float VorbisPCMValue;
 typedef float AudioDataValue;
-#define MOZ_AUDIO_DATA_FORMAT (nsAudioStream::FORMAT_FLOAT32)
 #define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
 #define MOZ_CONVERT_AUDIO_SAMPLE(x) (x)
-#define MOZ_SAMPLE_TYPE_FLOAT32 1
 #endif
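With the guard now keyed off MOZ_SAMPLE_TYPE_S16LE instead of MOZ_TREMOR, the conversion macros read the same way in both builds. A conversion loop over decoded Vorbis/Tremor output would look like this (sketch assuming the typedefs and macros above are in scope; the helper name is ours):

// Copy one decoded channel into the audio buffer using the build-selected
// sample type: >>9 plus clipping on S16 builds, straight copy on float builds.
static void
CopyVorbisChannel(const VorbisPCMValue* aIn, AudioDataValue* aOut, PRUint32 aFrames)
{
  for (PRUint32 i = 0; i < aFrames; ++i) {
    aOut[i] = MOZ_CONVERT_VORBIS_SAMPLE(aIn[i]);
  }
}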

View File

@@ -1008,7 +1008,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
   // are unsafe to call with the decoder monitor held are documented as such
   // in nsAudioStream.h.
   nsRefPtr<nsAudioStream> audioStream = nsAudioStream::AllocateStream();
-  audioStream->Init(channels, rate, MOZ_AUDIO_DATA_FORMAT);
+  audioStream->Init(channels, rate);
 
   {
     // We must hold the monitor while setting mAudioStream or whenever we query

View File

@@ -297,9 +297,7 @@ AudioParent::AudioParent(PRInt32 aNumChannels, PRInt32 aRate, PRInt32 aFormat)
 {
   mStream = nsAudioStream::AllocateStream();
   NS_ASSERTION(mStream, "AudioStream allocation failed.");
-  if (NS_FAILED(mStream->Init(aNumChannels,
-                              aRate,
-                              (nsAudioStream::SampleFormat) aFormat))) {
+  if (NS_FAILED(mStream->Init(aNumChannels, aRate))) {
     NS_WARNING("AudioStream initialization failed.");
     mStream = nullptr;
     return;