mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-25 22:01:30 +00:00)
Backed out changeset 1362f0ca86d2 (bug 490705 - Support Audio Data API: Get, Manipulate, Play & Save) due to test failures. (merge commit) a=bustage fix
commit 860a0298a2
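For context, the changeset being backed out exposed an experimental audio data API on HTML media elements (mozSetup, mozWriteAudio, mozCurrentSampleOffset, mozChannels, mozSampleRate, mozFrameBufferLength, and the MozAudioAvailable event), as can be seen from the interfaces touched in the diff below. The following is a minimal JavaScript sketch of how a page would have used that surface; it is reconstructed from those names and is not part of this commit.

    // Read decoded samples from one <audio> element and write them to another.
    var src = document.querySelector("audio");
    var sink = new Audio();

    src.addEventListener("loadedmetadata", function () {
      // Mirror the source's channel count and sample rate into a writable stream.
      sink.mozSetup(src.mozChannels, src.mozSampleRate);
    }, false);

    src.addEventListener("MozAudioAvailable", function (e) {
      // e.frameBuffer holds one framebuffer's worth of interleaved float samples;
      // e.time is the stream time, in seconds, of the first sample in the buffer.
      // mozWriteAudio returns how many samples were actually queued for playback.
      var written = sink.mozWriteAudio(e.frameBuffer);
    }, false);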
@@ -669,7 +669,6 @@ nsContentUtils::InitializeEventTable() {
{ nsGkAtoms::onratechange, NS_RATECHANGE, EventNameType_HTML, NS_EVENT_NULL },
{ nsGkAtoms::ondurationchange, NS_DURATIONCHANGE, EventNameType_HTML, NS_EVENT_NULL },
{ nsGkAtoms::onvolumechange, NS_VOLUMECHANGE, EventNameType_HTML, NS_EVENT_NULL },
{ nsGkAtoms::onMozAudioAvailable, NS_MOZAUDIOAVAILABLE, EventNameType_None, NS_EVENT_NULL },
#endif // MOZ_MEDIA
{ nsGkAtoms::onMozAfterPaint, NS_AFTERPAINT, EventNameType_None, NS_EVENT },
{ nsGkAtoms::onMozBeforePaint, NS_BEFOREPAINT, EventNameType_None, NS_EVENT_NULL },
@@ -1665,7 +1665,6 @@ GK_ATOM(onended, "onended")
GK_ATOM(onratechange, "onratechange")
GK_ATOM(ondurationchange, "ondurationchange")
GK_ATOM(onvolumechange, "onvolumechange")
GK_ATOM(onMozAudioAvailable, "onMozAudioAvailable")
GK_ATOM(loadstart, "loadstart")
GK_ATOM(progress, "progress")
GK_ATOM(suspend, "suspend")
@@ -511,11 +511,6 @@ nsNodeUtils::CloneAndAdopt(nsINode *aNode, PRBool aClone, PRBool aDeep,
if (elm->MayHavePaintEventListener()) {
window->SetHasPaintEventListeners();
}
#ifdef MOZ_MEDIA
if (elm->MayHaveAudioAvailableEventListener()) {
window->SetHasAudioAvailableEventListeners();
}
#endif
}
}
}
@@ -68,7 +68,6 @@ public:
mMayHaveMutationListeners(PR_FALSE),
mMayHaveCapturingListeners(PR_FALSE),
mMayHaveSystemGroupListeners(PR_FALSE),
mMayHaveAudioAvailableEventListener(PR_FALSE),
mNoListenerForEvent(0)
{}
@@ -206,20 +205,12 @@ public:
*/
PRBool MayHavePaintEventListener() { return mMayHavePaintEventListener; }

/**
* Returns PR_TRUE if there may be a MozAudioAvailable event listener registered,
* PR_FALSE if there definitely isn't.
*/
PRBool MayHaveAudioAvailableEventListener() { return mMayHaveAudioAvailableEventListener; }

protected:
PRUint32 mMayHavePaintEventListener : 1;
PRUint32 mMayHaveMutationListeners : 1;
PRUint32 mMayHaveCapturingListeners : 1;
PRUint32 mMayHaveSystemGroupListeners : 1;
PRUint32 mMayHaveAudioAvailableEventListener : 1;
PRUint32 mNoListenerForEvent : 27;
PRUint32 mNoListenerForEvent : 28;
};

NS_DEFINE_STATIC_IID_ACCESSOR(nsIEventListenerManager,
@@ -122,13 +122,6 @@ NS_NewDOMNotifyPaintEvent(nsIDOMEvent** aResult, nsPresContext* aPresContext,
PRUint32 aEventType = 0,
nsInvalidateRequestList* aInvalidateRequests = nsnull);
nsresult
NS_NewDOMAudioAvailableEvent(nsIDOMEvent** aResult, nsPresContext* aPresContext,
nsEvent* aEvent,
PRUint32 aEventType = 0,
float* aFrameBuffer = nsnull,
PRUint32 aFrameBufferLength = 0,
float aTime = 0);
nsresult
NS_NewDOMSimpleGestureEvent(nsIDOMEvent** aInstancePtrResult, nsPresContext* aPresContext, class nsSimpleGestureEvent* aEvent);
nsresult
NS_NewDOMScrollAreaEvent(nsIDOMEvent** aInstancePtrResult, nsPresContext* aPresContext, class nsScrollAreaEvent* aEvent);
@@ -78,7 +78,6 @@ CPPSRCS = \
nsDOMProgressEvent.cpp \
nsDOMDataTransfer.cpp \
nsDOMNotifyPaintEvent.cpp \
nsDOMNotifyAudioAvailableEvent.cpp \
nsDOMSimpleGestureEvent.cpp \
nsDOMMozTouchEvent.cpp \
nsDOMEventTargetHelper.cpp \
@@ -88,7 +88,7 @@ static const char* const sEventNames[] = {
"loadstart", "progress", "suspend", "emptied", "stalled", "play", "pause",
"loadedmetadata", "loadeddata", "waiting", "playing", "canplay",
"canplaythrough", "seeking", "seeked", "timeupdate", "ended", "ratechange",
"durationchange", "volumechange", "MozAudioAvailable",
"durationchange", "volumechange",
#endif // MOZ_MEDIA
"MozAfterPaint",
"MozBeforePaint",
@@ -1300,8 +1300,6 @@ const char* nsDOMEvent::GetEventName(PRUint32 aEventType)
return sEventNames[eDOMEvents_durationchange];
case NS_VOLUMECHANGE:
return sEventNames[eDOMEvents_volumechange];
case NS_MOZAUDIOAVAILABLE:
return sEventNames[eDOMEvents_mozaudioavailable];
#endif
case NS_AFTERPAINT:
return sEventNames[eDOMEvents_afterpaint];
@@ -168,7 +168,6 @@ public:
eDOMEvents_ratechange,
eDOMEvents_durationchange,
eDOMEvents_volumechange,
eDOMEvents_mozaudioavailable,
#endif
eDOMEvents_afterpaint,
eDOMEvents_beforepaint,
@@ -1,171 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla code.
*
* The Initial Developer of the Original Code is the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2010
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* David Humphrey <david.humphrey@senecac.on.ca>
* Yury Delendik
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */

#include "nsDOMNotifyAudioAvailableEvent.h"
#include "jstypedarray.h"

nsDOMNotifyAudioAvailableEvent::nsDOMNotifyAudioAvailableEvent(nsPresContext* aPresContext,
nsEvent* aEvent,
PRUint32 aEventType,
float* aFrameBuffer,
PRUint32 aFrameBufferLength,
float aTime)
: nsDOMEvent(aPresContext, aEvent),
mFrameBuffer(aFrameBuffer),
mFrameBufferLength(aFrameBufferLength),
mTime(aTime),
mCachedArray(nsnull),
mAllowAudioData(PR_FALSE)
{
if (mEvent) {
mEvent->message = aEventType;
}
}

DOMCI_DATA(NotifyAudioAvailableEvent, nsDOMNotifyAudioAvailableEvent)

NS_IMPL_CYCLE_COLLECTION_CLASS(nsDOMNotifyAudioAvailableEvent)

NS_IMPL_ADDREF_INHERITED(nsDOMNotifyAudioAvailableEvent, nsDOMEvent)
NS_IMPL_RELEASE_INHERITED(nsDOMNotifyAudioAvailableEvent, nsDOMEvent)

NS_IMPL_CYCLE_COLLECTION_ROOT_BEGIN(nsDOMNotifyAudioAvailableEvent)
if (tmp->mCachedArray) {
NS_DROP_JS_OBJECTS(tmp, nsDOMNotifyAudioAvailableEvent);
tmp->mCachedArray = nsnull;
}
NS_IMPL_CYCLE_COLLECTION_ROOT_END

NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(nsDOMNotifyAudioAvailableEvent, nsDOMEvent)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END

NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(nsDOMNotifyAudioAvailableEvent, nsDOMEvent)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(nsDOMNotifyAudioAvailableEvent)
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mCachedArray)
NS_IMPL_CYCLE_COLLECTION_TRACE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(nsDOMNotifyAudioAvailableEvent)
NS_INTERFACE_MAP_ENTRY(nsIDOMNotifyAudioAvailableEvent)
NS_DOM_INTERFACE_MAP_ENTRY_CLASSINFO(NotifyAudioAvailableEvent)
NS_INTERFACE_MAP_END_INHERITING(nsDOMEvent)

nsDOMNotifyAudioAvailableEvent::~nsDOMNotifyAudioAvailableEvent()
{
if (mCachedArray) {
NS_DROP_JS_OBJECTS(this, nsDOMNotifyAudioAvailableEvent);
mCachedArray = nsnull;
}
}

NS_IMETHODIMP
nsDOMNotifyAudioAvailableEvent::GetFrameBuffer(JSContext* aCx, jsval* aResult)
{
if (!mAllowAudioData) {
// Media is not same-origin, don't allow the data out.
return NS_ERROR_DOM_SECURITY_ERR;
}

if (mCachedArray) {
*aResult = OBJECT_TO_JSVAL(mCachedArray);
return NS_OK;
}

// Cache this array so we don't recreate on next call.
NS_HOLD_JS_OBJECTS(this, nsDOMNotifyAudioAvailableEvent);

mCachedArray = js_CreateTypedArray(aCx, js::TypedArray::TYPE_FLOAT32, mFrameBufferLength);
if (!mCachedArray) {
NS_DROP_JS_OBJECTS(this, nsDOMNotifyAudioAvailableEvent);
NS_ERROR("Failed to get audio signal!");
return NS_ERROR_FAILURE;
}

js::TypedArray *tdest = js::TypedArray::fromJSObject(mCachedArray);
memcpy(tdest->data, mFrameBuffer.get(), mFrameBufferLength * sizeof(float));

*aResult = OBJECT_TO_JSVAL(mCachedArray);
return NS_OK;
}

NS_IMETHODIMP
nsDOMNotifyAudioAvailableEvent::GetTime(float *aRetVal)
{
*aRetVal = mTime;
return NS_OK;
}

NS_IMETHODIMP
nsDOMNotifyAudioAvailableEvent::InitAudioAvailableEvent(const nsAString& aType,
PRBool aCanBubble,
PRBool aCancelable,
float* aFrameBuffer,
PRUint32 aFrameBufferLength,
float aTime,
PRBool aAllowAudioData)
{
nsresult rv = nsDOMEvent::InitEvent(aType, aCanBubble, aCancelable);
NS_ENSURE_SUCCESS(rv, rv);

mFrameBuffer = aFrameBuffer;
mFrameBufferLength = aFrameBufferLength;
mTime = aTime;
mAllowAudioData = aAllowAudioData;
return NS_OK;
}

nsresult NS_NewDOMAudioAvailableEvent(nsIDOMEvent** aInstancePtrResult,
nsPresContext* aPresContext,
nsEvent *aEvent,
PRUint32 aEventType,
float* aFrameBuffer,
PRUint32 aFrameBufferLength,
float aTime)
{
nsDOMNotifyAudioAvailableEvent* it =
new nsDOMNotifyAudioAvailableEvent(aPresContext, aEvent, aEventType,
aFrameBuffer, aFrameBufferLength, aTime);
if (nsnull == it) {
return NS_ERROR_OUT_OF_MEMORY;
}

return CallQueryInterface(it, aInstancePtrResult);
}
@@ -1,81 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla code.
*
* The Initial Developer of the Original Code is the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2010
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* David Humphrey <david.humphrey@senecac.on.ca>
* Yury Delendik
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */

#ifndef nsDOMNotifyAudioAvailableEvent_h_
#define nsDOMNotifyAudioAvailableEvent_h_

#include "nsIDOMNotifyAudioAvailableEvent.h"
#include "nsDOMEvent.h"
#include "nsPresContext.h"
#include "nsCycleCollectionParticipant.h"

class nsDOMNotifyAudioAvailableEvent : public nsDOMEvent,
public nsIDOMNotifyAudioAvailableEvent
{
public:
nsDOMNotifyAudioAvailableEvent(nsPresContext* aPresContext, nsEvent* aEvent,
PRUint32 aEventType, float * aFrameBuffer,
PRUint32 aFrameBufferLength, float aTime);

NS_DECL_ISUPPORTS_INHERITED
NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS_INHERITED(nsDOMNotifyAudioAvailableEvent,
nsDOMEvent)

NS_DECL_NSIDOMNOTIFYAUDIOAVAILABLEEVENT
NS_FORWARD_NSIDOMEVENT(nsDOMEvent::)

nsresult NS_NewDOMAudioAvailableEvent(nsIDOMEvent** aInstancePtrResult,
nsPresContext* aPresContext,
nsEvent *aEvent,
PRUint32 aEventType,
float * aFrameBuffer,
PRUint32 aFrameBufferLength,
float aTime);

~nsDOMNotifyAudioAvailableEvent();

private:
nsAutoArrayPtr<float> mFrameBuffer;
PRUint32 mFrameBufferLength;
float mTime;
JSObject* mCachedArray;
PRPackedBool mAllowAudioData;
};

#endif // nsDOMNotifyAudioAvailableEvent_h_
@@ -840,8 +840,6 @@ nsEventDispatcher::CreateEvent(nsPresContext* aPresContext,
return NS_NewDOMTransitionEvent(aDOMEvent, aPresContext, nsnull);
if (aEventType.LowerCaseEqualsLiteral("popstateevent"))
return NS_NewDOMPopStateEvent(aDOMEvent, aPresContext, nsnull);
if (aEventType.LowerCaseEqualsLiteral("mozaudioavailableevent"))
return NS_NewDOMAudioAvailableEvent(aDOMEvent, aPresContext, nsnull);
if (aEventType.LowerCaseEqualsLiteral("closeevent"))
return NS_NewDOMCloseEvent(aDOMEvent, aPresContext, nsnull);
@@ -474,12 +474,6 @@ nsEventListenerManager::AddEventListener(nsIDOMEventListener *aListener,
if (window) {
window->SetHasPaintEventListeners();
}
} else if (aType == NS_MOZAUDIOAVAILABLE) {
mMayHaveAudioAvailableEventListener = PR_TRUE;
nsPIDOMWindow* window = GetInnerWindowForTarget();
if (window) {
window->SetHasAudioAvailableEventListeners();
}
} else if (aType >= NS_MUTATION_START && aType <= NS_MUTATION_END) {
// For mutation listeners, we need to update the global bit on the DOM window.
// Otherwise we won't actually fire the mutation event.
@@ -50,8 +50,6 @@
#include "nsIObserver.h"
#include "ImageLayers.h"

#include "nsAudioStream.h"

// Define to output information on decoding and painting framerate
/* #define DEBUG_FRAME_RATE 1 */
@@ -129,7 +127,7 @@ public:
// Called by the video decoder object, on the main thread,
// when it has read the metadata containing video dimensions,
// etc.
void MetadataLoaded(PRUint32 aChannels, PRUint32 aRate);
void MetadataLoaded();

// Called by the video decoder object, on the main thread,
// when it has read the first frame of the video
@@ -188,9 +186,6 @@ public:
nsresult DispatchProgressEvent(const nsAString& aName);
nsresult DispatchAsyncSimpleEvent(const nsAString& aName);
nsresult DispatchAsyncProgressEvent(const nsAString& aName);
nsresult DispatchAudioAvailableEvent(float* aFrameBuffer,
PRUint32 aFrameBufferLength,
PRUint64 aTime);

// Called by the decoder when some data has been downloaded or
// buffering/seeking has ended. aNextFrameAvailable is true when
@@ -290,18 +285,6 @@ public:
*/
void NotifyLoadError();

/**
* Called when data has been written to the underlying audio stream.
*/
void NotifyAudioAvailable(float* aFrameBuffer, PRUint32 aFrameBufferLength,
PRUint64 aTime);

/**
* Called in order to check whether some node (this window, its document,
* or content in that document) has a MozAudioAvailable event listener.
*/
PRBool MayHaveAudioAvailableEventListener();

virtual PRBool IsNodeOfType(PRUint32 aFlags) const;

/**
@@ -499,23 +482,10 @@ protected:
// Current audio volume
float mVolume;

// Current number of audio channels.
PRUint32 mChannels;

// Current audio sample rate.
PRUint32 mRate;

// Size of the media. Updated by the decoder on the main thread if
// it changes. Defaults to a width and height of -1 if not set.
nsIntSize mMediaSize;

// An audio stream for writing audio directly from JS.
nsAutoPtr<nsAudioStream> mAudioStream;

// PR_TRUE if MozAudioAvailable events can be safely dispatched, based on
// a media and element same-origin check.
PRBool mAllowAudioData;

// If true then we have begun downloading the media content.
// Set to false when completed, or not yet started.
PRPackedBool mBegun;
@@ -601,9 +571,6 @@ protected:
// down.
PRPackedBool mShuttingDown;

// PR_TRUE if a same-origin check has been done for the media element and resource.
PRPackedBool mMediaSecurityVerified;

nsRefPtr<gfxASurface> mPrintSurface;
};
@ -57,8 +57,6 @@
|
||||
#include "nsIScriptSecurityManager.h"
|
||||
#include "nsIXPConnect.h"
|
||||
#include "jsapi.h"
|
||||
#include "jscntxt.h"
|
||||
#include "jstypedarray.h"
|
||||
#include "nsJSUtils.h"
|
||||
|
||||
#include "nsIRenderingContext.h"
|
||||
@ -149,99 +147,6 @@ nsHTMLAudioElement::Initialize(nsISupports* aOwner, JSContext* aContext,
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLAudioElement::MozSetup(PRUint32 aChannels, PRUint32 aRate)
|
||||
{
|
||||
// If there is already a src provided, don't setup another stream
|
||||
if (mDecoder) {
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
// MozWriteAudio divides by mChannels, so validate now.
|
||||
if (0 == aChannels) {
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
|
||||
if (mAudioStream) {
|
||||
mAudioStream->Shutdown();
|
||||
}
|
||||
|
||||
mAudioStream = new nsAudioStream();
|
||||
nsresult rv = mAudioStream->Init(aChannels, aRate,
|
||||
nsAudioStream::FORMAT_FLOAT32);
|
||||
if (NS_FAILED(rv)) {
|
||||
mAudioStream->Shutdown();
|
||||
mAudioStream = nsnull;
|
||||
return rv;
|
||||
}
|
||||
|
||||
MetadataLoaded(aChannels, aRate);
|
||||
mAudioStream->SetVolume(mVolume);
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLAudioElement::MozWriteAudio(const jsval &aData, JSContext *aCx, PRUint32 *aRetVal)
|
||||
{
|
||||
if (!mAudioStream) {
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
}
|
||||
|
||||
if (JSVAL_IS_PRIMITIVE(aData)) {
|
||||
return NS_ERROR_DOM_TYPE_MISMATCH_ERR;
|
||||
}
|
||||
|
||||
JSObject *darray = JSVAL_TO_OBJECT(aData);
|
||||
js::AutoValueRooter tsrc_tvr(aCx);
|
||||
js::TypedArray *tsrc = NULL;
|
||||
|
||||
// Allow either Float32Array or plain JS Array
|
||||
if (darray->getClass() == &js::TypedArray::fastClasses[js::TypedArray::TYPE_FLOAT32])
|
||||
{
|
||||
tsrc = js::TypedArray::fromJSObject(darray);
|
||||
} else if (JS_IsArrayObject(aCx, darray)) {
|
||||
JSObject *nobj = js_CreateTypedArrayWithArray(aCx, js::TypedArray::TYPE_FLOAT32, darray);
|
||||
if (!nobj) {
|
||||
return NS_ERROR_DOM_TYPE_MISMATCH_ERR;
|
||||
}
|
||||
*tsrc_tvr.jsval_addr() = OBJECT_TO_JSVAL(nobj);
|
||||
tsrc = js::TypedArray::fromJSObject(nobj);
|
||||
} else {
|
||||
return NS_ERROR_DOM_TYPE_MISMATCH_ERR;
|
||||
}
|
||||
|
||||
PRUint32 dataLength = tsrc->length;
|
||||
|
||||
// Make sure that we are going to write the correct amount of data based
|
||||
// on number of channels.
|
||||
if (dataLength % mChannels != 0) {
|
||||
return NS_ERROR_DOM_INDEX_SIZE_ERR;
|
||||
}
|
||||
|
||||
// Don't write more than can be written without blocking.
|
||||
PRUint32 writeLen = NS_MIN(mAudioStream->Available(), dataLength);
|
||||
|
||||
nsresult rv = mAudioStream->Write(tsrc->data, writeLen, PR_TRUE);
|
||||
if (NS_FAILED(rv)) {
|
||||
return rv;
|
||||
}
|
||||
|
||||
// Return the actual amount written.
|
||||
*aRetVal = writeLen;
|
||||
return rv;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLAudioElement::MozCurrentSampleOffset(PRUint64 *aRetVal)
|
||||
{
|
||||
if (!mAudioStream) {
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
}
|
||||
|
||||
*aRetVal = mAudioStream->GetSampleOffset();
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
|
||||
nsresult nsHTMLAudioElement::SetAcceptHeader(nsIHttpChannel* aChannel)
|
||||
{
|
||||
|
@ -89,9 +89,6 @@
|
||||
#include "nsIDocShellTreeItem.h"
|
||||
#include "nsIAsyncVerifyRedirectCallback.h"
|
||||
|
||||
#include "nsIPrivateDOMEvent.h"
|
||||
#include "nsIDOMNotifyAudioAvailableEvent.h"
|
||||
|
||||
#ifdef MOZ_OGG
|
||||
#include "nsOggDecoder.h"
|
||||
#endif
|
||||
@ -119,8 +116,6 @@ static PRLogModuleInfo* gMediaElementEventsLog;
|
||||
#include "nsIChannelPolicy.h"
|
||||
#include "nsChannelPolicy.h"
|
||||
|
||||
#define MS_PER_SECOND 1000
|
||||
|
||||
using namespace mozilla::layers;
|
||||
|
||||
// Under certain conditions there may be no-one holding references to
|
||||
@ -197,12 +192,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
NS_IMETHOD Run()
|
||||
{
|
||||
NS_IMETHOD Run() {
|
||||
// Silently cancel if our load has been cancelled.
|
||||
if (IsCancelled())
|
||||
return NS_OK;
|
||||
|
||||
return mProgress ?
|
||||
mElement->DispatchProgressEvent(mName) :
|
||||
mElement->DispatchSimpleEvent(mName);
|
||||
@ -637,40 +630,6 @@ void nsHTMLMediaElement::NotifyLoadError()
|
||||
}
|
||||
}
|
||||
|
||||
void nsHTMLMediaElement::NotifyAudioAvailable(float* aFrameBuffer,
|
||||
PRUint32 aFrameBufferLength,
|
||||
PRUint64 aTime)
|
||||
{
|
||||
// Do same-origin check on element and media before allowing MozAudioAvailable events.
|
||||
if (!mMediaSecurityVerified) {
|
||||
nsCOMPtr<nsIPrincipal> principal = GetCurrentPrincipal();
|
||||
nsresult rv = NodePrincipal()->Subsumes(principal, &mAllowAudioData);
|
||||
if (NS_FAILED(rv)) {
|
||||
mAllowAudioData = PR_FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
DispatchAudioAvailableEvent(aFrameBuffer, aFrameBufferLength, aTime);
|
||||
}
|
||||
|
||||
PRBool nsHTMLMediaElement::MayHaveAudioAvailableEventListener()
|
||||
{
|
||||
// Determine if the current element is focused, if it is not focused
|
||||
// then we should not try to blur. Note: we allow for the case of
|
||||
// |var a = new Audio()| with no parent document.
|
||||
nsIDocument *document = GetDocument();
|
||||
if (!document) {
|
||||
return PR_TRUE;
|
||||
}
|
||||
|
||||
nsPIDOMWindow *window = document->GetInnerWindow();
|
||||
if (!window) {
|
||||
return PR_TRUE;
|
||||
}
|
||||
|
||||
return window->HasAudioAvailableEventListeners();
|
||||
}
|
||||
|
||||
void nsHTMLMediaElement::LoadFromSourceChildren()
|
||||
{
|
||||
NS_ASSERTION(mDelayingLoadEvent,
|
||||
@ -703,13 +662,6 @@ nsresult nsHTMLMediaElement::LoadResource(nsIURI* aURI)
|
||||
"Should delay load event (if in document) during load");
|
||||
nsresult rv;
|
||||
|
||||
// If a previous call to mozSetup() was made, kill that media stream
|
||||
// in order to use this new src instead.
|
||||
if (mAudioStream) {
|
||||
mAudioStream->Shutdown();
|
||||
mAudioStream = nsnull;
|
||||
}
|
||||
|
||||
if (mChannel) {
|
||||
mChannel->Cancel(NS_BINDING_ABORTED);
|
||||
mChannel = nsnull;
|
||||
@ -982,61 +934,14 @@ NS_IMETHODIMP nsHTMLMediaElement::SetVolume(float aVolume)
|
||||
|
||||
mVolume = aVolume;
|
||||
|
||||
if (mDecoder && !mMuted) {
|
||||
if (mDecoder && !mMuted)
|
||||
mDecoder->SetVolume(mVolume);
|
||||
} else if (mAudioStream && !mMuted) {
|
||||
mAudioStream->SetVolume(mVolume);
|
||||
}
|
||||
|
||||
DispatchAsyncSimpleEvent(NS_LITERAL_STRING("volumechange"));
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLMediaElement::GetMozChannels(PRUint32 *aMozChannels)
|
||||
{
|
||||
if (!mDecoder && !mAudioStream) {
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
}
|
||||
|
||||
*aMozChannels = mChannels;
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLMediaElement::GetMozSampleRate(PRUint32 *aMozSampleRate)
|
||||
{
|
||||
if (!mDecoder && !mAudioStream) {
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
}
|
||||
|
||||
*aMozSampleRate = mRate;
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLMediaElement::GetMozFrameBufferLength(PRUint32 *aMozFrameBufferLength)
|
||||
{
|
||||
// The framebuffer (via MozAudioAvailable events) is only available
|
||||
// when reading vs. writing audio directly.
|
||||
if (!mDecoder) {
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
}
|
||||
|
||||
*aMozFrameBufferLength = mDecoder->GetFrameBufferLength();
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
NS_IMETHODIMP
|
||||
nsHTMLMediaElement::SetMozFrameBufferLength(PRUint32 aMozFrameBufferLength)
|
||||
{
|
||||
if (!mDecoder)
|
||||
return NS_ERROR_DOM_INVALID_STATE_ERR;
|
||||
|
||||
return mDecoder->RequestFrameBufferLength(aMozFrameBufferLength);
|
||||
}
|
||||
|
||||
/* attribute boolean muted; */
|
||||
NS_IMETHODIMP nsHTMLMediaElement::GetMuted(PRBool *aMuted)
|
||||
{
|
||||
@ -1054,8 +959,6 @@ NS_IMETHODIMP nsHTMLMediaElement::SetMuted(PRBool aMuted)
|
||||
|
||||
if (mDecoder) {
|
||||
mDecoder->SetVolume(mMuted ? 0.0 : mVolume);
|
||||
} else if (mAudioStream) {
|
||||
mAudioStream->SetVolume(mMuted ? 0.0 : mVolume);
|
||||
}
|
||||
|
||||
DispatchAsyncSimpleEvent(NS_LITERAL_STRING("volumechange"));
|
||||
@ -1071,10 +974,7 @@ nsHTMLMediaElement::nsHTMLMediaElement(already_AddRefed<nsINodeInfo> aNodeInfo,
|
||||
mReadyState(nsIDOMHTMLMediaElement::HAVE_NOTHING),
|
||||
mLoadWaitStatus(NOT_WAITING),
|
||||
mVolume(1.0),
|
||||
mChannels(0),
|
||||
mRate(0),
|
||||
mMediaSize(-1,-1),
|
||||
mAllowAudioData(PR_FALSE),
|
||||
mBegun(PR_FALSE),
|
||||
mLoadedFirstFrame(PR_FALSE),
|
||||
mAutoplaying(PR_TRUE),
|
||||
@ -1094,8 +994,7 @@ nsHTMLMediaElement::nsHTMLMediaElement(already_AddRefed<nsINodeInfo> aNodeInfo,
|
||||
mAllowSuspendAfterFirstFrame(PR_TRUE),
|
||||
mHasPlayedOrSeeked(PR_FALSE),
|
||||
mHasSelfReference(PR_FALSE),
|
||||
mShuttingDown(PR_FALSE),
|
||||
mMediaSecurityVerified(PR_FALSE)
|
||||
mShuttingDown(PR_FALSE)
|
||||
{
|
||||
#ifdef PR_LOGGING
|
||||
if (!gMediaElementLog) {
|
||||
@ -1124,10 +1023,6 @@ nsHTMLMediaElement::~nsHTMLMediaElement()
|
||||
mChannel->Cancel(NS_BINDING_ABORTED);
|
||||
mChannel = nsnull;
|
||||
}
|
||||
if (mAudioStream) {
|
||||
mAudioStream->Shutdown();
|
||||
mAudioStream = nsnull;
|
||||
}
|
||||
}
|
||||
|
||||
void nsHTMLMediaElement::StopSuspendingAfterFirstFrame()
|
||||
@ -1674,9 +1569,6 @@ nsresult nsHTMLMediaElement::FinishDecoderSetup(nsMediaDecoder* aDecoder)
|
||||
{
|
||||
mDecoder = aDecoder;
|
||||
|
||||
// Force a same-origin check before allowing events for this media resource.
|
||||
mMediaSecurityVerified = PR_FALSE;
|
||||
|
||||
// The new stream has not been suspended by us.
|
||||
mPausedForInactiveDocument = PR_FALSE;
|
||||
// But we may want to suspend it now.
|
||||
@ -1732,10 +1624,8 @@ nsresult nsHTMLMediaElement::NewURIFromString(const nsAutoString& aURISpec, nsIU
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
void nsHTMLMediaElement::MetadataLoaded(PRUint32 aChannels, PRUint32 aRate)
|
||||
void nsHTMLMediaElement::MetadataLoaded()
|
||||
{
|
||||
mChannels = aChannels;
|
||||
mRate = aRate;
|
||||
ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_METADATA);
|
||||
DispatchAsyncSimpleEvent(NS_LITERAL_STRING("durationchange"));
|
||||
DispatchAsyncSimpleEvent(NS_LITERAL_STRING("loadedmetadata"));
|
||||
@ -1988,29 +1878,6 @@ ImageContainer* nsHTMLMediaElement::GetImageContainer()
|
||||
return mImageContainer;
|
||||
}
|
||||
|
||||
nsresult nsHTMLMediaElement::DispatchAudioAvailableEvent(float* aFrameBuffer,
|
||||
PRUint32 aFrameBufferLength,
|
||||
PRUint64 aTime)
|
||||
{
|
||||
nsCOMPtr<nsIDOMDocumentEvent> docEvent(do_QueryInterface(GetOwnerDoc()));
|
||||
nsCOMPtr<nsIDOMEventTarget> target(do_QueryInterface(static_cast<nsIContent*>(this)));
|
||||
NS_ENSURE_TRUE(docEvent && target, NS_ERROR_INVALID_ARG);
|
||||
|
||||
nsCOMPtr<nsIDOMEvent> event;
|
||||
nsresult rv = docEvent->CreateEvent(NS_LITERAL_STRING("MozAudioAvailableEvent"),
|
||||
getter_AddRefs(event));
|
||||
nsCOMPtr<nsIDOMNotifyAudioAvailableEvent> audioavailableEvent(do_QueryInterface(event));
|
||||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
|
||||
rv = audioavailableEvent->InitAudioAvailableEvent(NS_LITERAL_STRING("MozAudioAvailable"),
|
||||
PR_TRUE, PR_TRUE, aFrameBuffer, aFrameBufferLength,
|
||||
(float)aTime / MS_PER_SECOND, mAllowAudioData);
|
||||
NS_ENSURE_SUCCESS(rv, rv);
|
||||
|
||||
PRBool dummy;
|
||||
return target->DispatchEvent(event, &dummy);
|
||||
}
|
||||
|
||||
nsresult nsHTMLMediaElement::DispatchSimpleEvent(const nsAString& aName)
|
||||
{
|
||||
LOG_EVENT(PR_LOG_DEBUG, ("%p Dispatching simple event %s", this,
|
||||
|
@ -53,7 +53,6 @@ EXPORTS = \
|
||||
nsBuiltinDecoderStateMachine.h \
|
||||
nsBuiltinDecoderReader.h \
|
||||
VideoUtils.h \
|
||||
nsAudioAvailableEventManager.h \
|
||||
$(NULL)
|
||||
|
||||
CPPSRCS = \
|
||||
@ -64,7 +63,6 @@ CPPSRCS = \
|
||||
nsBuiltinDecoderStateMachine.cpp \
|
||||
nsBuiltinDecoderReader.cpp \
|
||||
VideoUtils.cpp \
|
||||
nsAudioAvailableEventManager.cpp \
|
||||
$(NULL)
|
||||
|
||||
ifdef MOZ_SYDNEYAUDIO
|
||||
|
@ -1,207 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
||||
/* ***** BEGIN LICENSE BLOCK *****
|
||||
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
||||
*
|
||||
* The contents of this file are subject to the Mozilla Public License Version
|
||||
* 1.1 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
* http://www.mozilla.org/MPL/
|
||||
*
|
||||
* Software distributed under the License is distributed on an "AS IS" basis,
|
||||
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
* for the specific language governing rights and limitations under the
|
||||
* License.
|
||||
*
|
||||
* The Original Code is Mozilla code.
|
||||
*
|
||||
* The Initial Developer of the Original Code is the Mozilla Foundation.
|
||||
* Portions created by the Initial Developer are Copyright (C) 2010
|
||||
* the Initial Developer. All Rights Reserved.
|
||||
*
|
||||
* Contributor(s):
|
||||
* David Humphrey <david.humphrey@senecac.on.ca>
|
||||
* Yury Delendik
|
||||
*
|
||||
* Alternatively, the contents of this file may be used under the terms of
|
||||
* either the GNU General Public License Version 2 or later (the "GPL"), or
|
||||
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
||||
* in which case the provisions of the GPL or the LGPL are applicable instead
|
||||
* of those above. If you wish to allow use of your version of this file only
|
||||
* under the terms of either the GPL or the LGPL, and not to allow others to
|
||||
* use your version of this file under the terms of the MPL, indicate your
|
||||
* decision by deleting the provisions above and replace them with the notice
|
||||
* and other provisions required by the GPL or the LGPL. If you do not delete
|
||||
* the provisions above, a recipient may use your version of this file under
|
||||
* the terms of any one of the MPL, the GPL or the LGPL.
|
||||
*
|
||||
* ***** END LICENSE BLOCK ***** */
|
||||
|
||||
#include "nsTArray.h"
|
||||
#include "nsAudioAvailableEventManager.h"
|
||||
|
||||
#define MILLISECONDS_PER_SECOND 1000
|
||||
|
||||
using namespace mozilla;
|
||||
|
||||
class nsAudioAvailableEventRunner : public nsRunnable
|
||||
{
|
||||
private:
|
||||
nsCOMPtr<nsBuiltinDecoder> mDecoder;
|
||||
nsAutoArrayPtr<float> mFrameBuffer;
|
||||
public:
|
||||
nsAudioAvailableEventRunner(nsBuiltinDecoder* aDecoder, float* aFrameBuffer,
|
||||
PRUint32 aFrameBufferLength, PRUint64 aTime) :
|
||||
mDecoder(aDecoder),
|
||||
mFrameBuffer(aFrameBuffer),
|
||||
mFrameBufferLength(aFrameBufferLength),
|
||||
mTime(aTime)
|
||||
{
|
||||
}
|
||||
|
||||
NS_IMETHOD Run()
|
||||
{
|
||||
mDecoder->AudioAvailable(mFrameBuffer.forget(), mFrameBufferLength, mTime);
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
const PRUint32 mFrameBufferLength;
|
||||
const PRUint64 mTime;
|
||||
};
|
||||
|
||||
|
||||
nsAudioAvailableEventManager::nsAudioAvailableEventManager(nsBuiltinDecoder* aDecoder) :
|
||||
mDecoder(aDecoder),
|
||||
mSignalBuffer(new float[mDecoder->GetFrameBufferLength()]),
|
||||
mSignalBufferLength(mDecoder->GetFrameBufferLength()),
|
||||
mSignalBufferPosition(0),
|
||||
mMonitor("media.audioavailableeventmanager")
|
||||
{
|
||||
}
|
||||
|
||||
void nsAudioAvailableEventManager::Init(PRUint32 aChannels, PRUint32 aRate)
|
||||
{
|
||||
mSamplesPerSecond = aChannels * aRate;
|
||||
}
|
||||
|
||||
void nsAudioAvailableEventManager::DispatchPendingEvents(PRUint64 aCurrentTime)
|
||||
{
|
||||
MonitorAutoEnter mon(mMonitor);
|
||||
|
||||
while (mPendingEvents.Length() > 0) {
|
||||
nsAudioAvailableEventRunner* e =
|
||||
(nsAudioAvailableEventRunner*)mPendingEvents[0].get();
|
||||
if (e->mTime > aCurrentTime) {
|
||||
break;
|
||||
}
|
||||
nsCOMPtr<nsIRunnable> event = mPendingEvents[0];
|
||||
mPendingEvents.RemoveElementAt(0);
|
||||
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
|
||||
}
|
||||
}
|
||||
|
||||
void nsAudioAvailableEventManager::QueueWrittenAudioData(float* aAudioData,
|
||||
PRUint32 aAudioDataLength,
|
||||
PRUint64 aEndTimeSampleOffset)
|
||||
{
|
||||
PRUint32 currentBufferSize = mDecoder->GetFrameBufferLength();
|
||||
if (!mSignalBuffer ||
|
||||
(mSignalBufferPosition == 0 && mSignalBufferLength != currentBufferSize)) {
|
||||
if (!mSignalBuffer || (mSignalBufferLength < currentBufferSize)) {
|
||||
// Only resize if buffer is empty or smaller.
|
||||
mSignalBuffer = new float[currentBufferSize];
|
||||
}
|
||||
mSignalBufferLength = currentBufferSize;
|
||||
}
|
||||
float* audioData = aAudioData;
|
||||
PRUint32 audioDataLength = aAudioDataLength;
|
||||
PRUint32 signalBufferTail = mSignalBufferLength - mSignalBufferPosition;
|
||||
|
||||
// Group audio samples into optimal size for event dispatch, and queue.
|
||||
while (signalBufferTail <= audioDataLength) {
|
||||
PRUint64 time = 0;
|
||||
// Guard against unsigned number overflow during first frame time calculation.
|
||||
if (aEndTimeSampleOffset > mSignalBufferPosition + audioDataLength) {
|
||||
time = MILLISECONDS_PER_SECOND * (aEndTimeSampleOffset -
|
||||
mSignalBufferPosition - audioDataLength) / mSamplesPerSecond;
|
||||
}
|
||||
|
||||
// Fill the signalBuffer.
|
||||
memcpy(mSignalBuffer.get() + mSignalBufferPosition,
|
||||
audioData, sizeof(float) * signalBufferTail);
|
||||
audioData += signalBufferTail;
|
||||
audioDataLength -= signalBufferTail;
|
||||
|
||||
MonitorAutoEnter mon(mMonitor);
|
||||
|
||||
if (mPendingEvents.Length() > 0) {
|
||||
// Check last event timecode to make sure that all queued events
|
||||
// are in non-decending sequence.
|
||||
nsAudioAvailableEventRunner* lastPendingEvent =
|
||||
(nsAudioAvailableEventRunner*)mPendingEvents[mPendingEvents.Length() - 1].get();
|
||||
if (lastPendingEvent->mTime > time) {
|
||||
// Clear the queue to start a fresh sequence.
|
||||
mPendingEvents.Clear();
|
||||
}
|
||||
}
|
||||
|
||||
// Inform the element that we've written sound data.
|
||||
nsCOMPtr<nsIRunnable> event =
|
||||
new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),
|
||||
mSignalBufferLength, time);
|
||||
mPendingEvents.AppendElement(event);
|
||||
|
||||
// Reset the buffer
|
||||
mSignalBufferLength = currentBufferSize;
|
||||
mSignalBuffer = new float[currentBufferSize];
|
||||
mSignalBufferPosition = 0;
|
||||
signalBufferTail = currentBufferSize;
|
||||
NS_ASSERTION(audioDataLength >= 0, "Past new signal data length.");
|
||||
}
|
||||
|
||||
NS_ASSERTION(mSignalBufferPosition + audioDataLength < mSignalBufferLength,
|
||||
"Intermediate signal buffer must fit at least one more item.");
|
||||
|
||||
if (audioDataLength > 0) {
|
||||
// Add data to the signalBuffer.
|
||||
memcpy(mSignalBuffer.get() + mSignalBufferPosition,
|
||||
audioData, sizeof(float) * audioDataLength);
|
||||
mSignalBufferPosition += audioDataLength;
|
||||
}
|
||||
}
|
||||
|
||||
void nsAudioAvailableEventManager::Clear()
|
||||
{
|
||||
MonitorAutoEnter mon(mMonitor);
|
||||
|
||||
mPendingEvents.Clear();
|
||||
mSignalBufferPosition = 0;
|
||||
}
|
||||
|
||||
void nsAudioAvailableEventManager::Drain(PRUint64 aEndTime)
|
||||
{
|
||||
MonitorAutoEnter mon(mMonitor);
|
||||
|
||||
// Force all pending events to go now.
|
||||
for (PRUint32 i = 0; i < mPendingEvents.Length(); ++i) {
|
||||
nsCOMPtr<nsIRunnable> event = mPendingEvents[i];
|
||||
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
|
||||
}
|
||||
mPendingEvents.Clear();
|
||||
|
||||
// If there is anything left in the signal buffer, put it in an event and fire.
|
||||
if (0 == mSignalBufferPosition)
|
||||
return;
|
||||
|
||||
// Zero-pad the end of the signal buffer so it's complete.
|
||||
memset(mSignalBuffer.get() + mSignalBufferPosition, 0,
|
||||
(mSignalBufferLength - mSignalBufferPosition) * sizeof(float));
|
||||
|
||||
// Force this last event to go now.
|
||||
nsCOMPtr<nsIRunnable> lastEvent =
|
||||
new nsAudioAvailableEventRunner(mDecoder, mSignalBuffer.forget(),
|
||||
mSignalBufferLength, aEndTime);
|
||||
NS_DispatchToMainThread(lastEvent, NS_DISPATCH_NORMAL);
|
||||
|
||||
mSignalBufferPosition = 0;
|
||||
}
|
@ -1,103 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
||||
/* ***** BEGIN LICENSE BLOCK *****
|
||||
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
||||
*
|
||||
* The contents of this file are subject to the Mozilla Public License Version
|
||||
* 1.1 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
* http://www.mozilla.org/MPL/
|
||||
*
|
||||
* Software distributed under the License is distributed on an "AS IS" basis,
|
||||
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
||||
* for the specific language governing rights and limitations under the
|
||||
* License.
|
||||
*
|
||||
* The Original Code is Mozilla code.
|
||||
*
|
||||
* The Initial Developer of the Original Code is the Mozilla Foundation.
|
||||
* Portions created by the Initial Developer are Copyright (C) 2010
|
||||
* the Initial Developer. All Rights Reserved.
|
||||
*
|
||||
* Contributor(s):
|
||||
* David Humphrey <david.humphrey@senecac.on.ca>
|
||||
* Yury Delendik
|
||||
*
|
||||
* Alternatively, the contents of this file may be used under the terms of
|
||||
* either the GNU General Public License Version 2 or later (the "GPL"), or
|
||||
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
||||
* in which case the provisions of the GPL or the LGPL are applicable instead
|
||||
* of those above. If you wish to allow use of your version of this file only
|
||||
* under the terms of either the GPL or the LGPL, and not to allow others to
|
||||
* use your version of this file under the terms of the MPL, indicate your
|
||||
* decision by deleting the provisions above and replace them with the notice
|
||||
* and other provisions required by the GPL or the LGPL. If you do not delete
|
||||
* the provisions above, a recipient may use your version of this file under
|
||||
* the terms of any one of the MPL, the GPL or the LGPL.
|
||||
*
|
||||
* ***** END LICENSE BLOCK ***** */
|
||||
|
||||
#ifndef nsAudioAvailableEventManager_h__
|
||||
#define nsAudioAvailableEventManager_h__
|
||||
|
||||
#include "nsCOMPtr.h"
|
||||
#include "nsIRunnable.h"
|
||||
#include "nsBuiltinDecoder.h"
|
||||
#include "nsBuiltinDecoderReader.h"
|
||||
|
||||
using namespace mozilla;
|
||||
|
||||
class nsAudioAvailableEventManager
|
||||
{
|
||||
public:
|
||||
nsAudioAvailableEventManager(nsBuiltinDecoder* aDecoder);
|
||||
|
||||
// Initialize the event manager with audio metadata. Called before
|
||||
// audio begins to get queued or events are dispatched.
|
||||
void Init(PRUint32 aChannels, PRUint32 aRate);
|
||||
|
||||
// Dispatch pending MozAudioAvailable events in the queue. Called
|
||||
// from the state machine thread.
|
||||
void DispatchPendingEvents(PRUint64 aCurrentTime);
|
||||
|
||||
// Queues audio sample data and re-packages it into equal sized
|
||||
// framebuffers. Called from the audio thread.
|
||||
void QueueWrittenAudioData(float* aAudioData, PRUint32 aAudioDataLength,
|
||||
PRUint64 aEndTimeSampleOffset);
|
||||
|
||||
// Clears the queue of any existing events. Called from both the state
|
||||
// machine and audio threads.
|
||||
void Clear();
|
||||
|
||||
// Fires one last event for any extra samples that didn't fit in a whole
|
||||
// framebuffer. This is meant to be called only once when the audio finishes.
|
||||
// Called from the state machine thread.
|
||||
void Drain(PRUint64 aTime);
|
||||
|
||||
private:
|
||||
// The decoder associated with the event manager. The event manager shares
|
||||
// the same lifetime as the decoder (the decoder holds a reference to the
|
||||
// manager).
|
||||
nsBuiltinDecoder* mDecoder;
|
||||
|
||||
// The number of samples per second.
|
||||
PRUint64 mSamplesPerSecond;
|
||||
|
||||
// A buffer for audio data to be dispatched in DOM events.
|
||||
nsAutoArrayPtr<float> mSignalBuffer;
|
||||
|
||||
// The current size of the signal buffer, may change due to DOM calls.
|
||||
PRUint32 mSignalBufferLength;
|
||||
|
||||
// The position of the first available item in mSignalBuffer
|
||||
PRUint32 mSignalBufferPosition;
|
||||
|
||||
// The MozAudioAvailable events to be dispatched. This queue is shared
|
||||
// between the state machine and audio threads.
|
||||
nsTArray< nsCOMPtr<nsIRunnable> > mPendingEvents;
|
||||
|
||||
// Monitor for shared access to mPendingEvents queue.
|
||||
Monitor mMonitor;
|
||||
};
|
||||
|
||||
#endif
|
@ -58,7 +58,6 @@ PRLogModuleInfo* gAudioStreamLog = nsnull;
|
||||
#endif
|
||||
|
||||
#define FAKE_BUFFER_SIZE 176400
|
||||
#define MILLISECONDS_PER_SECOND 1000
|
||||
|
||||
void nsAudioStream::InitLibrary()
|
||||
{
|
||||
@ -86,7 +85,7 @@ nsAudioStream::~nsAudioStream()
|
||||
Shutdown();
|
||||
}
|
||||
|
||||
nsresult nsAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
|
||||
void nsAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat)
|
||||
{
|
||||
mRate = aRate;
|
||||
mChannels = aNumChannels;
|
||||
@ -99,17 +98,15 @@ nsresult nsAudioStream::Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat a
|
||||
aNumChannels) != SA_SUCCESS) {
|
||||
mAudioHandle = nsnull;
|
||||
PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_create_pcm error"));
|
||||
return NS_ERROR_FAILURE;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if (sa_stream_open(static_cast<sa_stream_t*>(mAudioHandle)) != SA_SUCCESS) {
|
||||
sa_stream_destroy(static_cast<sa_stream_t*>(mAudioHandle));
|
||||
mAudioHandle = nsnull;
|
||||
PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_open error"));
|
||||
return NS_ERROR_FAILURE;
|
||||
return;
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
void nsAudioStream::Shutdown()
|
||||
@ -121,7 +118,7 @@ void nsAudioStream::Shutdown()
|
||||
mAudioHandle = nsnull;
|
||||
}
|
||||
|
||||
nsresult nsAudioStream::Write(const void* aBuf, PRUint32 aCount, PRBool aBlocking)
|
||||
void nsAudioStream::Write(const void* aBuf, PRUint32 aCount, PRBool aBlocking)
|
||||
{
|
||||
NS_ABORT_IF_FALSE(aCount % mChannels == 0,
|
||||
"Buffer size must be divisible by channel count");
|
||||
@ -131,7 +128,7 @@ nsresult nsAudioStream::Write(const void* aBuf, PRUint32 aCount, PRBool aBlockin
|
||||
PRUint32 count = aCount + offset;
|
||||
|
||||
if (!mAudioHandle)
|
||||
return NS_ERROR_FAILURE;
|
||||
return;
|
||||
|
||||
nsAutoArrayPtr<short> s_data(new short[count]);
|
||||
|
||||
@ -197,11 +194,8 @@ nsresult nsAudioStream::Write(const void* aBuf, PRUint32 aCount, PRBool aBlockin
|
||||
{
|
||||
PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsAudioStream: sa_stream_write error"));
|
||||
Shutdown();
|
||||
return NS_ERROR_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
PRUint32 nsAudioStream::Available()
|
||||
@ -268,16 +262,6 @@ void nsAudioStream::Resume()
|
||||
}
|
||||
|
||||
PRInt64 nsAudioStream::GetPosition()
|
||||
{
|
||||
PRInt64 sampleOffset = GetSampleOffset();
|
||||
if(sampleOffset >= 0) {
|
||||
return ((MILLISECONDS_PER_SECOND * sampleOffset) / mRate / mChannels);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
PRInt64 nsAudioStream::GetSampleOffset()
|
||||
{
|
||||
if (!mAudioHandle)
|
||||
return -1;
|
||||
@ -289,8 +273,9 @@ PRInt64 nsAudioStream::GetSampleOffset()
|
||||
PRInt64 position = 0;
|
||||
if (sa_stream_get_position(static_cast<sa_stream_t*>(mAudioHandle),
|
||||
positionType, &position) == SA_SUCCESS) {
|
||||
return position / sizeof(short);
|
||||
return ((1000 * position) / mRate / mChannels / sizeof(short));
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -68,7 +68,7 @@ class nsAudioStream
|
||||
// Initialize the audio stream. aNumChannels is the number of audio channels
|
||||
// (1 for mono, 2 for stereo, etc) and aRate is the frequency of the sound
|
||||
// samples (22050, 44100, etc).
|
||||
nsresult Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
|
||||
void Init(PRInt32 aNumChannels, PRInt32 aRate, SampleFormat aFormat);
|
||||
|
||||
// Closes the stream. All future use of the stream is an error.
|
||||
void Shutdown();
|
||||
@ -79,7 +79,7 @@ class nsAudioStream
|
||||
// When aBlocking is PR_TRUE, we'll block until the write has completed,
|
||||
// otherwise we'll buffer any data we can't write immediately, and write
|
||||
// it in a later call.
|
||||
nsresult Write(const void* aBuf, PRUint32 aCount, PRBool aBlocking);
|
||||
void Write(const void* aBuf, PRUint32 aCount, PRBool aBlocking);
|
||||
|
||||
// Return the number of sound samples that can be written to the audio device
|
||||
// without blocking.
|
||||
@ -102,10 +102,6 @@ class nsAudioStream
|
||||
// audio hardware.
|
||||
PRInt64 GetPosition();
|
||||
|
||||
// Return the position, measured in samples played since the start, by
|
||||
// the audio hardware.
|
||||
PRInt64 GetSampleOffset();
|
||||
|
||||
// Returns PR_TRUE when the audio stream is paused.
|
||||
PRBool IsPaused() { return mPaused; }
|
||||
|
||||
|
@ -293,32 +293,11 @@ already_AddRefed<nsIPrincipal> nsBuiltinDecoder::GetCurrentPrincipal()
|
||||
return mStream ? mStream->GetCurrentPrincipal() : nsnull;
|
||||
}
|
||||
|
||||
void nsBuiltinDecoder::AudioAvailable(float* aFrameBuffer,
|
||||
PRUint32 aFrameBufferLength,
|
||||
PRUint64 aTime)
|
||||
void nsBuiltinDecoder::MetadataLoaded()
|
||||
{
|
||||
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
|
||||
if (mShuttingDown) {
|
||||
if (mShuttingDown)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!mElement->MayHaveAudioAvailableEventListener()) {
|
||||
return;
|
||||
}
|
||||
|
||||
mElement->NotifyAudioAvailable(aFrameBuffer, aFrameBufferLength, aTime);
|
||||
}
|
||||
|
||||
void nsBuiltinDecoder::MetadataLoaded(PRUint32 aChannels,
|
||||
PRUint32 aRate,
|
||||
PRUint32 aFrameBufferLength)
|
||||
{
|
||||
NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
|
||||
if (mShuttingDown) {
|
||||
return;
|
||||
}
|
||||
|
||||
mFrameBufferLength = aFrameBufferLength;
|
||||
|
||||
// Only inform the element of MetadataLoaded if not doing a load() in order
|
||||
// to fulfill a seek, otherwise we'll get multiple metadataloaded events.
|
||||
@ -336,7 +315,7 @@ void nsBuiltinDecoder::MetadataLoaded(PRUint32 aChannels,
|
||||
// Make sure the element and the frame (if any) are told about
|
||||
// our new size.
|
||||
Invalidate();
|
||||
mElement->MetadataLoaded(aChannels, aRate);
|
||||
mElement->MetadataLoaded();
|
||||
}
|
||||
|
||||
if (!mResourceLoaded) {
|
||||
|
@ -410,8 +410,6 @@ class nsBuiltinDecoder : public nsMediaDecoder
|
||||
// state machine.
|
||||
void Stop();
|
||||
|
||||
void AudioAvailable(float* aFrameBuffer, PRUint32 aFrameBufferLength, PRUint64 aTime);
|
||||
|
||||
// Called by the state machine to notify the decoder that the duration
|
||||
// has changed.
|
||||
void DurationChanged();
|
||||
@ -480,9 +478,7 @@ class nsBuiltinDecoder : public nsMediaDecoder
|
||||
|
||||
// Called when the metadata from the media file has been read.
|
||||
// Call on the main thread only.
|
||||
void MetadataLoaded(PRUint32 aChannels,
|
||||
PRUint32 aRate,
|
||||
PRUint32 aFrameBufferLength);
|
||||
void MetadataLoaded();
|
||||
|
||||
// Called when the first frame has been loaded.
|
||||
// Call on the main thread only.
|
||||
|
@ -102,31 +102,6 @@ static const PRUint32 LOW_VIDEO_FRAMES = 1;
|
||||
// Arbitrary "frame duration" when playing only audio.
|
||||
static const int AUDIO_DURATION_MS = 40;
|
||||
|
||||
class nsAudioMetadataEventRunner : public nsRunnable
|
||||
{
|
||||
private:
|
||||
nsCOMPtr<nsBuiltinDecoder> mDecoder;
|
||||
public:
|
||||
nsAudioMetadataEventRunner(nsBuiltinDecoder* aDecoder, PRUint32 aChannels,
|
||||
PRUint32 aRate, PRUint32 aFrameBufferLength) :
|
||||
mDecoder(aDecoder),
|
||||
mChannels(aChannels),
|
||||
mRate(aRate),
|
||||
mFrameBufferLength(aFrameBufferLength)
|
||||
{
|
||||
}
|
||||
|
||||
NS_IMETHOD Run()
|
||||
{
|
||||
mDecoder->MetadataLoaded(mChannels, mRate, mFrameBufferLength);
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
const PRUint32 mChannels;
|
||||
const PRUint32 mRate;
|
||||
const PRUint32 mFrameBufferLength;
|
||||
};
|
||||
|
||||
nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDecoder,
|
||||
nsBuiltinDecoderReader* aReader) :
|
||||
mDecoder(aDecoder),
|
||||
@ -149,8 +124,7 @@ nsBuiltinDecoderStateMachine::nsBuiltinDecoderStateMachine(nsBuiltinDecoder* aDe
|
||||
mAudioCompleted(PR_FALSE),
|
||||
mBufferExhausted(PR_FALSE),
|
||||
mGotDurationFromHeader(PR_FALSE),
|
||||
mStopDecodeThreads(PR_TRUE),
|
||||
mEventManager(aDecoder)
|
||||
mStopDecodeThreads(PR_TRUE)
|
||||
{
|
||||
MOZ_COUNT_CTOR(nsBuiltinDecoderStateMachine);
|
||||
}
|
||||
@ -434,10 +408,9 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
|
||||
// hardware so that the next sound chunk begins playback at the correct
|
||||
// time.
|
||||
missingSamples = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingSamples);
|
||||
audioDuration += PlaySilence(static_cast<PRUint32>(missingSamples),
|
||||
channels, sampleTime);
|
||||
audioDuration += PlaySilence(static_cast<PRUint32>(missingSamples), channels);
|
||||
} else {
|
||||
audioDuration += PlayFromAudioQueue(sampleTime, channels);
|
||||
audioDuration += PlayFromAudioQueue();
|
||||
}
|
{
MonitorAutoEnter mon(mDecoder->GetMonitor());
@ -477,8 +450,6 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
MonitorAutoEnter audioMon(mAudioMonitor);
if (mAudioStream) {
mAudioStream->Drain();
// Fire one last event for any extra samples that didn't fill a framebuffer.
mEventManager.Drain(mAudioEndTime);
}
LOG(PR_LOG_DEBUG, ("%p Reached audio stream end.", mDecoder));
}
@ -493,10 +464,7 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
LOG(PR_LOG_DEBUG, ("Audio stream finished playing, audio thread exit"));
}

PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aSamples,
PRUint32 aChannels,
PRUint64 aSampleOffset)

PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aSamples, PRUint32 aChannels)
{
MonitorAutoEnter audioMon(mAudioMonitor);
if (mAudioStream->IsPaused()) {
@ -510,14 +478,10 @@ PRUint32 nsBuiltinDecoderStateMachine::PlaySilence(PRUint32 aSamples,
nsAutoArrayPtr<float> buf(new float[numFloats]);
memset(buf.get(), 0, sizeof(float) * numFloats);
mAudioStream->Write(buf, numFloats, PR_TRUE);
// Dispatch events to the DOM for the audio just written.
mEventManager.QueueWrittenAudioData(buf.get(), numFloats,
(aSampleOffset + samples) * aChannels);
return samples;
}

PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset,
PRUint32 aChannels)
PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue()
{
nsAutoPtr<SoundData> sound(mReader->mAudioQueue.PopFront());
{
@ -538,21 +502,15 @@ PRUint32 nsBuiltinDecoderStateMachine::PlayFromAudioQueue(PRUint64 aSampleOffset
// monitor and acquired the audio monitor. Rather than acquire both
// monitors, the audio stream also maintains whether it's paused or not.
// This prevents us from doing a blocking write while holding the audio
// monitor while paused; we would block, and the state machine won't be
// able to acquire the audio monitor in order to resume or destroy the
// audio stream.
if (!mAudioStream->IsPaused()) {
mAudioStream->Write(sound->mAudioData,
sound->AudioDataLength(),
PR_TRUE);

offset = sound->mOffset;
samples = sound->mSamples;

// Dispatch events to the DOM for the audio just written.
mEventManager.QueueWrittenAudioData(sound->mAudioData.get(),
sound->AudioDataLength(),
(aSampleOffset + samples) * aChannels);
} else {
mReader->mAudioQueue.PushFront(sound);
sound.forget();
@ -594,7 +552,6 @@ void nsBuiltinDecoderStateMachine::StopPlayback(eStopMode aMode)
} else if (aMode == AUDIO_SHUTDOWN) {
mAudioStream->Shutdown();
mAudioStream = nsnull;
mEventManager.Clear();
}
}
}
@ -651,9 +608,6 @@ void nsBuiltinDecoderStateMachine::UpdatePlaybackPosition(PRInt64 aTime)
NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::PlaybackPositionChanged);
NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
}

// Notify DOM of any queued up audioavailable events
mEventManager.DispatchPendingEvents(mCurrentFrameTime + mStartTime);
}

void nsBuiltinDecoderStateMachine::ClearPositionChangeFlag()
@ -897,16 +851,10 @@ nsresult nsBuiltinDecoderStateMachine::Run()
if (mState == DECODER_STATE_SHUTDOWN)
continue;

// Inform the element that we've loaded the metadata and the first frame,
// setting the default framebuffer size for audioavailable events. Also
// let the MozAudioAvailable event manager know about the metadata.
const nsVideoInfo& info = mReader->GetInfo();
PRUint32 frameBufferLength = info.mAudioChannels * FRAMEBUFFER_LENGTH_PER_CHANNEL;
mEventManager.Init(info.mAudioChannels, info.mAudioRate);
mDecoder->RequestFrameBufferLength(frameBufferLength);
// Inform the element that we've loaded the metadata and the
// first frame.
nsCOMPtr<nsIRunnable> metadataLoadedEvent =
new nsAudioMetadataEventRunner(mDecoder, info.mAudioChannels,
info.mAudioRate, frameBufferLength);
NS_NewRunnableMethod(mDecoder, &nsBuiltinDecoder::MetadataLoaded);
NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);

if (mState == DECODER_STATE_DECODING_METADATA) {
@ -116,7 +116,6 @@ not yet time to display the next frame.
#include "nsThreadUtils.h"
#include "nsBuiltinDecoder.h"
#include "nsBuiltinDecoderReader.h"
#include "nsAudioAvailableEventManager.h"
#include "nsHTMLMediaElement.h"
#include "mozilla/Monitor.h"

@ -294,13 +293,11 @@ protected:
// hardware. This ensures that the playback position advances smoothly, and
// guarantees that we don't try to allocate an impossibly large chunk of
// memory in order to play back silence. Called on the audio thread.
PRUint32 PlaySilence(PRUint32 aSamples, PRUint32 aChannels,
PRUint64 aSampleOffset);
PRUint32 PlaySilence(PRUint32 aSamples, PRUint32 aChannels);

// Pops an audio chunk from the front of the audio queue, and pushes its
// sound data to the audio hardware. MozAudioAvailable sample data is also
// queued here. Called on the audio thread.
PRUint32 PlayFromAudioQueue(PRUint64 aSampleOffset, PRUint32 aChannels);
// sound data to the audio hardware. Called on the audio thread.
PRUint32 PlayFromAudioQueue();

// Stops the decode threads. The decoder monitor must be held with exactly
// one lock count. Called on the state machine thread.
@ -457,12 +454,7 @@ protected:
// PR_FALSE while decode threads should be running. Accessed on audio,
// state machine and decode threads. Synchronised by decoder monitor.
PRPackedBool mStopDecodeThreads;

private:
// Manager for queuing and dispatching MozAudioAvailable events. The
// event manager is accessed from the state machine and audio threads,
// and takes care of synchronizing access to its internal queue.
nsAudioAvailableEventManager mEventManager;
};


#endif

@ -76,7 +76,6 @@ nsMediaDecoder::nsMediaDecoder() :
mDataTime(),
mVideoUpdateLock(nsnull),
mPixelAspectRatio(1.0),
mFrameBufferLength(0),
mPinnedForSeek(PR_FALSE),
mSizeChanged(PR_FALSE),
mShuttingDown(PR_FALSE)
@ -112,19 +111,6 @@ nsHTMLMediaElement* nsMediaDecoder::GetMediaElement()
return mElement;
}

nsresult nsMediaDecoder::RequestFrameBufferLength(PRUint32 aLength)
{
// Must be a power of 2 between 512 and 32768
if (aLength < FRAMEBUFFER_LENGTH_MIN || aLength > FRAMEBUFFER_LENGTH_MAX ||
(aLength & (aLength - 1)) > 0) {
return NS_ERROR_DOM_INDEX_SIZE_ERR;
}

mFrameBufferLength = aLength;
return NS_OK;
}


static PRInt32 ConditionDimension(float aValue, PRInt32 aDefault)
{
// This will exclude NaNs and infinities

@ -53,16 +53,6 @@ class nsMediaStream;
class nsIStreamListener;
class nsHTMLTimeRanges;

// The size to use for audio data frames in audioavailable events.
// This value is per channel, and is chosen to give ~43 fps of events,
// for example, 44100 with 2 channels, 2*1024 = 2048.
#define FRAMEBUFFER_LENGTH_PER_CHANNEL 1024

// The total size of the framebuffer used for audioavailable events
// has to be a power of 2, and must fit in the following range.
#define FRAMEBUFFER_LENGTH_MIN 512
#define FRAMEBUFFER_LENGTH_MAX 32768

// All methods of nsMediaDecoder must be called from the main thread only
// with the exception of GetImageContainer, SetVideoData and GetStatistics,
// which can be called from any thread.
@ -229,13 +219,6 @@ public:
// if it's available.
nsHTMLMediaElement* GetMediaElement();

// Returns the current size of the framebuffer used in audioavailable events.
PRUint32 GetFrameBufferLength() { return mFrameBufferLength; };

// Sets the length of the framebuffer used in audioavailable events. The
// new size must be a power of 2 between 512 and 32768.
nsresult RequestFrameBufferLength(PRUint32 aLength);

// Moves any existing channel loads into the background, so that they don't
// block the load event. This is called when we stop delaying the load
// event. Any new loads initiated (for example to seek) will also be in the
@ -315,9 +298,6 @@ protected:
// Pixel aspect ratio (ratio of the pixel width to pixel height)
float mPixelAspectRatio;

// The framebuffer size to use for audioavailable events.
PRUint32 mFrameBufferLength;

// PR_TRUE when our media stream has been pinned. We pin the stream
// while seeking.
PRPackedBool mPinnedForSeek;
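
For orientation, the framebuffer sizing rule that RequestFrameBufferLength() and the FRAMEBUFFER_LENGTH_* defines above enforce can be sketched in JavaScript. This is an illustrative sketch only, not part of the patch; the helper name isValidFrameBufferLength and the stereo example are assumptions made here for clarity.

// Illustrative sketch (not part of the patch): the framebuffer sizing rule
// enforced by RequestFrameBufferLength() above.
function isValidFrameBufferLength(length) {
  // Must be a power of 2 between FRAMEBUFFER_LENGTH_MIN (512) and
  // FRAMEBUFFER_LENGTH_MAX (32768); (length & (length - 1)) === 0 is the
  // same power-of-two test used in the C++ code above.
  return length >= 512 && length <= 32768 && (length & (length - 1)) === 0;
}

// The default per-element size is channels * FRAMEBUFFER_LENGTH_PER_CHANNEL,
// e.g. 2 * 1024 = 2048 for a stereo stream, which passes the check above.
var defaultLength = 2 * 1024;
// isValidFrameBufferLength(defaultLength) === true
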
@ -139,10 +139,6 @@ _TEST_FILES = \
test_volume.html \
test_video_to_canvas.html \
use_large_cache.js \
test_a4_tone.html \
test_audiowrite.html \
file_audio_event_adopt_iframe.html \
test_audio_event_adopt.html \
$(NULL)

# These tests are disabled until we figure out random failures.
@ -208,7 +204,6 @@ _TEST_FILES += \
small-shot.ogg \
sound.ogg \
video-overhang.ogg \
file_a4_tone.ogg \
$(NULL)

# Wave sample files

Binary file not shown.
@ -1,16 +0,0 @@
<html>
<head>
<script>
function audioAvailable(e) {
document.getElementById("wasAudioAvailableCalled").checked = true;
}
</script>
</head>
<body>
<audio id="a1" src="sound.ogg" controls></audio>
<script>
document.getElementById("a1").addEventListener("MozAudioAvailable", audioAvailable, false);
</script>
<input id="wasAudioAvailableCalled" type="checkbox" readonly />
</body>
</html>

@ -1,262 +0,0 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=490705
-->

<head>
<title>Media test: simple audioAvailable event checks</title>
<script type="text/javascript" src="/MochiKit/packed.js"></script>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=490705">Mozilla Bug 490705</a>

<!-- mute audio, since there is no need to hear the sound for these tests -->
<audio id='a1' onerror="event.stopPropagation();" controls></audio>

<pre id="test">
<script class="testbody" type="text/javascript">

/**
 * FFT is a class for calculating the Discrete Fourier Transform of a signal
 * with the Fast Fourier Transform algorithm.
 *
 * Source: github.com/corbanbrook/dsp.js; License: MIT; Copyright: Corban Brook
 *
 * @param {Number} bufferSize The size of the sample buffer to be computed. Must be power of 2
 * @param {Number} sampleRate The sampleRate of the buffer (eg. 44100)
 *
 * @constructor
 */
FFT = function(bufferSize, sampleRate) {
this.bufferSize = bufferSize;
this.sampleRate = sampleRate;
this.spectrum = new Float32Array(bufferSize/2);
this.real = new Float32Array(bufferSize);
this.imag = new Float32Array(bufferSize);

this.reverseTable = new Uint32Array(bufferSize);

var limit = 1;
var bit = bufferSize >> 1;

while ( limit < bufferSize ) {
for ( var i = 0; i < limit; i++ ) {
this.reverseTable[i + limit] = this.reverseTable[i] + bit;
}

limit = limit << 1;
bit = bit >> 1;
}

this.sinTable = new Float32Array(bufferSize);
this.cosTable = new Float32Array(bufferSize);

for ( var i = 0; i < bufferSize; i++ ) {
this.sinTable[i] = Math.sin(-Math.PI/i);
this.cosTable[i] = Math.cos(-Math.PI/i);
}
};

/**
 * Performs a forward transform on the sample buffer.
 * Converts a time domain signal to frequency domain spectra.
 *
 * @param {Array} buffer The sample buffer. Buffer Length must be power of 2
 *
 * @returns The frequency spectrum array
 */
FFT.prototype.forward = function(buffer) {
// Locally scope variables for speed up
var bufferSize = this.bufferSize,
cosTable = this.cosTable,
sinTable = this.sinTable,
reverseTable = this.reverseTable,
real = this.real,
imag = this.imag,
spectrum = this.spectrum;

var k = Math.floor(Math.log(bufferSize) / Math.LN2);
if ( Math.pow(2, k) !== bufferSize ) {
throw "Invalid buffer size, must be a power of 2.";
}
if ( bufferSize !== buffer.length ) {
throw "Supplied buffer is not the same size as defined FFT. FFT Size: " + bufferSize + " Buffer Size: " + buffer.length;
}

for ( var i = 0; i < bufferSize; i++ ) {
real[i] = buffer[reverseTable[i]];
imag[i] = 0;
}

var halfSize = 1,
phaseShiftStepReal,
phaseShiftStepImag,
currentPhaseShiftReal,
currentPhaseShiftImag,
off,
tr,
ti,
tmpReal,
i;

while ( halfSize < bufferSize ) {
phaseShiftStepReal = cosTable[halfSize];
phaseShiftStepImag = sinTable[halfSize];
currentPhaseShiftReal = 1;
currentPhaseShiftImag = 0;

for ( var fftStep = 0; fftStep < halfSize; fftStep++ ) {
i = fftStep;

while ( i < bufferSize ) {
off = i + halfSize;
tr = (currentPhaseShiftReal * real[off]) - (currentPhaseShiftImag * imag[off]);
ti = (currentPhaseShiftReal * imag[off]) + (currentPhaseShiftImag * real[off]);

real[off] = real[i] - tr;
imag[off] = imag[i] - ti;
real[i] += tr;
imag[i] += ti;

i += halfSize << 1;
}

tmpReal = currentPhaseShiftReal;
currentPhaseShiftReal = (tmpReal * phaseShiftStepReal) - (currentPhaseShiftImag * phaseShiftStepImag);
currentPhaseShiftImag = (tmpReal * phaseShiftStepImag) + (currentPhaseShiftImag * phaseShiftStepReal);
}

halfSize = halfSize << 1;
}

i = bufferSize/2;
while(i--) {
spectrum[i] = 2 * Math.sqrt(real[i] * real[i] + imag[i] * imag[i]) / bufferSize;
}

return spectrum;
};
/* end of FFT */

var testFile = "file_a4_tone.ogg";
var testFileDuration = 3.0;
var testFileChannelCount = 1;
var testFileSampleRate = 44100;
var testFileFrameBufferLength = 1024;
var signal = [{start:1.1, end: 1.9, fftBin: 10 } ];
var noSignal = [{start:0.1, end: 0.9 }, {start:2.1, end: 2.9 } ];

var undef;
var fft, fftBufferSize;
var currentSampleOffset = 0;
var spectrumMaxs = [];
var isTimePropertyValid = true;

function audioAvailable(event) {
var buffer = event.frameBuffer;

if(fft === undef) {
fftBufferSize = buffer.length;
fft = new FFT(fftBufferSize, testFileSampleRate);
}

fft.forward(buffer);

var spectrum = fft.spectrum;
// Finding the peak frequency
var maxIndex = 0, maxValue = spectrum[0];
for(var i=0;i<spectrum.length;i++) {
if(maxValue < spectrum[i]) {
maxValue = spectrum[maxIndex = i];
}
}

spectrumMaxs.push({ value: maxValue, index: maxIndex, time: (currentSampleOffset / testFileSampleRate) });

if( (typeof event.time !== "number") ||
(Math.abs(event.time - currentSampleOffset / testFileSampleRate) > 0.01) ) {
isTimePropertyValid = false;
}

currentSampleOffset += buffer.length;
}

var loadedMetadataCalled = false;
function loadedMetadata() {
loadedMetadataCalled = true;
var a1 = document.getElementById('a1');
is(a1.mozChannels, testFileChannelCount, "mozChannels should be " + testFileChannelCount + ".");
is(a1.mozSampleRate, testFileSampleRate, "mozSampleRate should be " + testFileSampleRate + ".");
is(a1.mozFrameBufferLength, testFileFrameBufferLength, "mozFrameBufferLength should be " + testFileFrameBufferLength + ".");
}

function checkResults() {
ok(loadedMetadataCalled, "loadedmetadata event not dispatched.");
ok(isTimePropertyValid, "The audioAvailable event's time attribute was invalid.");

var expectedOffset = Math.ceil(testFileDuration * testFileSampleRate);
if(expectedOffset % fftBufferSize !== 0) { expectedOffset += (fftBufferSize - (expectedOffset % fftBufferSize)); }
is(currentSampleOffset, expectedOffset, "Check amount of signal data processed");

var i, j;
var signalPresent = true;
for(i=0;i<signal.length;++i) {
var signalAnalysed = false;
for(j=0;j<spectrumMaxs.length;++j) {
if(signal[i].start <= spectrumMaxs[j].time && spectrumMaxs[j].time < signal[i].end) {
signalAnalysed = true;
signalPresent = spectrumMaxs[j].index == signal[i].fftBin;
}
if(!signalPresent) break;
}
if(!signalAnalysed) signalPresent = false;
if(!signalPresent) break;
}
is(signalPresent, true, "Check signal present");

var noSignalPresent = true;
for(i=0;i<noSignal.length;++i) {
var signalAnalysed = false;
for(j=0;j<spectrumMaxs.length;++j) {
if(noSignal[i].start <= spectrumMaxs[j].time && spectrumMaxs[j].time < noSignal[i].end) {
signalAnalysed = true;
noSignalPresent = spectrumMaxs[j].index == 0;
}
if(!noSignalPresent) break;
}
if(!signalAnalysed) noSignalPresent = false;
if(!noSignalPresent) break;
}
is(noSignalPresent, true, "Check mute fragments present");

SimpleTest.finish();
}

function audioEnded() {
checkResults();
}

function initTest() {
var a1 = document.getElementById('a1');
a1.addEventListener("ended", audioEnded, false);
a1.addEventListener("loadedmetadata", loadedMetadata, false);
a1.addEventListener("MozAudioAvailable", audioAvailable, false);
a1.src = testFile;
a1.muted = true;
a1.play();
}

window.addEventListener("load", function(e) {
initTest();
}, false);

SimpleTest.waitForExplicitFinish();

</script>
</pre>
</body>
</html>

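
One detail of the deleted test above that may not be obvious is the hard-coded fftBin: 10 in the signal table. Assuming the tone in file_a4_tone.ogg is a standard 440 Hz A4 (an assumption made here, not stated in the test), that bin index follows directly from the FFT parameters used above:

// Hypothetical calculation, not part of the test: where a 440 Hz tone lands
// in a 1024-point spectrum at 44100 Hz.
var sampleRate = 44100;                       // testFileSampleRate above
var fftSize = 1024;                           // testFileFrameBufferLength above
var binWidth = sampleRate / fftSize;          // about 43.07 Hz per spectrum bin
var expectedBin = Math.round(440 / binWidth); // Math.round(10.22) === 10
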
@ -1,41 +0,0 @@
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=490705
-->
<head>
<title>Media test: addEventListener optimization and adoptNode</title>
<script type="text/javascript" src="/MochiKit/packed.js"></script>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />

<script>
function adopt() {
var a1Node = document.getElementById("f1").contentDocument.getElementById("a1");
var adopted = document.adoptNode(a1Node);
document.body.appendChild(adopted);
return adopted;
}
function wasAudioAvailableCalled() {
var resultNode = document.getElementById("f1").contentDocument.getElementById("wasAudioAvailableCalled");
return document.adoptNode(resultNode).checked;
}
function endTest() {
is(wasAudioAvailableCalled(), true, "audioAvailable was not called");

SimpleTest.finish();
}
function startTest() {
var audio = adopt();
audio.addEventListener("ended", endTest, false);
audio.play();
}

SimpleTest.waitForExplicitFinish();
</script>

</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=490705">Mozilla Bug 490705</a>
<iframe id="f1" src="file_audio_event_adopt_iframe.html" onload="startTest()"></iframe>
</body>
</html>

@ -1,70 +0,0 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=490705
-->

<head>
<title>Media test: simple audio write checks</title>
<script type="text/javascript" src="/MochiKit/packed.js"></script>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=490705">Mozilla Bug 490705</a>

<pre id="test">
<script class="testbody" type="text/javascript">

var channels = 2;
var rate = 44100;

function runTests() {
var a1 = new Audio();
a1.mozSetup(channels, rate);

is(a1.mozChannels, channels, "mozChannels should be " + channels + ".");
is(a1.mozSampleRate, rate, "mozSampleRate should be " + rate + ".");
is(a1.volume, 1.0, "volume should be 1.0 by default.");

// Make sure changing volume on the audio element changes the write audio stream.
a1.volume = 0.5;
is(a1.volume, 0.5, "volume should have been changed to 0.5.");
a1.muted = true;
ok(a1.muted, "audio should be muted.");

is(a1.mozCurrentSampleOffset(), 0, "mozCurrentSampleOffset() not working.");

// Test writing with a JS array
var samples1 = [.5, .5];
var written = a1.mozWriteAudio(samples1);
is(written, samples1.length, "Not all samples in JS Array written.");

// Test writing with a Float32Array
var samples2 = new Float32Array([.2, .3, .2, .3]);
written = a1.mozWriteAudio(samples2);
is(written, samples2.length, "Not all samples in Float32Array written.");

// Test passing the wrong arguments to mozWriteAudio.
var writeArgsOK = false;
try {
// incorrect, should throw
written = a1.mozWriteAudio(samples2.length, samples2);
} catch(e) {
writeArgsOK = true;
}
ok(writeArgsOK, "mozWriteAudio args test failed.");

SimpleTest.finish();
}

window.addEventListener("load", function(e) {
runTests();
}, false);

SimpleTest.waitForExplicitFinish();

</script>
</pre>
</body>
</html>

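
For context, the deleted test above exercises the write side of the Audio Data API (mozSetup, mozWriteAudio, mozCurrentSampleOffset). A minimal usage sketch of that API, with an arbitrary example tone rather than anything taken from the test, would look like this:

// Illustrative sketch of the audio write API removed by this backout; the
// 440 Hz tone and one-second length are arbitrary example values.
var audio = new Audio();
audio.mozSetup(1, 44100);                    // mono stream at 44100 Hz

var samples = new Float32Array(44100);       // one second of samples
for (var i = 0; i < samples.length; i++) {
  samples[i] = Math.sin(2 * Math.PI * 440 * i / 44100) * 0.5;
}

// mozWriteAudio() returns how many samples were actually accepted.
var written = audio.mozWriteAudio(samples);
// mozCurrentSampleOffset() reports playback progress in samples since mozSetup().
var offset = audio.mozCurrentSampleOffset();
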
@ -147,15 +147,6 @@ public:
// before metadata validation has completed. Threadsafe.
float GetDuration();

// Returns the number of channels extracted from the metadata. Returns 0
// if called before metadata validation has completed. Threadsafe.
PRUint32 GetChannels();

// Returns the audio sample rate (number of samples per second) extracted
// from the metadata. Returns 0 if called before metadata validation has
// completed. Threadsafe.
PRUint32 GetSampleRate();

// Returns true if the state machine is seeking. Threadsafe.
PRBool IsSeeking();

@ -480,26 +471,6 @@ nsWaveStateMachine::GetDuration()
return std::numeric_limits<float>::quiet_NaN();
}

PRUint32
nsWaveStateMachine::GetChannels()
{
nsAutoMonitor monitor(mMonitor);
if (mMetadataValid) {
return mChannels;
}
return 0;
}

PRUint32
nsWaveStateMachine::GetSampleRate()
{
nsAutoMonitor monitor(mMonitor);
if (mMetadataValid) {
return mSampleRate;
}
return 0;
}

PRBool
nsWaveStateMachine::IsSeeking()
{
@ -1401,8 +1372,7 @@ nsWaveDecoder::MetadataLoaded()
}

if (mElement) {
mElement->MetadataLoaded(mPlaybackStateMachine->GetChannels(),
mPlaybackStateMachine->GetSampleRate());
mElement->MetadataLoaded();
mElement->FirstFrameLoaded(mResourceLoaded);
}

@ -236,7 +236,6
#include "nsIDOMMessageEvent.h"
#include "nsPaintRequest.h"
#include "nsIDOMNotifyPaintEvent.h"
#include "nsIDOMNotifyAudioAvailableEvent.h"
#include "nsIDOMScrollAreaEvent.h"
#include "nsIDOMTransitionEvent.h"
#include "nsIDOMNSDocumentStyle.h"
@ -1381,9 +1380,6 @@ static nsDOMClassInfoData sClassInfoData[] = {
NS_DEFINE_CLASSINFO_DATA(NotifyPaintEvent, nsDOMGenericSH,
DOM_DEFAULT_SCRIPTABLE_FLAGS)

NS_DEFINE_CLASSINFO_DATA(NotifyAudioAvailableEvent, nsDOMGenericSH,
DOM_DEFAULT_SCRIPTABLE_FLAGS)

NS_DEFINE_CLASSINFO_DATA(SimpleGestureEvent, nsDOMGenericSH,
DOM_DEFAULT_SCRIPTABLE_FLAGS)

@ -3922,11 +3918,6 @@ nsDOMClassInfo::Init()
DOM_CLASSINFO_EVENT_MAP_ENTRIES
DOM_CLASSINFO_MAP_END

DOM_CLASSINFO_MAP_BEGIN(NotifyAudioAvailableEvent, nsIDOMNotifyAudioAvailableEvent)
DOM_CLASSINFO_MAP_ENTRY(nsIDOMNotifyAudioAvailableEvent)
DOM_CLASSINFO_EVENT_MAP_ENTRIES
DOM_CLASSINFO_MAP_END

DOM_CLASSINFO_MAP_BEGIN(SimpleGestureEvent, nsIDOMSimpleGestureEvent)
DOM_CLASSINFO_MAP_ENTRY(nsIDOMSimpleGestureEvent)
DOM_CLASSINFO_MAP_ENTRY(nsIDOMMouseEvent)

@ -447,8 +447,6 @@ DOMCI_CLASS(DataTransfer)

DOMCI_CLASS(NotifyPaintEvent)

DOMCI_CLASS(NotifyAudioAvailableEvent)

DOMCI_CLASS(SimpleGestureEvent)

DOMCI_CLASS(MozTouchEvent)

@ -646,8 +646,8 @@ nsPIDOMWindow::nsPIDOMWindow(nsPIDOMWindow *aOuterWindow)
mRunningTimeout(nsnull), mMutationBits(0), mIsDocumentLoaded(PR_FALSE),
mIsHandlingResizeEvent(PR_FALSE), mIsInnerWindow(aOuterWindow != nsnull),
mMayHavePaintEventListener(PR_FALSE), mMayHaveTouchEventListener(PR_FALSE),
mMayHaveAudioAvailableEventListener(PR_FALSE), mIsModalContentWindow(PR_FALSE),
mIsActive(PR_FALSE), mInnerWindow(nsnull), mOuterWindow(aOuterWindow) {}
mIsModalContentWindow(PR_FALSE), mIsActive(PR_FALSE),
mInnerWindow(nsnull), mOuterWindow(aOuterWindow) {}

nsPIDOMWindow::~nsPIDOMWindow() {}

@ -430,24 +430,6 @@ public:
MaybeUpdateTouchState();
}

/**
 * Call this to check whether some node (this window, its document,
 * or content in that document) has a MozAudioAvailable event listener.
 */
PRBool HasAudioAvailableEventListeners()
{
return mMayHaveAudioAvailableEventListener;
}

/**
 * Call this to indicate that some node (this window, its document,
 * or content in that document) has a MozAudioAvailable event listener.
 */
void SetHasAudioAvailableEventListeners()
{
mMayHaveAudioAvailableEventListener = PR_TRUE;
}

/**
 * Initialize window.java and window.Packages.
 */
@ -587,7 +569,6 @@ protected:
PRPackedBool mIsInnerWindow;
PRPackedBool mMayHavePaintEventListener;
PRPackedBool mMayHaveTouchEventListener;
PRPackedBool mMayHaveAudioAvailableEventListener;

// This variable is used on both inner and outer windows (and they
// should match).

@ -76,7 +76,6 @@ XPIDLSRCS = \
nsIDOMCommandEvent.idl \
nsIDOMMessageEvent.idl \
nsIDOMNotifyPaintEvent.idl \
nsIDOMNotifyAudioAvailableEvent.idl \
nsIDOMPaintRequest.idl \
nsIDOMPaintRequestList.idl \
nsIDOMSimpleGestureEvent.idl \

@ -1,62 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla code.
 *
 * The Initial Developer of the Original Code is the Mozilla Foundation.
 * Portions created by the Initial Developer are Copyright (C) 2010
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *  David Humphrey <david.humphrey@senecac.on.ca>
 *  Yury Delendik
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

#include "nsIDOMEvent.idl"
#include "nsIVariant.idl"

%{ C++
#include "jspubtd.h"
%}

[scriptable, uuid(6250652d-7a6a-49a4-a2ee-9114e1e83427)]
interface nsIDOMNotifyAudioAvailableEvent : nsIDOMEvent
{
[implicit_jscontext]
readonly attribute jsval frameBuffer;

readonly attribute float time;

void initAudioAvailableEvent(in DOMString typeArg,
in boolean canBubbleArg,
in boolean cancelableArg,
[array, size_is(frameBufferLength)] in float frameBufferPtr,
in unsigned long frameBufferLength,
in float time,
in boolean allowAudioData);
};

@ -38,10 +38,6 @@

#include "nsIDOMHTMLMediaElement.idl"

%{C++
#include "jsapi.h"
%}

/**
 * The nsIDOMHTMLAudioElement interface is the interface to a HTML
 * <audio> element.
@ -52,17 +48,8 @@
 * @status UNDER_DEVELOPMENT
 */

[scriptable, uuid(cd1a6a6b-bc4c-4e5a-b7da-53dccc878ab8)]
[scriptable, uuid(5ecd8913-a738-41be-8597-7f3a4ffba017)]
interface nsIDOMHTMLAudioElement : nsIDOMHTMLMediaElement
{
// Setup the audio stream for writing
void mozSetup(in PRUint32 channels, in PRUint32 rate);

// Write audio to the audio stream
[implicit_jscontext]
unsigned long mozWriteAudio(in jsval data);

// Get the current offset (measured in samples since the start) of the audio
// stream created using mozWriteAudio().
unsigned long long mozCurrentSampleOffset();
};

@ -57,7 +57,7 @@
#endif
%}

[scriptable, uuid(f748b7db-4ab9-4370-835d-59f30c8de57c)]
[scriptable, uuid(b6c9f51d-237c-44d1-842d-996f4d62c843)]
interface nsIDOMHTMLMediaElement : nsIDOMHTMLElement
{
// error state
@ -101,16 +101,6 @@ interface nsIDOMHTMLMediaElement : nsIDOMHTMLElement
attribute float volume;
attribute boolean muted;

// Mozilla extension: extra stream metadata information, used as part
// of audioavailable events and the mozWriteAudio() method. The
// mozFrameBufferLength attribute allows the size of the framebuffer
// used within audioavailable events to be changed. The new size must
// be a power of 2 between 512 and 32768. The default size, for a
// media element with audio, is (mozChannels * 1024).
readonly attribute unsigned long mozChannels;
readonly attribute unsigned long mozSampleRate;
attribute unsigned long mozFrameBufferLength;

// Mozilla extension: load data from another media element. This is like
// load() but we don't run the resource selection algorithm; instead
// we just set our source to other's currentSrc. This is optimized

@ -451,11 +451,6 @@ members = [
'-nsICanvasRenderingContextWebGL.getVertexAttrib',
'-nsICanvasRenderingContextWebGL.getShaderParameter',

# Audio
'nsIDOMNotifyAudioAvailableEvent.frameBuffer',
'nsIDOMNotifyAudioAvailableEvent.time',
'nsIDOMHTMLAudioElement.mozWriteAudio',

# dom/indexedDB
'nsIIDBCursor.*',
'nsIIDBDatabase.*',

@ -417,7 +417,6 @@ class nsHashKey;
#define NS_RATECHANGE (NS_MEDIA_EVENT_START+17)
#define NS_DURATIONCHANGE (NS_MEDIA_EVENT_START+18)
#define NS_VOLUMECHANGE (NS_MEDIA_EVENT_START+19)
#define NS_MOZAUDIOAVAILABLE (NS_MEDIA_EVENT_START+20)
#endif // MOZ_MEDIA

// paint notification events

@ -597,18 +597,6 @@ public: \
}; \
NS_CYCLE_COLLECTION_PARTICIPANT_INSTANCE

#define NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS_INHERITED(_class, \
_base_class) \
class NS_CYCLE_COLLECTION_INNERCLASS \
: public NS_CYCLE_COLLECTION_CLASSNAME(_base_class) \
{ \
public: \
NS_IMETHOD RootAndUnlinkJSObjects(void *p); \
NS_IMETHOD_(void) Trace(void *p, TraceCallback cb, void *closure); \
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED_BODY(_class, _base_class) \
}; \
NS_CYCLE_COLLECTION_PARTICIPANT_INSTANCE

/**
 * This implements a stub UnmarkPurple function for classes that want to be
 * traversed but whose AddRef/Release functions don't add/remove them to/from
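
To round out the picture, the read side of the API removed by this backout (the MozAudioAvailable event and the mozChannels/mozSampleRate/mozFrameBufferLength attributes whose IDL appears above) was consumed roughly as sketched below. The element id and the 2048 framebuffer size are example values, not taken from the patch.

// Illustrative sketch of the event-consumer side of the removed API.
var audio = document.getElementById("player");   // hypothetical element id

audio.addEventListener("loadedmetadata", function () {
  var channels = audio.mozChannels;     // e.g. 2 for stereo
  var rate = audio.mozSampleRate;       // e.g. 44100
  // Optionally request a different framebuffer size; per the IDL comments
  // above it must be a power of 2 between 512 and 32768.
  audio.mozFrameBufferLength = 2048;
}, false);

audio.addEventListener("MozAudioAvailable", function (event) {
  var samples = event.frameBuffer;  // array of float samples for this batch
  var time = event.time;            // start time of this batch, in seconds
  // ... analyze or visualize the samples here ...
}, false);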