mirror of
https://github.com/mozilla/gecko-dev.git
synced 2024-12-04 11:26:09 +00:00
Backed out 15 changesets (bug 1014393) for causing bustages in TestMuxer.cpp and gmock-internal-utils.h CLOSED TREE
Backed out changeset 0ddbdbfa87f6 (bug 1014393)
Backed out changeset 37399c83d3bf (bug 1014393)
Backed out changeset fe652475a785 (bug 1014393)
Backed out changeset 2812ca77d87d (bug 1014393)
Backed out changeset 6755ef7b361c (bug 1014393)
Backed out changeset 72e545bdcce3 (bug 1014393)
Backed out changeset 2f030ee55722 (bug 1014393)
Backed out changeset e9416b502170 (bug 1014393)
Backed out changeset 5a01d6d44634 (bug 1014393)
Backed out changeset 3457a5065dc4 (bug 1014393)
Backed out changeset 2a4999e0475b (bug 1014393)
Backed out changeset 0be3804dc04e (bug 1014393)
Backed out changeset 234994d8f136 (bug 1014393)
Backed out changeset d17f91ff2014 (bug 1014393)
Backed out changeset 25f58baa8159 (bug 1014393)

--HG--
rename : dom/media/encoder/EncodedFrame.h => dom/media/encoder/EncodedFrameContainer.h
parent 9fb0a30230
commit e0b0dfa5eb
@@ -5,7 +5,6 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "Blob.h"
#include "EmptyBlobImpl.h"
#include "File.h"
#include "MemoryBlobImpl.h"
#include "mozilla/dom/BlobBinding.h"
@@ -73,14 +72,6 @@ Blob* Blob::Create(nsISupports* aParent, BlobImpl* aImpl) {
  return aImpl->IsFile() ? new File(aParent, aImpl) : new Blob(aParent, aImpl);
}

/* static */
already_AddRefed<Blob> Blob::CreateEmptyBlob(nsISupports* aParent,
                                             const nsAString& aContentType) {
  RefPtr<Blob> blob = Blob::Create(aParent, new EmptyBlobImpl(aContentType));
  MOZ_ASSERT(!blob->mImpl->IsFile());
  return blob.forget();
}

/* static */
already_AddRefed<Blob> Blob::CreateStringBlob(nsISupports* aParent,
                                              const nsACString& aData,
@@ -50,9 +50,6 @@ class Blob : public nsIMutable,
  // This creates a Blob or a File based on the type of BlobImpl.
  static Blob* Create(nsISupports* aParent, BlobImpl* aImpl);

  static already_AddRefed<Blob> CreateEmptyBlob(nsISupports* aParent,
                                                const nsAString& aContentType);

  static already_AddRefed<Blob> CreateStringBlob(nsISupports* aParent,
                                                 const nsACString& aData,
                                                 const nsAString& aContentType);
@@ -3112,5 +3112,3 @@ void MediaFormatReader::OnFirstDemuxFailed(TrackInfo::TrackType aType,
}  // namespace mozilla

#undef NS_DispatchToMainThread
#undef LOGV
#undef LOG
@@ -170,6 +170,10 @@ class nsMainThreadPtrHolder<

namespace mozilla {

#ifdef LOG
#  undef LOG
#endif

LazyLogModule gMediaManagerLog("MediaManager");
#define LOG(...) MOZ_LOG(gMediaManagerLog, LogLevel::Debug, (__VA_ARGS__))
@@ -4657,6 +4661,4 @@ void GetUserMediaWindowListener::NotifyChrome() {
  }));
}

#undef LOG

}  // namespace mozilla
@@ -40,6 +40,10 @@
#include "nsProxyRelease.h"
#include "nsTArray.h"

#ifdef LOG
#  undef LOG
#endif

mozilla::LazyLogModule gMediaRecorderLog("MediaRecorder");
#define LOG(type, msg) MOZ_LOG(gMediaRecorderLog, type, msg)
@@ -197,15 +201,72 @@ NS_IMPL_RELEASE_INHERITED(MediaRecorder, DOMEventTargetHelper)
 * Therefore, the reference dependency in gecko is:
 * ShutdownBlocker -> Session <-> MediaRecorder, note that there is a cycle
 * reference between Session and MediaRecorder.
 * 2) A Session is destroyed after MediaRecorder::Stop has been called _and_ all
 * encoded media data has been passed to OnDataAvailable handler. 3)
 * MediaRecorder::Stop is called by user or the document is going to inactive or
 * invisible.
 * 2) A Session is destroyed in DestroyRunnable after MediaRecorder::Stop being
 * called _and_ all encoded media data been passed to OnDataAvailable handler.
 * 3) MediaRecorder::Stop is called by user or the document is going to
 * inactive or invisible.
 */
class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
                               public DOMMediaStream::TrackListener {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Session)

  // Main thread task.
  // Create a blob event and send back to client.
  class PushBlobRunnable : public Runnable, public MutableBlobStorageCallback {
   public:
    // We need to always declare refcounting because
    // MutableBlobStorageCallback has pure-virtual refcounting.
    NS_DECL_ISUPPORTS_INHERITED

    // aDestroyRunnable can be null. If it's not, it will be dispatched after
    // the PushBlobRunnable::Run().
    PushBlobRunnable(Session* aSession, Runnable* aDestroyRunnable)
        : Runnable("dom::MediaRecorder::Session::PushBlobRunnable"),
          mSession(aSession),
          mDestroyRunnable(aDestroyRunnable) {}

    NS_IMETHOD Run() override {
      LOG(LogLevel::Debug, ("Session.PushBlobRunnable s=(%p)", mSession.get()));
      MOZ_ASSERT(NS_IsMainThread());

      mSession->GetBlobWhenReady(this);
      return NS_OK;
    }

    void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, Blob* aBlob,
                            nsresult aRv) override {
      RefPtr<MediaRecorder> recorder = mSession->mRecorder;
      if (!recorder) {
        return;
      }

      if (NS_FAILED(aRv)) {
        mSession->DoSessionEndTask(aRv);
        return;
      }

      nsresult rv = recorder->CreateAndDispatchBlobEvent(aBlob);
      if (NS_FAILED(rv)) {
        mSession->DoSessionEndTask(aRv);
      }

      if (mDestroyRunnable &&
          NS_FAILED(NS_DispatchToMainThread(mDestroyRunnable.forget()))) {
        MOZ_ASSERT(false, "NS_DispatchToMainThread failed");
      }
    }

   private:
    ~PushBlobRunnable() = default;

    RefPtr<Session> mSession;

    // The generation of the blob is async. In order to avoid dispatching the
    // DestroyRunnable before pushing the blob event, we store the runnable
    // here.
    RefPtr<Runnable> mDestroyRunnable;
  };

  class StoreEncodedBufferRunnable final : public Runnable {
    RefPtr<Session> mSession;
    nsTArray<nsTArray<uint8_t>> mBuffer;
@@ -238,6 +299,31 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    }
  };

  // Notify encoder error, run in main thread task. (Bug 1095381)
  class EncoderErrorNotifierRunnable : public Runnable {
   public:
    explicit EncoderErrorNotifierRunnable(Session* aSession)
        : Runnable("dom::MediaRecorder::Session::EncoderErrorNotifierRunnable"),
          mSession(aSession) {}

    NS_IMETHOD Run() override {
      LOG(LogLevel::Debug,
          ("Session.ErrorNotifyRunnable s=(%p)", mSession.get()));
      MOZ_ASSERT(NS_IsMainThread());

      RefPtr<MediaRecorder> recorder = mSession->mRecorder;
      if (!recorder) {
        return NS_OK;
      }

      recorder->NotifyError(NS_ERROR_UNEXPECTED);
      return NS_OK;
    }

   private:
    RefPtr<Session> mSession;
  };

  // Fire a named event, run in main thread task.
  class DispatchEventRunnable : public Runnable {
   public:
@@ -264,6 +350,75 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    nsString mEventName;
  };

  // Main thread task.
  // To delete RecordingSession object.
  class DestroyRunnable : public Runnable {
   public:
    explicit DestroyRunnable(Session* aSession)
        : Runnable("dom::MediaRecorder::Session::DestroyRunnable"),
          mSession(aSession) {}

    explicit DestroyRunnable(already_AddRefed<Session> aSession)
        : Runnable("dom::MediaRecorder::Session::DestroyRunnable"),
          mSession(aSession) {}

    NS_IMETHOD Run() override {
      LOG(LogLevel::Debug,
          ("Session.DestroyRunnable session refcnt = (%d) s=(%p)",
           static_cast<int>(mSession->mRefCnt), mSession.get()));
      MOZ_ASSERT(NS_IsMainThread() && mSession);
      RefPtr<MediaRecorder> recorder = mSession->mRecorder;
      if (!recorder) {
        return NS_OK;
      }
      // SourceMediaStream is ended, and send out TRACK_EVENT_END notification.
      // Read Thread will be terminate soon.
      // We need to switch MediaRecorder to "Stop" state first to make sure
      // MediaRecorder is not associated with this Session anymore, then, it's
      // safe to delete this Session.
      // Also avoid to run if this session already call stop before
      if (mSession->mRunningState.isOk() &&
          mSession->mRunningState.unwrap() != RunningState::Stopping &&
          mSession->mRunningState.unwrap() != RunningState::Stopped) {
        recorder->StopForSessionDestruction();
        if (NS_FAILED(NS_DispatchToMainThread(
                new DestroyRunnable(mSession.forget())))) {
          MOZ_ASSERT(false, "NS_DispatchToMainThread failed");
        }
        return NS_OK;
      }

      if (mSession->mRunningState.isOk()) {
        mSession->mRunningState = RunningState::Stopped;
      }

      // Dispatch stop event and clear MIME type.
      mSession->mMimeType = NS_LITERAL_STRING("");
      recorder->SetMimeType(mSession->mMimeType);
      recorder->DispatchSimpleEvent(NS_LITERAL_STRING("stop"));

      RefPtr<Session> session = mSession.forget();
      session->Shutdown()->Then(
          GetCurrentThreadSerialEventTarget(), __func__,
          [session]() {
            gSessions.RemoveEntry(session);
            if (gSessions.Count() == 0 && gMediaRecorderShutdownBlocker) {
              // All sessions finished before shutdown, no need to keep the
              // blocker.
              RefPtr<nsIAsyncShutdownClient> barrier = GetShutdownBarrier();
              barrier->RemoveBlocker(gMediaRecorderShutdownBlocker);
              gMediaRecorderShutdownBlocker = nullptr;
            }
          },
          []() { MOZ_CRASH("Not reached"); });
      return NS_OK;
    }

   private:
    // Call mSession::Release automatically while DestroyRunnable be destroy.
    RefPtr<Session> mSession;
  };

  class EncoderListener : public MediaEncoderListener {
   public:
    EncoderListener(TaskQueue* aEncoderThread, Session* aSession)
@@ -307,21 +462,22 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    RefPtr<Session> mSession;
  };

  friend class EncoderErrorNotifierRunnable;
  friend class PushBlobRunnable;
  friend class DestroyRunnable;

 public:
  Session(MediaRecorder* aRecorder, uint32_t aTimeSlice)
      : mRecorder(aRecorder),
        mMediaStreamReady(false),
        mMainThread(mRecorder->GetOwner()->EventTargetFor(TaskCategory::Other)),
        mTimeSlice(aTimeSlice),
        mStartTime(TimeStamp::Now()),
        mRunningState(RunningState::Idling) {
    MOZ_ASSERT(NS_IsMainThread());

    aRecorder->GetMimeType(mMimeType);
    mMaxMemory = Preferences::GetUint("media.recorder.max_memory",
                                      MAX_ALLOW_MEMORY_BUFFER);
    mLastBlobTimeStamp = mStartTime;
    Telemetry::ScalarAdd(Telemetry::ScalarID::MEDIARECORDER_RECORDING_COUNT, 1);
    mLastBlobTimeStamp = TimeStamp::Now();
  }

  void PrincipalChanged(MediaStreamTrack* aTrack) override {
@@ -433,7 +589,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    if (mRunningState.isOk() &&
        mRunningState.unwrap() == RunningState::Idling) {
      LOG(LogLevel::Debug, ("Session.Stop Explicit end task %p", this));
      // End the Session directly if there is no encoder.
      // End the Session directly if there is no ExtractRunnable.
      DoSessionEndTask(NS_OK);
    } else if (mRunningState.isOk() &&
               (mRunningState.unwrap() == RunningState::Starting ||
@@ -470,26 +626,17 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    return NS_OK;
  }

  void RequestData() {
  nsresult RequestData() {
    LOG(LogLevel::Debug, ("Session.RequestData"));
    MOZ_ASSERT(NS_IsMainThread());

    GatherBlob()->Then(
        mMainThread, __func__,
        [this, self = RefPtr<Session>(this)](
            const BlobPromise::ResolveOrRejectValue& aResult) {
          if (aResult.IsReject()) {
            LOG(LogLevel::Warning, ("GatherBlob failed for RequestData()"));
            DoSessionEndTask(aResult.RejectValue());
            return;
          }
    if (NS_FAILED(
            NS_DispatchToMainThread(new PushBlobRunnable(this, nullptr)))) {
      MOZ_ASSERT(false, "RequestData NS_DispatchToMainThread failed");
      return NS_ERROR_FAILURE;
    }

          nsresult rv =
              mRecorder->CreateAndDispatchBlobEvent(aResult.ResolveValue());
          if (NS_FAILED(rv)) {
            DoSessionEndTask(NS_OK);
          }
        });
    return NS_OK;
  }

  void MaybeCreateMutableBlobStorage() {
@@ -499,42 +646,14 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    }
  }

  static const bool IsExclusive = true;
  using BlobPromise = MozPromise<RefPtr<Blob>, nsresult, IsExclusive>;
  class BlobStorer : public MutableBlobStorageCallback {
    MozPromiseHolder<BlobPromise> mHolder;

    virtual ~BlobStorer() = default;

   public:
    BlobStorer() = default;

    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BlobStorer, override)

    void BlobStoreCompleted(MutableBlobStorage*, Blob* aBlob,
                            nsresult aRv) override {
      MOZ_ASSERT(NS_IsMainThread());
      if (NS_FAILED(aRv)) {
        mHolder.Reject(aRv, __func__);
      } else {
        mHolder.Resolve(aBlob, __func__);
      }
    }

    RefPtr<BlobPromise> Promise() { return mHolder.Ensure(__func__); }
  };

  // Stops gathering data into the current blob and resolves when the current
  // blob is available. Future data will be stored in a new blob.
  RefPtr<BlobPromise> GatherBlob() {
  void GetBlobWhenReady(MutableBlobStorageCallback* aCallback) {
    MOZ_ASSERT(NS_IsMainThread());
    RefPtr<BlobStorer> storer = MakeAndAddRef<BlobStorer>();
    MaybeCreateMutableBlobStorage();
    mMutableBlobStorage->GetBlobWhenReady(
        mRecorder->GetOwner(), NS_ConvertUTF16toUTF8(mMimeType), storer);
    mMutableBlobStorage = nullptr;

    return storer->Promise();
    MaybeCreateMutableBlobStorage();
    mMutableBlobStorage->GetBlobWhenReady(mRecorder->GetParentObject(),
                                          NS_ConvertUTF16toUTF8(mMimeType),
                                          aCallback);
    mMutableBlobStorage = nullptr;
  }

  RefPtr<SizeOfPromise> SizeOfExcludingThis(
@@ -559,16 +678,17 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
  }

 private:
  // Only DestroyRunnable is allowed to delete Session object on main thread.
  virtual ~Session() {
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(mShutdownPromise);
    LOG(LogLevel::Debug, ("Session.~Session (%p)", this));
  }

  // Pull encoded media data from MediaEncoder and put into MutableBlobStorage.
  // If the bool aForceFlush is true, we will force a dispatch of a blob to
  // main thread.
  void Extract(bool aForceFlush) {
  // Destroy this session object in the end of this function.
  // If the bool aForceFlush is true, we will force to dispatch a
  // PushBlobRunnable to main thread.
  void Extract(bool aForceFlush, Runnable* aDestroyRunnable) {
    MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

    LOG(LogLevel::Debug, ("Session.Extract %p", this));
@@ -596,24 +716,16 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
      pushBlob = true;
    }
    if (pushBlob) {
      mLastBlobTimeStamp = TimeStamp::Now();
      InvokeAsync(mMainThread, this, __func__, &Session::GatherBlob)
          ->Then(mMainThread, __func__,
                 [this, self = RefPtr<Session>(this)](
                     const BlobPromise::ResolveOrRejectValue& aResult) {
                   if (aResult.IsReject()) {
                     LOG(LogLevel::Warning,
                         ("GatherBlob failed for pushing blob"));
                     DoSessionEndTask(aResult.RejectValue());
                     return;
                   }

                   nsresult rv = mRecorder->CreateAndDispatchBlobEvent(
                       aResult.ResolveValue());
                   if (NS_FAILED(rv)) {
                     DoSessionEndTask(NS_OK);
                   }
                 });
      if (NS_FAILED(NS_DispatchToMainThread(
              new PushBlobRunnable(this, aDestroyRunnable)))) {
        MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed");
      } else {
        mLastBlobTimeStamp = TimeStamp::Now();
      }
    } else if (aDestroyRunnable) {
      if (NS_FAILED(NS_DispatchToMainThread(aDestroyRunnable))) {
        MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed");
      }
    }
  }
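The timeslice test that feeds `pushBlob` above is simple wall-clock arithmetic. A minimal sketch of that check in isolation follows; it is illustrative only and not part of this commit, and the exact condition preceding this hunk is inferred from how `mTimeSlice` and `mLastBlobTimeStamp` are used above.

// Illustrative sketch, not part of this commit: push a blob when a flush is
// forced, or when a nonzero timeslice (in milliseconds) has elapsed since
// the last blob was pushed.
static bool ShouldPushBlob(bool aForceFlush, uint32_t aTimeSliceMs,
                           const TimeStamp& aLastBlobTimeStamp) {
  return aForceFlush ||
         (aTimeSliceMs > 0 &&
          (TimeStamp::Now() - aLastBlobTimeStamp).ToMilliseconds() >
              aTimeSliceMs);
}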
@@ -667,7 +779,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    // When MediaRecorder supports multiple tracks, we should set up a single
    // MediaInputPort from the input stream, and let main thread check
    // track principals async later.
    nsPIDOMWindowInner* window = mRecorder->GetOwner();
    nsPIDOMWindowInner* window = mRecorder->GetParentObject();
    Document* document = window ? window->GetExtantDoc() : nullptr;
    nsContentUtils::ReportToConsole(nsIScriptError::errorFlag,
                                    NS_LITERAL_CSTRING("Media"), document,
@@ -870,18 +982,12 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    // appropriate video keyframe interval defined in milliseconds.
    mEncoder->SetVideoKeyFrameInterval(mTimeSlice);

    // Set mRunningState to Running so that DoSessionEndTask will
    // Set mRunningState to Running so that ExtractRunnable/DestroyRunnable will
    // take the responsibility to end the session.
    mRunningState = RunningState::Starting;
  }

  // This is the task that will stop recording per spec:
  // - Stop gathering data (this is inherently async)
  // - Set state to "inactive"
  // - Fire an error event, if NS_FAILED(rv)
  // - Discard blob data if rv is NS_ERROR_DOM_SECURITY_ERR
  // - Fire a Blob event
  // - Fire an event named stop
  // application should get blob and onstop event
  void DoSessionEndTask(nsresult rv) {
    MOZ_ASSERT(NS_IsMainThread());
    if (mRunningState.isErr()) {
@@ -895,11 +1001,11 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
      return;
    }

    bool needsStartEvent = false;
    if (mRunningState.isOk() &&
        (mRunningState.unwrap() == RunningState::Idling ||
         mRunningState.unwrap() == RunningState::Starting)) {
      needsStartEvent = true;
      NS_DispatchToMainThread(
          new DispatchEventRunnable(this, NS_LITERAL_STRING("start")));
    }

    if (rv == NS_OK) {
@@ -908,91 +1014,68 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
      mRunningState = Err(rv);
    }

    GatherBlob()
        ->Then(mMainThread, __func__,
               [this, self = RefPtr<Session>(this), rv, needsStartEvent](
                   const BlobPromise::ResolveOrRejectValue& aResult) {
                 if (mRecorder->mSessions.LastElement() == this) {
                   // Set state to inactive, but only if the recorder is not
                   // controlled by another session already.
                   mRecorder->ForceInactive();
                 }
    if (NS_FAILED(rv)) {
      mRecorder->ForceInactive();
      NS_DispatchToMainThread(NewRunnableMethod<nsresult>(
          "dom::MediaRecorder::NotifyError", mRecorder,
          &MediaRecorder::NotifyError, rv));
    }

                 if (needsStartEvent) {
                   mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("start"));
                 }
    RefPtr<Runnable> destroyRunnable = new DestroyRunnable(this);

                 // If there was an error, Fire the appropriate one
                 if (NS_FAILED(rv)) {
                   mRecorder->NotifyError(rv);
                 }

                 // Fire a blob event named dataavailable
                 RefPtr<Blob> blob;
                 if (rv == NS_ERROR_DOM_SECURITY_ERR || aResult.IsReject()) {
                   // In case of SecurityError, the blob data must be discarded.
                   // We create a new empty one and throw the blob with its data
                   // away.
                   // In case we failed to gather blob data, we create an empty
                   // memory blob instead.
                   blob = Blob::CreateEmptyBlob(mRecorder->GetParentObject(),
                                                mMimeType);
                 } else {
                   blob = aResult.ResolveValue();
                 }
                 if (NS_FAILED(mRecorder->CreateAndDispatchBlobEvent(blob))) {
                   // Failed to dispatch blob event. That's unexpected. It's
                   // probably all right to fire an error event if we haven't
                   // already.
                   if (NS_SUCCEEDED(rv)) {
                     mRecorder->NotifyError(NS_ERROR_FAILURE);
                   }
                 }

                 // Dispatch stop event and clear MIME type.
                 mMimeType = NS_LITERAL_STRING("");
                 mRecorder->SetMimeType(mMimeType);

                 // Fire an event named stop
                 mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("stop"));

                 // And finally, Shutdown and destroy the Session
                 return Shutdown();
               })
        ->Then(mMainThread, __func__, [this, self = RefPtr<Session>(this)] {
          gSessions.RemoveEntry(this);
          if (gSessions.Count() == 0 && gMediaRecorderShutdownBlocker) {
            // All sessions finished before shutdown, no need to keep the
            // blocker.
            RefPtr<nsIAsyncShutdownClient> barrier = GetShutdownBarrier();
            barrier->RemoveBlocker(gMediaRecorderShutdownBlocker);
            gMediaRecorderShutdownBlocker = nullptr;
          }
        });
    if (rv != NS_ERROR_DOM_SECURITY_ERR) {
      // Don't push a blob if there was a security error.
      if (NS_FAILED(NS_DispatchToMainThread(
              new PushBlobRunnable(this, destroyRunnable)))) {
        MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed");
      }
    } else {
      if (NS_FAILED(NS_DispatchToMainThread(destroyRunnable))) {
        MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed");
      }
    }
  }

  void MediaEncoderInitialized() {
    MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

    Extract(false);
    // Pull encoded metadata from MediaEncoder
    nsTArray<nsTArray<uint8_t>> encodedBuf;
    nsString mime;
    nsresult rv = mEncoder->GetEncodedMetadata(&encodedBuf, mime);

    NS_DispatchToMainThread(NewRunnableFrom([self = RefPtr<Session>(this), this,
                                             mime = mEncoder->MimeType()]() {
      if (mRunningState.isErr()) {
    if (NS_FAILED(rv)) {
      MOZ_ASSERT(false);
      return;
    }

    // Append pulled data into cache buffer.
    NS_DispatchToMainThread(
        new StoreEncodedBufferRunnable(this, std::move(encodedBuf)));

    RefPtr<Session> self = this;
    NS_DispatchToMainThread(NewRunnableFrom([self, mime]() {
      if (!self->mRecorder) {
        MOZ_ASSERT_UNREACHABLE("Recorder should be live");
        return NS_OK;
      }
      mMimeType = mime;
      mRecorder->SetMimeType(mime);
      auto state = mRunningState.unwrap();
      if (state == RunningState::Starting || state == RunningState::Stopping) {
        if (state == RunningState::Starting) {
          // We set it to Running in the runnable since we can only assign
          // mRunningState on main thread. We set it before running the start
          // event runnable since that dispatches synchronously (and may cause
          // js calls to methods depending on mRunningState).
          mRunningState = RunningState::Running;
      if (self->mRunningState.isOk()) {
        auto state = self->mRunningState.unwrap();
        if (state == RunningState::Starting ||
            state == RunningState::Stopping) {
          if (state == RunningState::Starting) {
            // We set it to Running in the runnable since we can only assign
            // mRunningState on main thread. We set it before running the start
            // event runnable since that dispatches synchronously (and may cause
            // js calls to methods depending on mRunningState).
            self->mRunningState = RunningState::Running;
          }
          self->mMimeType = mime;
          self->mRecorder->SetMimeType(self->mMimeType);
          auto startEvent = MakeRefPtr<DispatchEventRunnable>(
              self, NS_LITERAL_STRING("start"));
          startEvent->Run();
        }
        mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("start"));
      }
      return NS_OK;
    }));
@@ -1001,7 +1084,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
  void MediaEncoderDataAvailable() {
    MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

    Extract(false);
    Extract(false, nullptr);
  }

  void MediaEncoderError() {
@@ -1015,9 +1098,12 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());
    MOZ_ASSERT(mEncoder->IsShutdown());

    mMainThread->Dispatch(NewRunnableMethod<nsresult>(
        "MediaRecorder::Session::MediaEncoderShutdown->DoSessionEndTask", this,
        &Session::DoSessionEndTask, NS_OK));
    // For the stop event. Let's the creation of the blob to dispatch this
    // runnable.
    RefPtr<Runnable> destroyRunnable = new DestroyRunnable(this);

    // Forces the last blob even if it's not time for it yet.
    Extract(true, destroyRunnable);

    // Clean up.
    mEncoderListener->Forget();
@@ -1034,13 +1120,6 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    return mShutdownPromise;
  }

  // This is a coarse calculation and does not reflect the duration of the
  // final recording for reasons such as pauses. However it allows us an
  // idea of how long people are running their recorders for.
  TimeDuration timeDelta = TimeStamp::Now() - mStartTime;
  Telemetry::Accumulate(Telemetry::MEDIA_RECORDER_RECORDING_DURATION,
                        timeDelta.ToSeconds());

  mShutdownPromise = ShutdownPromise::CreateAndResolve(true, __func__);
  RefPtr<Session> self = this;
@@ -1077,16 +1156,19 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    }

    // Break the cycle reference between Session and MediaRecorder.
    mShutdownPromise = mShutdownPromise->Then(
        GetCurrentThreadSerialEventTarget(), __func__,
        [self]() {
          self->mRecorder->RemoveSession(self);
          return ShutdownPromise::CreateAndResolve(true, __func__);
        },
        []() {
          MOZ_ASSERT_UNREACHABLE("Unexpected reject");
          return ShutdownPromise::CreateAndReject(false, __func__);
        });
    if (mRecorder) {
      mShutdownPromise = mShutdownPromise->Then(
          GetCurrentThreadSerialEventTarget(), __func__,
          [self]() {
            self->mRecorder->RemoveSession(self);
            self->mRecorder = nullptr;
            return ShutdownPromise::CreateAndResolve(true, __func__);
          },
          []() {
            MOZ_ASSERT_UNREACHABLE("Unexpected reject");
            return ShutdownPromise::CreateAndReject(false, __func__);
          });
    }

    if (mEncoderThread) {
      RefPtr<TaskQueue>& encoderThread = mEncoderThread;
@@ -1111,8 +1193,9 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
    Stopped,  // Session has stopped without any error
  };

  // Our associated MediaRecorder.
  const RefPtr<MediaRecorder> mRecorder;
  // Hold reference to MediaRecorder that ensure MediaRecorder is alive
  // if there is an active session. Access ONLY on main thread.
  RefPtr<MediaRecorder> mRecorder;

  // Stream currently recorded.
  RefPtr<DOMMediaStream> mMediaStream;
@@ -1124,8 +1207,6 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
  // set.
  nsTArray<RefPtr<MediaStreamTrack>> mMediaStreamTracks;

  // Main thread used for MozPromise operations.
  const RefPtr<nsISerialEventTarget> mMainThread;
  // Runnable thread for reading data from MediaEncoder.
  RefPtr<TaskQueue> mEncoderThread;
  // MediaEncoder pipeline.
@@ -1145,14 +1226,14 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
  // The interval of passing encoded data from MutableBlobStorage to
  // onDataAvailable handler.
  const uint32_t mTimeSlice;
  // The time this session started, for telemetry.
  const TimeStamp mStartTime;
  // The session's current main thread state. The error type gets set when
  // ending a recording with an error. An NS_OK error is invalid.
  // The session's current main thread state. The error type gets set when
  // ending a recording with an error. An NS_OK error is invalid.
  // Main thread only.
  Result<RunningState, nsresult> mRunningState;
};

NS_IMPL_ISUPPORTS_INHERITED0(MediaRecorder::Session::PushBlobRunnable, Runnable)

MediaRecorder::~MediaRecorder() {
  LOG(LogLevel::Debug, ("~MediaRecorder (%p)", this));
  UnRegisterActivityObserver();
@@ -1247,6 +1328,8 @@ void MediaRecorder::Start(const Optional<uint32_t>& aTimeSlice,
  mSessions.AppendElement();
  mSessions.LastElement() = new Session(this, timeSlice);
  mSessions.LastElement()->Start();
  mStartTime = TimeStamp::Now();
  Telemetry::ScalarAdd(Telemetry::ScalarID::MEDIARECORDER_RECORDING_COUNT, 1);
}

void MediaRecorder::Stop(ErrorResult& aResult) {
@@ -1308,7 +1391,10 @@ void MediaRecorder::RequestData(ErrorResult& aResult) {
    return;
  }
  MOZ_ASSERT(mSessions.Length() > 0);
  mSessions.LastElement()->RequestData();
  nsresult rv = mSessions.LastElement()->RequestData();
  if (NS_FAILED(rv)) {
    NotifyError(rv);
  }
}

JSObject* MediaRecorder::WrapObject(JSContext* aCx,
@@ -1597,6 +1683,22 @@ void MediaRecorder::ForceInactive() {
  mState = RecordingState::Inactive;
}

void MediaRecorder::StopForSessionDestruction() {
  LOG(LogLevel::Debug, ("MediaRecorder.StopForSessionDestruction %p", this));
  MediaRecorderReporter::RemoveMediaRecorder(this);
  // We do not perform a mState != RecordingState::Recording check here as
  // we may already be inactive due to ForceInactive().
  mState = RecordingState::Inactive;
  MOZ_ASSERT(mSessions.Length() > 0);
  mSessions.LastElement()->Stop();
  // This is a coarse calculation and does not reflect the duration of the
  // final recording for reasons such as pauses. However it allows us an idea
  // of how long people are running their recorders for.
  TimeDuration timeDelta = TimeStamp::Now() - mStartTime;
  Telemetry::Accumulate(Telemetry::MEDIA_RECORDER_RECORDING_DURATION,
                        timeDelta.ToSeconds());
}

void MediaRecorder::InitializeDomExceptions() {
  mSecurityDomException = DOMException::Create(NS_ERROR_DOM_SECURITY_ERR);
  mUnknownDomException = DOMException::Create(NS_ERROR_DOM_UNKNOWN_ERR);
@@ -1635,5 +1737,3 @@ StaticRefPtr<MediaRecorderReporter> MediaRecorderReporter::sUniqueInstance;

}  // namespace dom
}  // namespace mozilla

#undef LOG
@@ -61,6 +61,8 @@ class MediaRecorder final : public DOMEventTargetHelper,
  JSObject* WrapObject(JSContext* aCx,
                       JS::Handle<JSObject*> aGivenProto) override;

  nsPIDOMWindowInner* GetParentObject() { return GetOwner(); }

  NS_DECL_ISUPPORTS_INHERITED
  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaRecorder, DOMEventTargetHelper)
@@ -175,6 +177,8 @@ class MediaRecorder final : public DOMEventTargetHelper,
  uint32_t mVideoBitsPerSecond;
  uint32_t mBitsPerSecond;

  TimeStamp mStartTime;

  // DOMExceptions that are created early and possibly thrown in NotifyError.
  // Creating them early allows us to capture the JS stack, which cannot be
  // done at the time the error event is fired.
@@ -7,7 +7,7 @@
#define ContainerWriter_h_

#include "nsTArray.h"
#include "EncodedFrame.h"
#include "EncodedFrameContainer.h"
#include "TrackMetadataBase.h"

namespace mozilla {
@@ -26,26 +26,23 @@ class ContainerWriter {
  enum { END_OF_STREAM = 1 << 0 };

  /**
   * Writes encoded track data from aData into the internal stream of container
   * writer. aFlags is used to signal the impl of different conditions
   * such as END_OF_STREAM. Each impl may handle different flags, and should be
   * documented accordingly. Currently, WriteEncodedTrack doesn't support
   * explicit track specification, though each impl may provide logic to
   * allocate frames into different tracks.
   * Writes encoded track data from aBuffer to a packet, and insert this packet
   * into the internal stream of container writer. aDuration is the playback
   * duration of this packet in number of samples. aFlags is true with
   * END_OF_STREAM if this is the last packet of track.
   * Currently, WriteEncodedTrack doesn't support multiple tracks.
   */
  virtual nsresult WriteEncodedTrack(
      const nsTArray<RefPtr<EncodedFrame>>& aData, uint32_t aFlags = 0) = 0;
  virtual nsresult WriteEncodedTrack(const EncodedFrameContainer& aData,
                                     uint32_t aFlags = 0) = 0;

  /**
   * Stores the metadata for all given tracks to the muxer.
   *
   * This method checks the integrity of aMetadata.
   * If the metadata isn't well formatted, this method returns NS_ERROR_FAILURE.
   * If the metadata is well formatted, it stores the metadata and returns
   * Set the meta data pointer into muxer
   * This function will check the integrity of aMetadata.
   * If the meta data isn't well format, this function will return
   * NS_ERROR_FAILURE to caller, else save the pointer to mMetadata and return
   * NS_OK.
   */
  virtual nsresult SetMetadata(
      const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) = 0;
  virtual nsresult SetMetadata(TrackMetadataBase* aMetadata) = 0;

  /**
   * Indicate if the writer has finished to output data
@@ -62,7 +59,7 @@ class ContainerWriter {
   * even it is not full, and copy these container data to a buffer for
   * aOutputBufs to append.
   */
  virtual nsresult GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
  virtual nsresult GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
                                    uint32_t aFlags = 0) = 0;

 protected:
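To make the restored contract above concrete, here is a minimal caller-side sketch of driving a ContainerWriter implementation. It is illustrative only and not part of this commit: `MuxOnePass` and the `aLastPacket` flag are hypothetical, while the method signatures and the END_OF_STREAM/FLUSH_NEEDED flags come from this header and from MediaEncoder.cpp below; any concrete writer (e.g. WebMWriter) could stand behind `aWriter`.

// Illustrative sketch, not part of this commit.
static nsresult MuxOnePass(ContainerWriter* aWriter,
                           TrackMetadataBase* aMetadata,
                           const EncodedFrameContainer& aFrames,
                           bool aLastPacket,
                           nsTArray<nsTArray<uint8_t> >* aOutputBufs) {
  // Metadata must be accepted by the writer before any track data.
  nsresult rv = aWriter->SetMetadata(aMetadata);
  if (NS_FAILED(rv)) {
    return rv;  // Metadata was not well formatted.
  }
  // Hand a batch of encoded frames to the writer; flag the final packet.
  rv = aWriter->WriteEncodedTrack(
      aFrames, aLastPacket ? ContainerWriter::END_OF_STREAM : 0);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Pull the muxed container bytes back out for the consumer.
  return aWriter->GetContainerData(
      aOutputBufs, aLastPacket ? ContainerWriter::FLUSH_NEEDED : 0);
}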
@@ -1,71 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef EncodedFrame_h_
#define EncodedFrame_h_

#include "nsISupportsImpl.h"
#include "VideoUtils.h"

namespace mozilla {

// Represent an encoded frame emitted by an encoder
class EncodedFrame final {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncodedFrame)
 public:
  EncodedFrame() : mTime(0), mDuration(0), mFrameType(UNKNOWN) {}
  enum FrameType {
    VP8_I_FRAME,       // VP8 intraframe
    VP8_P_FRAME,       // VP8 predicted frame
    OPUS_AUDIO_FRAME,  // Opus audio frame
    UNKNOWN            // FrameType not set
  };
  void SwapInFrameData(nsTArray<uint8_t>& aData) {
    mFrameData.SwapElements(aData);
  }
  nsresult SwapOutFrameData(nsTArray<uint8_t>& aData) {
    if (mFrameType != UNKNOWN) {
      // Reset this frame type to UNKNOWN once the data is swapped out.
      mFrameData.SwapElements(aData);
      mFrameType = UNKNOWN;
      return NS_OK;
    }
    return NS_ERROR_FAILURE;
  }
  const nsTArray<uint8_t>& GetFrameData() const { return mFrameData; }
  // Timestamp in microseconds
  uint64_t mTime;
  // The playback duration of this packet. The unit is determined by the use
  // case. For VP8 the unit should be microseconds. For opus this is the number
  // of samples.
  uint64_t mDuration;
  // Represent what is in the FrameData
  FrameType mFrameType;

  uint64_t GetEndTime() const {
    // Defend against untested types. This assert can be removed but we want
    // to make sure other types are correctly accounted for.
    MOZ_ASSERT(mFrameType == OPUS_AUDIO_FRAME || mFrameType == VP8_I_FRAME ||
               mFrameType == VP8_P_FRAME);
    if (mFrameType == OPUS_AUDIO_FRAME) {
      // See bug 1356054 for discussion around standardization of time units
      // (can remove videoutils import when this goes)
      return mTime + FramesToUsecs(mDuration, 48000).value();
    } else {
      return mTime + mDuration;
    }
  }

 private:
  // Private destructor, to discourage deletion outside of Release():
  ~EncodedFrame() {}

  // Encoded data
  nsTArray<uint8_t> mFrameData;
};

}  // namespace mozilla

#endif  // EncodedFrame_h_
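A note on the unit mix in GetEndTime() above: VP8 durations are already in microseconds, while Opus durations are counted in 48 kHz samples, which is why the Opus branch converts via FramesToUsecs(mDuration, 48000). A self-contained sketch of the same arithmetic, assuming only the 48 kHz Opus clock rate stated in the removed header (illustrative, not part of this commit):

#include <cstdint>

// 960 samples (one 20 ms Opus frame at 48 kHz) -> 960 * 1000000 / 48000
// = 20000 microseconds, matching FramesToUsecs(mDuration, 48000) above.
static uint64_t OpusSamplesToUsecs(uint64_t aSamples) {
  return aSamples * 1000000 / 48000;
}

static uint64_t EndTimeUsecs(uint64_t aStartUsecs, uint64_t aDuration,
                             bool aIsOpus) {
  // VP8 frames already carry their duration in microseconds.
  return aStartUsecs + (aIsOpus ? OpusSamplesToUsecs(aDuration) : aDuration);
}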
dom/media/encoder/EncodedFrameContainer.h (new file, 97 lines)
@@ -0,0 +1,97 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef EncodedFrameContainer_H_
#define EncodedFrameContainer_H_

#include "nsTArray.h"

namespace mozilla {

class EncodedFrame;

/*
 * This container is used to carry video or audio encoded data from encoder to
 * muxer. The media data object is created by encoder and recycle by the
 * destructor. Only allow to store audio or video encoded data in EncodedData.
 */
class EncodedFrameContainer {
 public:
  // Append encoded frame data
  void AppendEncodedFrame(EncodedFrame* aEncodedFrame) {
    mEncodedFrames.AppendElement(aEncodedFrame);
  }
  // Retrieve all of the encoded frames
  const nsTArray<RefPtr<EncodedFrame> >& GetEncodedFrames() const {
    return mEncodedFrames;
  }

 private:
  // This container is used to store the video or audio encoded packets.
  // Muxer should check mFrameType and get the encoded data type from
  // mEncodedFrames.
  nsTArray<RefPtr<EncodedFrame> > mEncodedFrames;
};

// Represent one encoded frame
class EncodedFrame final {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncodedFrame)
 public:
  EncodedFrame() : mTimeStamp(0), mDuration(0), mFrameType(UNKNOWN) {}
  enum FrameType {
    VP8_I_FRAME,       // VP8 intraframe
    VP8_P_FRAME,       // VP8 predicted frame
    OPUS_AUDIO_FRAME,  // Opus audio frame
    VORBIS_AUDIO_FRAME,
    AVC_I_FRAME,
    AVC_P_FRAME,
    AVC_B_FRAME,
    AVC_CSD,  // AVC codec specific data
    AAC_AUDIO_FRAME,
    AAC_CSD,  // AAC codec specific data
    AMR_AUDIO_CSD,
    AMR_AUDIO_FRAME,
    EVRC_AUDIO_CSD,
    EVRC_AUDIO_FRAME,
    UNKNOWN  // FrameType not set
  };
  void SwapInFrameData(nsTArray<uint8_t>& aData) {
    mFrameData.SwapElements(aData);
  }
  nsresult SwapOutFrameData(nsTArray<uint8_t>& aData) {
    if (mFrameType != UNKNOWN) {
      // Reset this frame type to UNKNOWN once the data is swapped out.
      mFrameData.SwapElements(aData);
      mFrameType = UNKNOWN;
      return NS_OK;
    }
    return NS_ERROR_FAILURE;
  }
  const nsTArray<uint8_t>& GetFrameData() const { return mFrameData; }
  uint64_t GetTimeStamp() const { return mTimeStamp; }
  void SetTimeStamp(uint64_t aTimeStamp) { mTimeStamp = aTimeStamp; }

  uint64_t GetDuration() const { return mDuration; }
  void SetDuration(uint64_t aDuration) { mDuration = aDuration; }

  FrameType GetFrameType() const { return mFrameType; }
  void SetFrameType(FrameType aFrameType) { mFrameType = aFrameType; }

 private:
  // Private destructor, to discourage deletion outside of Release():
  ~EncodedFrame() {}

  // Encoded data
  nsTArray<uint8_t> mFrameData;
  uint64_t mTimeStamp;
  // The playback duration of this packet in number of samples
  uint64_t mDuration;
  // Represent what is in the FrameData
  FrameType mFrameType;
};

}  // namespace mozilla

#endif
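For orientation, here is a short sketch of the producer side of the restored API: an encoder wraps one packet in an EncodedFrame and appends it to the container handed to the muxer. Illustrative only and not part of this commit; `AppendOpusPacket` is a hypothetical helper, while every call it makes is declared in the header above.

// Illustrative sketch, not part of this commit.
static void AppendOpusPacket(EncodedFrameContainer& aContainer,
                             nsTArray<uint8_t>& aPacket, uint64_t aTimeUs,
                             uint64_t aDurationInSamples) {
  RefPtr<EncodedFrame> frame = new EncodedFrame();
  frame->SwapInFrameData(aPacket);  // Take the payload without copying.
  frame->SetTimeStamp(aTimeUs);
  frame->SetDuration(aDurationInSamples);  // Opus durations are in samples.
  frame->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
  aContainer.AppendEncodedFrame(frame);  // Container keeps a RefPtr.
}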
@@ -25,7 +25,6 @@
#include "mozilla/StaticPtr.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Unused.h"
#include "Muxer.h"
#include "nsIPrincipal.h"
#include "nsMimeTypes.h"
#include "nsThreadUtils.h"
@@ -39,6 +38,10 @@
#  include "WebMWriter.h"
#endif

#ifdef LOG
#  undef LOG
#endif

mozilla::LazyLogModule gMediaEncoderLog("MediaEncoder");
#define LOG(type, msg) MOZ_LOG(gMediaEncoderLog, type, msg)
@@ -395,13 +398,14 @@ MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread,
                           VideoTrackEncoder* aVideoEncoder,
                           TrackRate aTrackRate, const nsAString& aMIMEType)
    : mEncoderThread(aEncoderThread),
      mMuxer(MakeUnique<Muxer>(std::move(aWriter))),
      mWriter(std::move(aWriter)),
      mAudioEncoder(aAudioEncoder),
      mVideoEncoder(aVideoEncoder),
      mEncoderListener(MakeAndAddRef<EncoderListener>(mEncoderThread, this)),
      mStartTime(TimeStamp::Now()),
      mMIMEType(aMIMEType),
      mInitialized(false),
      mMetadataEncoded(false),
      mCompleted(false),
      mError(false),
      mCanceled(false),
@@ -647,7 +651,7 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
          driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW);
    }
  }
  writer = MakeUnique<WebMWriter>();
  writer = MakeUnique<WebMWriter>(aTrackTypes);
  mimeType = NS_LITERAL_STRING(VIDEO_WEBM);
} else if (MediaEncoder::IsWebMEncoderEnabled() &&
           aMIMEType.EqualsLiteral(AUDIO_WEBM) &&
@@ -668,7 +672,7 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
  } else {
    mimeType = NS_LITERAL_STRING(AUDIO_WEBM);
  }
  writer = MakeUnique<WebMWriter>();
  writer = MakeUnique<WebMWriter>(aTrackTypes);
}
#endif  // MOZ_WEBM_ENCODER
else if (MediaDecoder::IsOggEnabled() && MediaDecoder::IsOpusEnabled() &&
@@ -695,7 +699,7 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
          driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW);
    }
  }
  writer = MakeUnique<WebMWriter>();
  writer = MakeUnique<WebMWriter>(aTrackTypes);
  mimeType = NS_LITERAL_STRING(VIDEO_WEBM);
}
#endif  // MOZ_WEBM_ENCODER
@@ -733,78 +737,122 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
      audioEncoder, videoEncoder, aTrackRate, mimeType);
}

nsresult MediaEncoder::GetEncodedMetadata(
    nsTArray<nsTArray<uint8_t>>* aOutputBufs, nsAString& aMIMEType) {
  AUTO_PROFILER_LABEL("MediaEncoder::GetEncodedMetadata", OTHER);

  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

  if (mShutdown) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  if (!mInitialized) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  if (mMetadataEncoded) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  aMIMEType = mMIMEType;

  LOG(LogLevel::Verbose,
      ("GetEncodedMetadata TimeStamp = %f", GetEncodeTimeStamp()));

  nsresult rv;

  if (mAudioEncoder) {
    if (!mAudioEncoder->IsInitialized()) {
      LOG(LogLevel::Error,
          ("GetEncodedMetadata Audio encoder not initialized"));
      MOZ_ASSERT(false);
      return NS_ERROR_FAILURE;
    }
    rv = CopyMetadataToMuxer(mAudioEncoder);
    if (NS_FAILED(rv)) {
      LOG(LogLevel::Error, ("Failed to Set Audio Metadata"));
      SetError();
      return rv;
    }
  }
  if (mVideoEncoder) {
    if (!mVideoEncoder->IsInitialized()) {
      LOG(LogLevel::Error,
          ("GetEncodedMetadata Video encoder not initialized"));
      MOZ_ASSERT(false);
      return NS_ERROR_FAILURE;
    }
    rv = CopyMetadataToMuxer(mVideoEncoder.get());
    if (NS_FAILED(rv)) {
      LOG(LogLevel::Error, ("Failed to Set Video Metadata"));
      SetError();
      return rv;
    }
  }

  rv = mWriter->GetContainerData(aOutputBufs, ContainerWriter::GET_HEADER);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error, ("Writer fail to generate header!"));
    SetError();
    return rv;
  }
  LOG(LogLevel::Verbose,
      ("Finish GetEncodedMetadata TimeStamp = %f", GetEncodeTimeStamp()));
  mMetadataEncoded = true;

  return NS_OK;
}
nsresult MediaEncoder::GetEncodedData(
    nsTArray<nsTArray<uint8_t>>* aOutputBufs) {
  AUTO_PROFILER_LABEL("MediaEncoder::GetEncodedData", OTHER);

  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());
  MOZ_ASSERT(mInitialized);
  MOZ_ASSERT_IF(mAudioEncoder, mAudioEncoder->IsInitialized());
  MOZ_ASSERT_IF(mVideoEncoder, mVideoEncoder->IsInitialized());

  if (!mMetadataEncoded) {
    MOZ_ASSERT(false);
    return NS_ERROR_FAILURE;
  }

  nsresult rv;
  LOG(LogLevel::Verbose,
      ("GetEncodedData TimeStamp = %f", GetEncodeTimeStamp()));
  EncodedFrameContainer encodedData;

  if (mMuxer->NeedsMetadata()) {
    nsTArray<RefPtr<TrackMetadataBase>> meta;
    if (mAudioEncoder && !*meta.AppendElement(mAudioEncoder->GetMetadata())) {
      LOG(LogLevel::Error, ("Audio metadata is null"));
      SetError();
      return NS_ERROR_ABORT;
    }
    if (mVideoEncoder && !*meta.AppendElement(mVideoEncoder->GetMetadata())) {
      LOG(LogLevel::Error, ("Video metadata is null"));
      SetError();
      return NS_ERROR_ABORT;
    }

    rv = mMuxer->SetMetadata(meta);
  if (mVideoEncoder) {
    // We're most likely to actually wait for a video frame, so do that first
    // to minimize capture offset/lipsync issues.
    rv = WriteEncodedDataToMuxer(mVideoEncoder);
    LOG(LogLevel::Verbose,
        ("Video encoded TimeStamp = %f", GetEncodeTimeStamp()));
    if (NS_FAILED(rv)) {
      LOG(LogLevel::Error, ("SetMetadata failed"));
      SetError();
      LOG(LogLevel::Warning, ("Failed to write encoded video data to muxer"));
      return rv;
    }
  }

  // First, feed encoded data from encoders to muxer.

  if (mVideoEncoder && !mVideoEncoder->IsEncodingComplete()) {
    nsTArray<RefPtr<EncodedFrame>> videoFrames;
    rv = mVideoEncoder->GetEncodedTrack(videoFrames);
  if (mAudioEncoder) {
    rv = WriteEncodedDataToMuxer(mAudioEncoder);
    LOG(LogLevel::Verbose,
        ("Audio encoded TimeStamp = %f", GetEncodeTimeStamp()));
    if (NS_FAILED(rv)) {
      // Encoding might be canceled.
      LOG(LogLevel::Error, ("Failed to get encoded data from video encoder."));
      LOG(LogLevel::Warning, ("Failed to write encoded audio data to muxer"));
      return rv;
    }
    for (RefPtr<EncodedFrame>& frame : videoFrames) {
      mMuxer->AddEncodedVideoFrame(std::move(frame));
    }
    if (mVideoEncoder->IsEncodingComplete()) {
      mMuxer->VideoEndOfStream();
    }
  }

  if (mAudioEncoder && !mAudioEncoder->IsEncodingComplete()) {
    nsTArray<RefPtr<EncodedFrame>> audioFrames;
    rv = mAudioEncoder->GetEncodedTrack(audioFrames);
    if (NS_FAILED(rv)) {
      // Encoding might be canceled.
      LOG(LogLevel::Error, ("Failed to get encoded data from audio encoder."));
      return rv;
    }
    for (RefPtr<EncodedFrame>& frame : audioFrames) {
      mMuxer->AddEncodedAudioFrame(std::move(frame));
    }
    if (mAudioEncoder->IsEncodingComplete()) {
      mMuxer->AudioEndOfStream();
    }
  }

  // Second, get data from muxer. This will do the actual muxing.

  rv = mMuxer->GetData(aOutputBufs);
  if (mMuxer->IsFinished()) {
  // In audio only or video only case, let unavailable track's flag to be
  // true.
  bool isAudioCompleted = !mAudioEncoder || mAudioEncoder->IsEncodingComplete();
  bool isVideoCompleted = !mVideoEncoder || mVideoEncoder->IsEncodingComplete();
  rv = mWriter->GetContainerData(
      aOutputBufs,
      isAudioCompleted && isVideoCompleted ? ContainerWriter::FLUSH_NEEDED : 0);
  if (mWriter->IsWritingComplete()) {
    mCompleted = true;
    Shutdown();
  }
@@ -812,9 +860,7 @@ nsresult MediaEncoder::GetEncodedData(
  LOG(LogLevel::Verbose,
      ("END GetEncodedData TimeStamp=%f "
       "mCompleted=%d, aComplete=%d, vComplete=%d",
       GetEncodeTimeStamp(), mCompleted,
       !mAudioEncoder || mAudioEncoder->IsEncodingComplete(),
       !mVideoEncoder || mVideoEncoder->IsEncodingComplete()));
       GetEncodeTimeStamp(), mCompleted, isAudioCompleted, isVideoCompleted));

  return rv;
}
@@ -858,6 +904,64 @@ void MediaEncoder::Shutdown() {
  }
}

nsresult MediaEncoder::WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder) {
  AUTO_PROFILER_LABEL("MediaEncoder::WriteEncodedDataToMuxer", OTHER);

  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

  if (!aTrackEncoder) {
    NS_ERROR("No track encoder to get data from");
    return NS_ERROR_FAILURE;
  }

  if (aTrackEncoder->IsEncodingComplete()) {
    return NS_OK;
  }

  EncodedFrameContainer encodedData;
  nsresult rv = aTrackEncoder->GetEncodedTrack(encodedData);
  if (NS_FAILED(rv)) {
    // Encoding might be canceled.
    LOG(LogLevel::Error, ("Failed to get encoded data from encoder."));
    SetError();
    return rv;
  }
  rv = mWriter->WriteEncodedTrack(
      encodedData,
      aTrackEncoder->IsEncodingComplete() ? ContainerWriter::END_OF_STREAM : 0);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error,
        ("Failed to write encoded track to the media container."));
    SetError();
  }
  return rv;
}

nsresult MediaEncoder::CopyMetadataToMuxer(TrackEncoder* aTrackEncoder) {
  AUTO_PROFILER_LABEL("MediaEncoder::CopyMetadataToMuxer", OTHER);

  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());

  if (!aTrackEncoder) {
    NS_ERROR("No track encoder to get metadata from");
    return NS_ERROR_FAILURE;
  }

  RefPtr<TrackMetadataBase> meta = aTrackEncoder->GetMetadata();
  if (meta == nullptr) {
    LOG(LogLevel::Error, ("metadata == null"));
    SetError();
    return NS_ERROR_ABORT;
  }

  nsresult rv = mWriter->SetMetadata(meta);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error, ("SetMetadata failed"));
    SetError();
  }
  return rv;
}

bool MediaEncoder::IsShutdown() {
  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());
  return mShutdown;
@@ -936,11 +1040,6 @@ bool MediaEncoder::IsWebMEncoderEnabled() {
}
#endif

const nsString& MediaEncoder::MimeType() const {
  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());
  return mMIMEType;
}

void MediaEncoder::NotifyInitialized() {
  MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn());
@@ -1021,5 +1120,3 @@ void MediaEncoder::SetVideoKeyFrameInterval(int32_t aVideoKeyFrameInterval) {
}

}  // namespace mozilla

#undef LOG
@@ -8,7 +8,6 @@

#include "ContainerWriter.h"
#include "CubebUtils.h"
#include "MediaQueue.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "mozilla/DebugOnly.h"
@@ -20,7 +19,6 @@
namespace mozilla {

class DriftCompensator;
class Muxer;
class Runnable;
class TaskQueue;
@@ -78,21 +76,29 @@ class MediaEncoderListener {
 * been initialized and when there's data available.
 * => encoder->RegisterListener(listener);
 *
 * 3) When the MediaEncoderListener is notified that the MediaEncoder has
 *    data available, we can encode data. This also encodes metadata on its
 *    first invocation.
 * 3) Connect the MediaStreamTracks to be recorded.
 *    => encoder->ConnectMediaStreamTrack(track);
 *    This creates the corresponding TrackEncoder and connects the track and
 *    the TrackEncoder through a track listener. This also starts encoding.
 *
 * 4) When the MediaEncoderListener is notified that the MediaEncoder is
 *    initialized, we can encode metadata.
 *    => encoder->GetEncodedMetadata(...);
 *
 * 5) When the MediaEncoderListener is notified that the MediaEncoder has
 *    data available, we can encode data.
 *    => encoder->GetEncodedData(...);
 *
 * 4) To stop encoding, there are multiple options:
 * 6) To stop encoding, there are multiple options:
 *
 * 4.1) Stop() for a graceful stop.
 * 6.1) Stop() for a graceful stop.
 *      => encoder->Stop();
 *
 * 4.2) Cancel() for an immediate stop, if you don't need the data currently
 * 6.2) Cancel() for an immediate stop, if you don't need the data currently
 *      buffered.
 *      => encoder->Cancel();
 *
 * 4.3) When all input tracks end, the MediaEncoder will automatically stop
 * 6.3) When all input tracks end, the MediaEncoder will automatically stop
 *      and shut down.
 */
class MediaEncoder {
@ -151,12 +157,24 @@ class MediaEncoder {
|
||||
uint32_t aAudioBitrate, uint32_t aVideoBitrate, uint8_t aTrackTypes,
|
||||
TrackRate aTrackRate);
|
||||
|
||||
/**
|
||||
* Encodes raw metadata for all tracks to aOutputBufs. aMIMEType is the valid
|
||||
* mime-type for the returned container data. The buffer of container data is
|
||||
* allocated in ContainerWriter::GetContainerData().
|
||||
*
|
||||
* Should there be insufficient input data for either track encoder to infer
|
||||
* the metadata, or if metadata has already been encoded, we return an error
|
||||
* and the output arguments are undefined. Otherwise we return NS_OK.
|
||||
*/
|
||||
nsresult GetEncodedMetadata(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
|
||||
nsAString& aMIMEType);
|
||||
/**
|
||||
* Encodes raw data for all tracks to aOutputBufs. The buffer of container
|
||||
* data is allocated in ContainerWriter::GetContainerData().
|
||||
*
|
||||
* On its first call, metadata is also encoded. TrackEncoders must have been
|
||||
* initialized before this is called.
|
||||
* This implies that metadata has already been encoded and that all track
|
||||
* encoders are still active. Should either implication break, we return an
|
||||
* error and the output argument is undefined. Otherwise we return NS_OK.
|
||||
*/
|
||||
nsresult GetEncodedData(nsTArray<nsTArray<uint8_t>>* aOutputBufs);

@ -178,8 +196,6 @@ class MediaEncoder {
static bool IsWebMEncoderEnabled();
#endif

const nsString& MimeType() const;

/**
* Notifies listeners that this MediaEncoder has been initialized.
*/
@ -237,10 +253,15 @@ class MediaEncoder {
*/
void SetError();

// Get encoded data from trackEncoder and write to muxer
nsresult WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder);
// Get metadata from trackEncoder and copy to muxer
nsresult CopyMetadataToMuxer(TrackEncoder* aTrackEncoder);

const RefPtr<TaskQueue> mEncoderThread;
const RefPtr<DriftCompensator> mDriftCompensator;

UniquePtr<Muxer> mMuxer;
UniquePtr<ContainerWriter> mWriter;
RefPtr<AudioTrackEncoder> mAudioEncoder;
RefPtr<AudioTrackListener> mAudioListener;
RefPtr<VideoTrackEncoder> mVideoEncoder;
@ -263,10 +284,10 @@ class MediaEncoder {
// A video track that we are encoding. Will be null if the input stream
// doesn't contain video on start() or if the input is an AudioNode.
RefPtr<dom::VideoStreamTrack> mVideoTrack;

TimeStamp mStartTime;
const nsString mMIMEType;
nsString mMIMEType;
bool mInitialized;
bool mMetadataEncoded;
bool mCompleted;
bool mError;
bool mCanceled;
@ -1,209 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "Muxer.h"

#include "ContainerWriter.h"

namespace mozilla {

LazyLogModule gMuxerLog("Muxer");
#define LOG(type, ...) MOZ_LOG(gMuxerLog, type, (__VA_ARGS__))

Muxer::Muxer(UniquePtr<ContainerWriter> aWriter)
: mWriter(std::move(aWriter)) {}

bool Muxer::IsFinished() { return mWriter->IsWritingComplete(); }

nsresult Muxer::SetMetadata(
const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) {
nsresult rv = mWriter->SetMetadata(aMetadata);
if (NS_FAILED(rv)) {
LOG(LogLevel::Error, "%p Setting metadata failed, tracks=%zu", this,
aMetadata.Length());
return rv;
}

for (const auto& track : aMetadata) {
switch (track->GetKind()) {
case TrackMetadataBase::METADATA_OPUS: {
// In the case of Opus we need to calculate the codec delay based on the
// pre-skip. For more information see:
// https://tools.ietf.org/html/rfc7845#section-4.2
// Calculate offset in microseconds
OpusMetadata* opusMeta = static_cast<OpusMetadata*>(track.get());
mAudioCodecDelay = static_cast<uint64_t>(
LittleEndian::readUint16(opusMeta->mIdHeader.Elements() + 10) *
PR_USEC_PER_SEC / 48000);
MOZ_FALLTHROUGH;
}
case TrackMetadataBase::METADATA_VORBIS:
case TrackMetadataBase::METADATA_AAC:
case TrackMetadataBase::METADATA_AMR:
case TrackMetadataBase::METADATA_EVRC:
MOZ_ASSERT(!mHasAudio, "Only one audio track supported");
mHasAudio = true;
break;
case TrackMetadataBase::METADATA_VP8:
MOZ_ASSERT(!mHasVideo, "Only one video track supported");
mHasVideo = true;
break;
default:
MOZ_CRASH("Unknown codec metadata");
};
}
mMetadataSet = true;
MOZ_ASSERT(mHasAudio || mHasVideo);
if (!mHasAudio) {
mEncodedAudioFrames.Finish();
MOZ_ASSERT(mEncodedAudioFrames.AtEndOfStream());
}
if (!mHasVideo) {
mEncodedVideoFrames.Finish();
MOZ_ASSERT(mEncodedVideoFrames.AtEndOfStream());
}
LOG(LogLevel::Info, "%p Metadata set; audio=%d, video=%d", this, mHasAudio,
mHasVideo);
return rv;
}
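The pre-skip arithmetic above deserves a standalone illustration: bytes 10 and 11 of the OpusHead ID header carry the pre-skip as a little-endian sample count at 48 kHz, which SetMetadata() converts to microseconds. A self-contained sketch of the same computation (a hypothetical helper, not part of the patch):

#include <cstdint>

// RFC 7845: pre-skip is a little-endian uint16 at byte offset 10 of the
// OpusHead header, counted in 48 kHz samples.
uint64_t OpusPreSkipToUsecs(const uint8_t* aIdHeader) {
  uint16_t preSkip = static_cast<uint16_t>(aIdHeader[10]) |
                     static_cast<uint16_t>(aIdHeader[11]) << 8;
  // Same formula as SetMetadata(): samples * microseconds-per-second / 48000.
  return static_cast<uint64_t>(preSkip) * 1000000 / 48000;
}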

void Muxer::AddEncodedAudioFrame(RefPtr<EncodedFrame> aFrame) {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasAudio);
if (aFrame->mFrameType == EncodedFrame::FrameType::OPUS_AUDIO_FRAME) {
aFrame->mTime += mAudioCodecDelay;
}
mEncodedAudioFrames.Push(aFrame);
}

void Muxer::AddEncodedVideoFrame(RefPtr<EncodedFrame> aFrame) {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasVideo);
mEncodedVideoFrames.Push(aFrame);
}

void Muxer::AudioEndOfStream() {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasAudio);
mEncodedAudioFrames.Finish();
}

void Muxer::VideoEndOfStream() {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasVideo);
mEncodedVideoFrames.Finish();
}

nsresult Muxer::GetData(nsTArray<nsTArray<uint8_t>>* aOutputBuffers) {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasAudio || mHasVideo);

nsresult rv;
if (!mMetadataEncoded) {
rv = mWriter->GetContainerData(aOutputBuffers, ContainerWriter::GET_HEADER);
if (NS_FAILED(rv)) {
LOG(LogLevel::Error, "%p Failed getting metadata from writer", this);
return rv;
}
mMetadataEncoded = true;
}

if (mEncodedAudioFrames.GetSize() == 0 &&
mEncodedVideoFrames.GetSize() == 0) {
// Nothing to mux.
return NS_OK;
}

rv = Mux();
if (NS_FAILED(rv)) {
LOG(LogLevel::Error, "%p Failed muxing data into writer", this);
return rv;
}

MOZ_ASSERT_IF(
mEncodedAudioFrames.IsFinished() && mEncodedVideoFrames.IsFinished(),
mEncodedAudioFrames.AtEndOfStream());
MOZ_ASSERT_IF(
mEncodedAudioFrames.IsFinished() && mEncodedVideoFrames.IsFinished(),
mEncodedVideoFrames.AtEndOfStream());
uint32_t flags =
mEncodedAudioFrames.AtEndOfStream() && mEncodedVideoFrames.AtEndOfStream()
? ContainerWriter::FLUSH_NEEDED
: 0;

return mWriter->GetContainerData(aOutputBuffers, flags);
}
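GetData() is the pacing protocol in miniature: the first successful call emits the container header (GET_HEADER), later calls mux whatever frames are queued, and FLUSH_NEEDED is passed only once both queues have reached end of stream. A hypothetical caller-side drain loop, assuming frames keep arriving between polls:

nsTArray<nsTArray<uint8_t>> buffers;
while (!muxer.IsFinished()) {
  // Each successful call appends the header first (once), then muxed data.
  nsresult rv = muxer.GetData(&buffers);
  if (NS_FAILED(rv)) {
    break;  // writer error; real callers propagate the nsresult
  }
  // ... hand off `buffers` (e.g. to the MediaRecorder session) ...
  buffers.Clear();
}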

nsresult Muxer::Mux() {
MOZ_ASSERT(mMetadataSet);
MOZ_ASSERT(mHasAudio || mHasVideo);

nsTArray<RefPtr<EncodedFrame>> frames;
// The times at which we expect our next video and audio frames. These are
// based on the time + duration (GetEndTime()) of the last seen frames.
// Assumes that the encoders write the correct duration for frames.
uint64_t expectedNextVideoTime = 0;
uint64_t expectedNextAudioTime = 0;
// Interleave frames until we're out of audio or video
while (mEncodedVideoFrames.GetSize() > 0 &&
mEncodedAudioFrames.GetSize() > 0) {
RefPtr<EncodedFrame> videoFrame = mEncodedVideoFrames.PeekFront();
RefPtr<EncodedFrame> audioFrame = mEncodedAudioFrames.PeekFront();
// Each track's frames must occur at or after that track's expected time.
MOZ_ASSERT(videoFrame->mTime >= expectedNextVideoTime);
MOZ_ASSERT(audioFrame->mTime >= expectedNextAudioTime);
if (videoFrame->mTime <= audioFrame->mTime) {
expectedNextVideoTime = videoFrame->GetEndTime();
RefPtr<EncodedFrame> frame = mEncodedVideoFrames.PopFront();
frames.AppendElement(frame);
} else {
expectedNextAudioTime = audioFrame->GetEndTime();
RefPtr<EncodedFrame> frame = mEncodedAudioFrames.PopFront();
frames.AppendElement(frame);
}
}

// If we're out of audio we still may be able to add more video...
if (mEncodedAudioFrames.GetSize() == 0) {
while (mEncodedVideoFrames.GetSize() > 0) {
if (!mEncodedAudioFrames.AtEndOfStream() &&
mEncodedVideoFrames.PeekFront()->mTime > expectedNextAudioTime) {
// Audio encoding is not complete, and since the video frame comes
// after our next audio frame we cannot safely add it.
break;
}
frames.AppendElement(mEncodedVideoFrames.PopFront());
}
}

// If we're out of video we still may be able to add more audio...
if (mEncodedVideoFrames.GetSize() == 0) {
while (mEncodedAudioFrames.GetSize() > 0) {
if (!mEncodedVideoFrames.AtEndOfStream() &&
mEncodedAudioFrames.PeekFront()->mTime > expectedNextVideoTime) {
// Video encoding is not complete, and since the audio frame comes
// after our next video frame we cannot safely add it.
break;
}
frames.AppendElement(mEncodedAudioFrames.PopFront());
}
}

// If encoding is complete for both encoders we should signal end of stream,
// otherwise we keep going.
uint32_t flags =
mEncodedVideoFrames.AtEndOfStream() && mEncodedAudioFrames.AtEndOfStream()
? ContainerWriter::END_OF_STREAM
: 0;
nsresult rv = mWriter->WriteEncodedTrack(frames, flags);
if (NS_FAILED(rv)) {
LOG(LogLevel::Error, "Error! Failed to write muxed data to the container");
}
return rv;
}
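The interleaving rule reduces to a two-queue merge on start times, with video winning ties via the `<=` comparison. A compact model of just that rule in standard C++ (no Gecko types), which also explains the v0, a0, a48, v50 ordering the TestMuxer expectations below assert:

#include <cstdint>
#include <deque>
#include <vector>

struct Frame { uint64_t mTime; uint64_t mDuration; };

// Pops whichever queue's head starts earlier; video wins ties, mirroring
// Mux(). Safely draining the leftover queue is omitted here.
std::vector<Frame> Interleave(std::deque<Frame>& video,
                              std::deque<Frame>& audio) {
  std::vector<Frame> out;
  while (!video.empty() && !audio.empty()) {
    if (video.front().mTime <= audio.front().mTime) {
      out.push_back(video.front());
      video.pop_front();
    } else {
      out.push_back(audio.front());
      audio.pop_front();
    }
  }
  return out;
}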

} // namespace mozilla

#undef LOG
@ -1,74 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef DOM_MEDIA_ENCODER_MUXER_H_
#define DOM_MEDIA_ENCODER_MUXER_H_

#include "MediaQueue.h"

namespace mozilla {

class ContainerWriter;

// Generic Muxer class that helps pace the output from track encoders to the
// ContainerWriter, so time never appears to go backwards.
// Note that the entire class is written for single threaded access.
class Muxer {
public:
explicit Muxer(UniquePtr<ContainerWriter> aWriter);
~Muxer() = default;

// Returns true when all tracks have ended, and all data has been muxed and
// fetched.
bool IsFinished();

// Returns true if this muxer has not been given metadata yet.
bool NeedsMetadata() const { return !mMetadataSet; }

// Sets metadata for all tracks. This may only be called once.
nsresult SetMetadata(const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata);

// Adds an encoded audio frame for muxing
void AddEncodedAudioFrame(RefPtr<EncodedFrame> aFrame);

// Adds an encoded video frame for muxing
void AddEncodedVideoFrame(RefPtr<EncodedFrame> aFrame);

// Marks the audio track as ended. Once all tracks for which we have metadata
// have ended, GetData() will drain and the muxer will be marked as finished.
void AudioEndOfStream();

// Marks the video track as ended. Once all tracks for which we have metadata
// have ended, GetData() will drain and the muxer will be marked as finished.
void VideoEndOfStream();

// Gets the data that has been muxed and written into the container so far.
nsresult GetData(nsTArray<nsTArray<uint8_t>>* aOutputBuffers);

private:
// Writes data in MediaQueues to the ContainerWriter.
nsresult Mux();

// Audio frames that have been encoded and are pending write to the muxer.
MediaQueue<EncodedFrame> mEncodedAudioFrames;
// Video frames that have been encoded and are pending write to the muxer.
MediaQueue<EncodedFrame> mEncodedVideoFrames;
// The writer for the specific container we're recording into.
UniquePtr<ContainerWriter> mWriter;
// How much each audio time stamp should be delayed in microseconds. Used to
// adjust for opus codec delay.
uint64_t mAudioCodecDelay = 0;
// True once metadata has been set in the muxer.
bool mMetadataSet = false;
// True once metadata has been written to file.
bool mMetadataEncoded = false;
// True if metadata is set and contains an audio track.
bool mHasAudio = false;
// True if metadata is set and contains a video track.
bool mHasVideo = false;
};
} // namespace mozilla

#endif
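A minimal audio-only walk through this interface, assuming an already-constructed ContainerWriter and Opus metadata (error handling trimmed to a single check):

Muxer muxer(std::move(writer));  // writer: UniquePtr<ContainerWriter>

nsTArray<RefPtr<TrackMetadataBase>> metadata;
metadata.AppendElement(opusMetadata);
nsresult rv = muxer.SetMetadata(metadata);  // once, before any frames
if (NS_FAILED(rv)) {
  return rv;
}

muxer.AddEncodedAudioFrame(frame);  // repeat for each encoded frame
muxer.AudioEndOfStream();           // after the last frame

nsTArray<nsTArray<uint8_t>> buffers;
rv = muxer.GetData(&buffers);       // header plus muxed payload
// muxer.IsFinished() becomes true once everything has been fetched.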
@ -10,6 +10,7 @@

#include <opus/opus.h>

#undef LOG
#define LOG(args, ...)

namespace mozilla {
@ -227,8 +228,7 @@ already_AddRefed<TrackMetadataBase> OpusTrackEncoder::GetMetadata() {
return meta.forget();
}

nsresult OpusTrackEncoder::GetEncodedTrack(
nsTArray<RefPtr<EncodedFrame>>& aData) {
nsresult OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) {
AUTO_PROFILER_LABEL("OpusTrackEncoder::GetEncodedTrack", OTHER);

MOZ_ASSERT(mInitialized || mCanceled);
@ -325,7 +325,7 @@ nsresult OpusTrackEncoder::GetEncodedTrack(
MOZ_ASSERT(frameCopied <= 3844, "frameCopied exceeded expected range");

RefPtr<EncodedFrame> audiodata = new EncodedFrame();
audiodata->mFrameType = EncodedFrame::OPUS_AUDIO_FRAME;
audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME);
int framesInPCM = frameCopied;
if (mResampler) {
AutoTArray<AudioDataValue, 9600> resamplingDest;
@ -367,10 +367,10 @@ nsresult OpusTrackEncoder::GetEncodedTrack(
mResampledLeftover.Length());
// This is always at 48000Hz.
framesInPCM = framesLeft + outframesToCopy;
audiodata->mDuration = framesInPCM;
audiodata->SetDuration(framesInPCM);
} else {
// The ogg time stamping and pre-skip is always timed at 48000.
audiodata->mDuration = frameCopied * (kOpusSamplingRate / mSamplingRate);
audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate));
}

// Remove the raw data which has been pulled to pcm buffer.
@ -422,16 +422,14 @@ nsresult OpusTrackEncoder::GetEncodedTrack(

audiodata->SwapInFrameData(frameData);
// timestamp should be the time of the first sample
audiodata->mTime = mOutputTimeStamp;
audiodata->SetTimeStamp(mOutputTimeStamp);
mOutputTimeStamp +=
FramesToUsecs(GetPacketDuration(), kOpusSamplingRate).value();
LOG("[Opus] mOutputTimeStamp %lld.", mOutputTimeStamp);
aData.AppendElement(audiodata);
aData.AppendEncodedFrame(audiodata);
}

return result >= 0 ? NS_OK : NS_ERROR_FAILURE;
}

} // namespace mozilla

#undef LOG

@ -33,7 +33,7 @@ class OpusTrackEncoder : public AudioTrackEncoder {

already_AddRefed<TrackMetadataBase> GetMetadata() override;

nsresult GetEncodedTrack(nsTArray<RefPtr<EncodedFrame>>& aData) override;
nsresult GetEncodedTrack(EncodedFrameContainer& aData) override;

protected:
int GetPacketDuration() override;
@ -759,5 +759,3 @@ void VideoTrackEncoder::SetKeyFrameInterval(int32_t aKeyFrameInterval) {
}

} // namespace mozilla

#undef TRACK_LOG

@ -7,7 +7,7 @@
#define TrackEncoder_h_

#include "AudioSegment.h"
#include "EncodedFrame.h"
#include "EncodedFrameContainer.h"
#include "MediaStreamGraph.h"
#include "StreamTracks.h"
#include "TrackMetadataBase.h"
@ -82,7 +82,7 @@ class TrackEncoder {
* Encodes raw segments. Result data is returned in aData, and called on the
* worker thread.
*/
virtual nsresult GetEncodedTrack(nsTArray<RefPtr<EncodedFrame>>& aData) = 0;
virtual nsresult GetEncodedTrack(EncodedFrameContainer& aData) = 0;

/**
* Returns true once this TrackEncoder is initialized.
@ -220,8 +220,7 @@ already_AddRefed<TrackMetadataBase> VP8TrackEncoder::GetMetadata() {
return meta.forget();
}

nsresult VP8TrackEncoder::GetEncodedPartitions(
nsTArray<RefPtr<EncodedFrame>>& aData) {
nsresult VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData) {
vpx_codec_iter_t iter = nullptr;
EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
nsTArray<uint8_t> frameData;
@ -250,7 +249,7 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(
if (!frameData.IsEmpty()) {
// Copy the encoded data to aData.
EncodedFrame* videoData = new EncodedFrame();
videoData->mFrameType = frameType;
videoData->SetFrameType(frameType);

// Convert the timestamp and duration to Usecs.
CheckedInt64 timestamp = FramesToUsecs(pkt->data.frame.pts, mTrackRate);
@ -258,7 +257,7 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(
NS_ERROR("Microsecond timestamp overflow");
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
videoData->mTime = (uint64_t)timestamp.value();
videoData->SetTimeStamp((uint64_t)timestamp.value());

mExtractedDuration += pkt->data.frame.duration;
if (!mExtractedDuration.isValid()) {
@ -280,13 +279,14 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(
}

mExtractedDurationUs = totalDuration;
videoData->mDuration = (uint64_t)duration.value();
videoData->SetDuration((uint64_t)duration.value());
videoData->SwapInFrameData(frameData);
VP8LOG(LogLevel::Verbose,
"GetEncodedPartitions TimeStamp %" PRIu64 ", Duration %" PRIu64
", FrameType %d",
videoData->mTime, videoData->mDuration, videoData->mFrameType);
aData.AppendElement(videoData);
videoData->GetTimeStamp(), videoData->GetDuration(),
videoData->GetFrameType());
aData.AppendEncodedFrame(videoData);
}

return pkt ? NS_OK : NS_ERROR_NOT_AVAILABLE;
@ -441,8 +441,7 @@ VP8TrackEncoder::EncodeOperation VP8TrackEncoder::GetNextEncodeOperation(
* encode it.
* 4. Remove the encoded chunks in mSourceSegment after for-loop.
*/
nsresult VP8TrackEncoder::GetEncodedTrack(
nsTArray<RefPtr<EncodedFrame>>& aData) {
nsresult VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) {
AUTO_PROFILER_LABEL("VP8TrackEncoder::GetEncodedTrack", OTHER);

MOZ_ASSERT(mInitialized || mCanceled);
@ -510,7 +509,7 @@ nsresult VP8TrackEncoder::GetEncodedTrack(
// because this frame will be skipped.
VP8LOG(LogLevel::Warning,
"MediaRecorder lagging behind. Skipping a frame.");
RefPtr<EncodedFrame> last = aData.LastElement();
RefPtr<EncodedFrame> last = aData.GetEncodedFrames().LastElement();
if (last) {
mExtractedDuration += chunk.mDuration;
if (!mExtractedDuration.isValid()) {
@ -526,7 +525,8 @@ nsresult VP8TrackEncoder::GetEncodedTrack(
NS_ERROR("skipped duration overflow");
return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
}
last->mDuration += static_cast<uint64_t>(skippedDuration.value());
last->SetDuration(last->GetDuration() +
(static_cast<uint64_t>(skippedDuration.value())));
}
}

@ -570,5 +570,3 @@ nsresult VP8TrackEncoder::GetEncodedTrack(
}

} // namespace mozilla

#undef VP8LOG
@ -34,7 +34,7 @@ class VP8TrackEncoder : public VideoTrackEncoder {

already_AddRefed<TrackMetadataBase> GetMetadata() final;

nsresult GetEncodedTrack(nsTArray<RefPtr<EncodedFrame>>& aData) final;
nsresult GetEncodedTrack(EncodedFrameContainer& aData) final;

protected:
nsresult Init(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
@ -50,7 +50,7 @@ class VP8TrackEncoder : public VideoTrackEncoder {
// null for EOS detection.
// NS_OK if some data was appended to aData.
// An error nsresult otherwise.
nsresult GetEncodedPartitions(nsTArray<RefPtr<EncodedFrame>>& aData);
nsresult GetEncodedPartitions(EncodedFrameContainer& aData);

// Prepare the input data to the mVPXImageWrapper for encoding.
nsresult PrepareRawFrame(VideoChunk& aChunk);
@ -9,7 +9,7 @@ with Files('*'):

EXPORTS += [
'ContainerWriter.h',
'EncodedFrame.h',
'EncodedFrameContainer.h',
'MediaEncoder.h',
'OpusTrackEncoder.h',
'TrackEncoder.h',
@ -18,7 +18,6 @@ EXPORTS += [

UNIFIED_SOURCES += [
'MediaEncoder.cpp',
'Muxer.cpp',
'OpusTrackEncoder.cpp',
'TrackEncoder.cpp',
]
@ -1,27 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#include "AudioGenerator.h"

#include "AudioSegment.h"

using namespace mozilla;

AudioGenerator::AudioGenerator(int32_t aChannels, int32_t aSampleRate)
: mGenerator(aSampleRate, 1000), mChannels(aChannels) {}

void AudioGenerator::Generate(AudioSegment& aSegment, const int32_t& aSamples) {
RefPtr<SharedBuffer> buffer =
SharedBuffer::Create(aSamples * sizeof(int16_t));
int16_t* dest = static_cast<int16_t*>(buffer->Data());
mGenerator.generate(dest, aSamples);
AutoTArray<const int16_t*, 1> channels;
for (int32_t i = 0; i < mChannels; i++) {
channels.AppendElement(dest);
}
aSegment.AppendFrames(buffer.forget(), channels, aSamples,
PRINCIPAL_HANDLE_NONE);
}
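Typical use in a test, assuming a mono generator at 44.1 kHz; per the constructor above the generated tone is a sine wave:

AudioGenerator generator(1 /* channel */, 44100 /* Hz */);

mozilla::AudioSegment segment;
generator.Generate(segment, 44100);  // appends one second of samples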
@ -1,27 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#ifndef DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_
#define DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_

#include "prtime.h"
#include "SineWaveGenerator.h"

namespace mozilla {
class AudioSegment;
}

class AudioGenerator {
public:
AudioGenerator(int32_t aChannels, int32_t aSampleRate);
void Generate(mozilla::AudioSegment& aSegment, const int32_t& aSamples);

private:
mozilla::SineWaveGenerator mGenerator;
const int32_t mChannels;
};

#endif // DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_
@ -5,11 +5,33 @@

#include "gtest/gtest.h"
#include "OpusTrackEncoder.h"

#include "AudioGenerator.h"
#include "SineWaveGenerator.h"

using namespace mozilla;

class AudioGenerator {
public:
AudioGenerator(int32_t aChannels, int32_t aSampleRate)
: mGenerator(aSampleRate, 1000), mChannels(aChannels) {}

void Generate(AudioSegment& aSegment, const int32_t& aSamples) {
RefPtr<SharedBuffer> buffer =
SharedBuffer::Create(aSamples * sizeof(int16_t));
int16_t* dest = static_cast<int16_t*>(buffer->Data());
mGenerator.generate(dest, aSamples);
AutoTArray<const int16_t*, 1> channels;
for (int32_t i = 0; i < mChannels; i++) {
channels.AppendElement(dest);
}
aSegment.AppendFrames(buffer.forget(), channels, aSamples,
PRINCIPAL_HANDLE_NONE);
}

private:
SineWaveGenerator mGenerator;
const int32_t mChannels;
};

class TestOpusTrackEncoder : public OpusTrackEncoder {
public:
TestOpusTrackEncoder() : OpusTrackEncoder(90000) {}
@ -201,13 +223,13 @@ TEST(OpusAudioTrackEncoder, FrameEncode)

encoder.AppendAudioSegment(std::move(segment));

nsTArray<RefPtr<EncodedFrame>> frames;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

// Verify that encoded data is 5 seconds long.
uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
// 44100 as used above gets resampled to 48000 for opus.
const uint64_t five = 48000 * 5;
@ -1,210 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <vector>

#include "ContainerWriter.h"
#include "EncodedFrame.h"
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "Muxer.h"
#include "OpusTrackEncoder.h"
#include "WebMWriter.h"

using namespace mozilla;
using testing::_;
using testing::ElementsAre;
using testing::Return;
using testing::StaticAssertTypeEq;
using testing::internal::StlContainerView;

static RefPtr<TrackMetadataBase> CreateOpusMetadata(int32_t aChannels,
float aSamplingFrequency,
size_t aIdHeaderSize,
size_t aCommentHeaderSize) {
auto opusMetadata = MakeRefPtr<OpusMetadata>();
opusMetadata->mChannels = aChannels;
opusMetadata->mSamplingFrequency = aSamplingFrequency;
opusMetadata->mIdHeader.SetLength(aIdHeaderSize);
for (size_t i = 0; i < opusMetadata->mIdHeader.Length(); i++) {
opusMetadata->mIdHeader[i] = 0;
}
opusMetadata->mCommentHeader.SetLength(aCommentHeaderSize);
for (size_t i = 0; i < opusMetadata->mCommentHeader.Length(); i++) {
opusMetadata->mCommentHeader[i] = 0;
}
return opusMetadata;
}

static RefPtr<TrackMetadataBase> CreateVP8Metadata(int32_t aWidth,
int32_t aHeight) {
auto vp8Metadata = MakeRefPtr<VP8Metadata>();
vp8Metadata->mWidth = aWidth;
vp8Metadata->mDisplayWidth = aWidth;
vp8Metadata->mHeight = aHeight;
vp8Metadata->mDisplayHeight = aHeight;
return vp8Metadata;
}

static RefPtr<EncodedFrame> CreateFrame(EncodedFrame::FrameType aType,
uint64_t aTimeUs, uint64_t aDurationUs,
size_t aDataSize) {
auto frame = MakeRefPtr<EncodedFrame>();
frame->mTime = aTimeUs;
if (aType == EncodedFrame::OPUS_AUDIO_FRAME) {
// Opus duration is in samples, so figure out how many samples will put us
// closest to aDurationUs without going over.
frame->mDuration = UsecsToFrames(aDurationUs, 48000).value();
} else {
frame->mDuration = aDurationUs;
}
frame->mFrameType = aType;

nsTArray<uint8_t> data;
data.SetLength(aDataSize);
frame->SwapInFrameData(data);
return frame;
}

// This makes the googletest framework treat nsTArray as an std::vector, so all
// the regular Matchers (like ElementsAre) work for it.
template <typename Element>
class StlContainerView<nsTArray<Element>> {
public:
typedef GTEST_REMOVE_CONST_(Element) RawElement;
typedef std::vector<RawElement> type;
typedef const type const_reference;
static const_reference ConstReference(const nsTArray<Element>& aContainer) {
StaticAssertTypeEq<Element, RawElement>();
return type(aContainer.begin(), aContainer.end());
}
static type Copy(const nsTArray<Element>& aContainer) {
return type(aContainer.begin(), aContainer.end());
}
};
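With this specialization in place, gmock's container matchers apply directly to nsTArray arguments, which is what lets the EXPECT_CALL expectations below say ElementsAre(...) against nsTArray-taking mock methods. A contrived standalone check showing the same mechanism:

nsTArray<int> numbers;
numbers.AppendElement(1);
numbers.AppendElement(2);
EXPECT_THAT(numbers, ElementsAre(1, 2));  // works via StlContainerView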

class MockContainerWriter : public ContainerWriter {
public:
MOCK_METHOD2(WriteEncodedTrack,
nsresult(const nsTArray<RefPtr<EncodedFrame>>&, uint32_t));
MOCK_METHOD1(SetMetadata,
nsresult(const nsTArray<RefPtr<TrackMetadataBase>>&));
MOCK_METHOD0(IsWritingComplete, bool());
MOCK_METHOD2(GetContainerData,
nsresult(nsTArray<nsTArray<uint8_t>>*, uint32_t));
};

TEST(MuxerTest, AudioOnly)
{
MockContainerWriter* writer = new MockContainerWriter();
Muxer muxer(WrapUnique<ContainerWriter>(writer));

// Prepare data

auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
auto audioFrame = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48000, 4096);

// Expectations

EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta)))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(audioFrame),
ContainerWriter::END_OF_STREAM))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, IsWritingComplete()).Times(0);

// Test

EXPECT_EQ(muxer.SetMetadata(nsTArray<RefPtr<TrackMetadataBase>>({opusMeta})),
NS_OK);
muxer.AddEncodedAudioFrame(audioFrame);
muxer.AudioEndOfStream();
nsTArray<nsTArray<uint8_t>> buffers;
EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
}

TEST(MuxerTest, AudioVideo)
{
MockContainerWriter* writer = new MockContainerWriter();
Muxer muxer(WrapUnique<ContainerWriter>(writer));

// Prepare data

auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
auto vp8Meta = CreateVP8Metadata(640, 480);
auto audioFrame = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48000, 4096);
auto videoFrame = CreateFrame(EncodedFrame::VP8_I_FRAME, 0, 50000, 65536);

// Expectations

EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta)))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(videoFrame, audioFrame),
ContainerWriter::END_OF_STREAM))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, IsWritingComplete()).Times(0);

// Test

EXPECT_EQ(muxer.SetMetadata(
nsTArray<RefPtr<TrackMetadataBase>>({opusMeta, vp8Meta})),
NS_OK);
muxer.AddEncodedAudioFrame(audioFrame);
muxer.AudioEndOfStream();
muxer.AddEncodedVideoFrame(videoFrame);
muxer.VideoEndOfStream();
nsTArray<nsTArray<uint8_t>> buffers;
EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
}

TEST(MuxerTest, AudioVideoOutOfOrder)
{
MockContainerWriter* writer = new MockContainerWriter();
Muxer muxer(WrapUnique<ContainerWriter>(writer));

// Prepare data

auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16);
auto vp8Meta = CreateVP8Metadata(640, 480);
auto a0 = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48, 4096);
auto v0 = CreateFrame(EncodedFrame::VP8_I_FRAME, 0, 50, 65536);
auto a48 = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 48, 48, 4096);
auto v50 = CreateFrame(EncodedFrame::VP8_I_FRAME, 50, 50, 65536);

// Expectations

EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta)))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(v0, a0, a48, v50),
ContainerWriter::END_OF_STREAM))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED))
.WillOnce(Return(NS_OK));
EXPECT_CALL(*writer, IsWritingComplete()).Times(0);

// Test

EXPECT_EQ(muxer.SetMetadata(
nsTArray<RefPtr<TrackMetadataBase>>({opusMeta, vp8Meta})),
NS_OK);
muxer.AddEncodedAudioFrame(a0);
muxer.AddEncodedVideoFrame(v0);
muxer.AddEncodedVideoFrame(v50);
muxer.VideoEndOfStream();
muxer.AddEncodedAudioFrame(a48);
muxer.AudioEndOfStream();
nsTArray<nsTArray<uint8_t>> buffers;
EXPECT_EQ(muxer.GetData(&buffers), NS_OK);
}
@ -143,8 +143,8 @@ TEST(VP8VideoTrackEncoder, FrameEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(images.Length()));

// Pull Encoded Data back from encoder.
nsTArray<RefPtr<EncodedFrame>> frames;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
}

// Test that encoding a single frame gives useful output.
@ -165,20 +165,21 @@ TEST(VP8VideoTrackEncoder, SingleFrameEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Read out encoded data, and verify.
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
const size_t oneElement = 1;
ASSERT_EQ(oneElement, frames.Length());

EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType)
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType())
<< "We only have one frame, so it should be a keyframe";

const uint64_t halfSecond = PR_USEC_PER_SEC / 2;
EXPECT_EQ(halfSecond, frames[0]->mDuration);
EXPECT_EQ(halfSecond, frames[0]->GetDuration());
}

// Test that encoding a couple of identical images gives useful output.
@ -203,15 +204,15 @@ TEST(VP8VideoTrackEncoder, SameFrameEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.5));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify total duration being 1.5s.
uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t oneAndAHalf = (PR_USEC_PER_SEC / 2) * 3;
EXPECT_EQ(oneAndAHalf, totalDuration);
@ -239,15 +240,15 @@ TEST(VP8VideoTrackEncoder, SkippedFrames)
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(100));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify total duration being 100 * 1ms = 100ms.
uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t hundredMillis = PR_USEC_PER_SEC / 10;
EXPECT_EQ(hundredMillis, totalDuration);
@ -281,15 +282,15 @@ TEST(VP8VideoTrackEncoder, RoundingErrorFramesEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify total duration being 1s.
uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t oneSecond = PR_USEC_PER_SEC;
EXPECT_EQ(oneSecond, totalDuration);
@ -318,8 +319,8 @@ TEST(VP8VideoTrackEncoder, TimestampFrameEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

@ -330,9 +331,9 @@ TEST(VP8VideoTrackEncoder, TimestampFrameEncode)
(PR_USEC_PER_SEC / 10)};
uint64_t totalDuration = 0;
size_t i = 0;
for (auto& frame : frames) {
EXPECT_EQ(expectedDurations[i++], frame->mDuration);
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
EXPECT_EQ(expectedDurations[i++], frame->GetDuration());
totalDuration += frame->GetDuration();
}
const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3;
EXPECT_EQ(pointThree, totalDuration);
@ -367,8 +368,8 @@ TEST(VP8VideoTrackEncoder, DriftingFrameEncode)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

@ -379,9 +380,9 @@ TEST(VP8VideoTrackEncoder, DriftingFrameEncode)
(PR_USEC_PER_SEC / 10) * 2};
uint64_t totalDuration = 0;
size_t i = 0;
for (auto& frame : frames) {
EXPECT_EQ(expectedDurations[i++], frame->mDuration);
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
EXPECT_EQ(expectedDurations[i++], frame->GetDuration());
totalDuration += frame->GetDuration();
}
const uint64_t pointSix = (PR_USEC_PER_SEC / 10) * 6;
EXPECT_EQ(pointSix, totalDuration);
@ -432,18 +433,18 @@ TEST(VP8VideoTrackEncoder, Suspended)

encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify that we have two encoded frames and a total duration of 0.2s.
const uint64_t two = 2;
EXPECT_EQ(two, frames.Length());
EXPECT_EQ(two, container.GetEncodedFrames().Length());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t pointTwo = (PR_USEC_PER_SEC / 10) * 2;
EXPECT_EQ(pointTwo, totalDuration);
@ -482,18 +483,18 @@ TEST(VP8VideoTrackEncoder, SuspendedUntilEnd)

encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify that we have one encoded frame and a total duration of 0.1s.
const uint64_t one = 1;
EXPECT_EQ(one, frames.Length());
EXPECT_EQ(one, container.GetEncodedFrames().Length());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t pointOne = PR_USEC_PER_SEC / 10;
EXPECT_EQ(pointOne, totalDuration);
@ -521,14 +522,14 @@ TEST(VP8VideoTrackEncoder, AlwaysSuspended)

encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify that we have no encoded frames.
const uint64_t none = 0;
EXPECT_EQ(none, frames.Length());
EXPECT_EQ(none, container.GetEncodedFrames().Length());
}

// Test that encoding a track that is suspended in the beginning works.
@ -565,18 +566,18 @@ TEST(VP8VideoTrackEncoder, SuspendedBeginning)

encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify that we have one encoded frame and a total duration of 0.5s.
const uint64_t one = 1;
EXPECT_EQ(one, frames.Length());
EXPECT_EQ(one, container.GetEncodedFrames().Length());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t half = PR_USEC_PER_SEC / 2;
EXPECT_EQ(half, totalDuration);
@ -618,18 +619,18 @@ TEST(VP8VideoTrackEncoder, SuspendedOverlap)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

// Verify that we have two encoded frames with durations of 0.5s and 0.7s.
const uint64_t two = 2;
ASSERT_EQ(two, frames.Length());
ASSERT_EQ(two, container.GetEncodedFrames().Length());
const uint64_t pointFive = (PR_USEC_PER_SEC / 10) * 5;
EXPECT_EQ(pointFive, frames[0]->mDuration);
EXPECT_EQ(pointFive, container.GetEncodedFrames()[0]->GetDuration());
const uint64_t pointSeven = (PR_USEC_PER_SEC / 10) * 7;
EXPECT_EQ(pointSeven, frames[1]->mDuration);
EXPECT_EQ(pointSeven, container.GetEncodedFrames()[1]->GetDuration());
}
// Test that ending a track in the middle of already pushed data works.
@ -650,14 +651,14 @@ TEST(VP8VideoTrackEncoder, PrematureEnding)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t half = PR_USEC_PER_SEC / 2;
EXPECT_EQ(half, totalDuration);
@ -682,14 +683,14 @@ TEST(VP8VideoTrackEncoder, DelayedStart)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t half = PR_USEC_PER_SEC / 2;
EXPECT_EQ(half, totalDuration);
@ -715,14 +716,14 @@ TEST(VP8VideoTrackEncoder, DelayedStartOtherEventOrder)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t half = PR_USEC_PER_SEC / 2;
EXPECT_EQ(half, totalDuration);
@ -747,14 +748,14 @@ TEST(VP8VideoTrackEncoder, VeryDelayedStart)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(10.5));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t half = PR_USEC_PER_SEC / 2;
EXPECT_EQ(half, totalDuration);
@ -784,34 +785,34 @@ TEST(VP8VideoTrackEncoder, LongFramesReEncoded)
{
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.5));

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
EXPECT_FALSE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t oneSec = PR_USEC_PER_SEC;
EXPECT_EQ(oneSec, totalDuration);
EXPECT_EQ(1U, frames.Length());
EXPECT_EQ(1U, container.GetEncodedFrames().Length());
}

{
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(11));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
EXPECT_TRUE(encoder.IsEncodingComplete());

uint64_t totalDuration = 0;
for (auto& frame : frames) {
totalDuration += frame->mDuration;
for (auto& frame : container.GetEncodedFrames()) {
totalDuration += frame->GetDuration();
}
const uint64_t tenSec = PR_USEC_PER_SEC * 10;
EXPECT_EQ(tenSec, totalDuration);
EXPECT_EQ(10U, frames.Length());
EXPECT_EQ(10U, container.GetEncodedFrames().Length());
}
}
@ -852,36 +853,37 @@ TEST(VP8VideoTrackEncoder, ShortKeyFrameInterval)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.2));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
ASSERT_EQ(6UL, frames.Length());

// [0, 400ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 400UL, frames[0]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 400UL, frames[0]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType());

// [400ms, 600ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[1]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[1]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType());

// [600ms, 750ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[2]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[2]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[2]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[2]->GetFrameType());

// [750ms, 900ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[3]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[3]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->GetFrameType());

// [900ms, 1100ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType());

// [1100ms, 1200ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType());
}
// Test that an encoding with a defined key frame interval encodes keyframes
@ -921,36 +923,37 @@ TEST(VP8VideoTrackEncoder, LongKeyFrameInterval)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2.2));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
ASSERT_EQ(6UL, frames.Length());

// [0, 600ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType());

// [600ms, 900ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType());

// [900ms, 1100ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType());

// [1100ms, 1900ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->GetFrameType());

// [1900ms, 2100ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType());

// [2100ms, 2200ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType());
}
// Test that an encoding with no defined key frame interval encodes keyframes
@ -988,36 +991,37 @@ TEST(VP8VideoTrackEncoder, DefaultKeyFrameInterval)
encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2.2));
encoder.NotifyEndOfStream();

nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());

const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
ASSERT_EQ(6UL, frames.Length());

// [0, 600ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType());

// [600ms, 900ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType());

// [900ms, 1100ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType());

// [1100ms, 1900ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->GetFrameType());

// [1900ms, 2100ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType());

// [2100ms, 2200ms)
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration);
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType);
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration());
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType());
}
// Test that an encoding where the key frame interval is updated dynamically
|
||||
@ -1027,7 +1031,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
// Set keyframe interval to 100ms.
|
||||
@ -1076,7 +1080,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges)
|
||||
// Advancing 501ms, so the first bit of the frame starting at 500ms is
|
||||
// included.
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(501));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
|
||||
{
|
||||
VideoSegment segment;
|
||||
@ -1102,7 +1106,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges)
|
||||
|
||||
// Advancing 2000ms from 501ms to 2501ms
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(2501));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
|
||||
{
|
||||
VideoSegment segment;
|
||||
@ -1126,67 +1130,68 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges)
|
||||
|
||||
encoder.NotifyEndOfStream();
|
||||
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(14UL, frames.Length());
|
||||
|
||||
// [0, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType());
|
||||
|
||||
// [100ms, 120ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 20UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[1]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 20UL, frames[1]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[1]->GetFrameType());
|
||||
|
||||
// [120ms, 130ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 10UL, frames[2]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 10UL, frames[2]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType());
|
||||
|
||||
// [130ms, 200ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 70UL, frames[3]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 70UL, frames[3]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->GetFrameType());
|
||||
|
||||
// [200ms, 300ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[4]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[4]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[4]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[4]->GetFrameType());
|
||||
|
||||
// [300ms, 500ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[5]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[5]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType());
|
||||
|
||||
// [500ms, 1300ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[6]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[6]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[6]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[6]->GetFrameType());
|
||||
|
||||
// [1300ms, 1400ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[7]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[7]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[7]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[7]->GetFrameType());
|
||||
|
||||
// [1400ms, 2400ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frames[8]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[8]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frames[8]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[8]->GetFrameType());
|
||||
|
||||
// [2400ms, 2500ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[9]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[9]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[9]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[9]->GetFrameType());
|
||||
|
||||
// [2500ms, 2600ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[10]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[10]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[10]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[10]->GetFrameType());
|
||||
|
||||
// [2600ms, 2800ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[11]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[11]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[11]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[11]->GetFrameType());
|
||||
|
||||
// [2800ms, 2900ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[12]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[12]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[12]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[12]->GetFrameType());
|
||||
|
||||
// [2900ms, 3000ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[13]->mDuration);
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[13]->mFrameType);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[13]->GetDuration());
|
||||
EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[13]->GetFrameType());
|
||||
}
|
||||
|
||||
// Test that an encoding which is disabled on a frame timestamp encodes
|
||||
@ -1196,7 +1201,7 @@ TEST(VP8VideoTrackEncoder, DisableOnFrameTime)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
// Pass a frame in at t=0.
|
||||
@ -1221,16 +1226,17 @@ TEST(VP8VideoTrackEncoder, DisableOnFrameTime)
|
||||
encoder.Disable(now + TimeDuration::FromMilliseconds(100));
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(2UL, frames.Length());
|
||||
|
||||
// [0, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration());
|
||||
|
||||
// [100ms, 200ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->GetDuration());
|
||||
}
|
||||
|
||||
// Test that an encoding which is disabled between two frame timestamps encodes
|
||||
@ -1240,7 +1246,7 @@ TEST(VP8VideoTrackEncoder, DisableBetweenFrames)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
// Pass a frame in at t=0.
|
||||
@ -1262,19 +1268,20 @@ TEST(VP8VideoTrackEncoder, DisableBetweenFrames)
|
||||
encoder.Disable(now + TimeDuration::FromMilliseconds(50));
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(3UL, frames.Length());
|
||||
|
||||
// [0, 50ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->GetDuration());
|
||||
|
||||
// [50ms, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration());
|
||||
|
||||
// [100ms, 200ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration());
|
||||
}
|
||||
|
||||
// Test that an encoding which is enabled on a frame timestamp encodes
|
||||
@ -1284,7 +1291,7 @@ TEST(VP8VideoTrackEncoder, EnableOnFrameTime)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
// Disable the track at t=0.
|
||||
@ -1311,16 +1318,17 @@ TEST(VP8VideoTrackEncoder, EnableOnFrameTime)
|
||||
encoder.Enable(now + TimeDuration::FromMilliseconds(100));
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(2UL, frames.Length());
|
||||
|
||||
// [0, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration());
|
||||
|
||||
// [100ms, 200ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->GetDuration());
|
||||
}
|
||||
|
||||
// Test that an encoding which is enabled between two frame timestamps encodes
|
||||
@ -1330,7 +1338,7 @@ TEST(VP8VideoTrackEncoder, EnableBetweenFrames)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
// Disable the track at t=0.
|
||||
@ -1354,19 +1362,20 @@ TEST(VP8VideoTrackEncoder, EnableBetweenFrames)
|
||||
encoder.Enable(now + TimeDuration::FromMilliseconds(50));
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(3UL, frames.Length());
|
||||
|
||||
// [0, 50ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->GetDuration());
|
||||
|
||||
// [50ms, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration());
|
||||
|
||||
// [100ms, 200ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration());
|
||||
}
|
||||
|
||||
// Test that making time go backwards removes any future frames in the encoder.
|
||||
@ -1375,7 +1384,7 @@ TEST(VP8VideoTrackEncoder, BackwardsTimeResets)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
encoder.SetStartOffset(now);
|
||||
@ -1422,22 +1431,23 @@ TEST(VP8VideoTrackEncoder, BackwardsTimeResets)
|
||||
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(4UL, frames.Length());
|
||||
|
||||
// [0, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration());
|
||||
|
||||
// [100ms, 150ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration());
|
||||
|
||||
// [150ms, 250ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration());
|
||||
|
||||
// [250ms, 300ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[3]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[3]->GetDuration());
|
||||
}
|
||||
|
||||
// Test that trying to encode a null image removes any future frames in the
|
||||
@ -1447,7 +1457,7 @@ TEST(VP8VideoTrackEncoder, NullImageResets)
|
||||
TestVP8TrackEncoder encoder;
|
||||
YUVBufferGenerator generator;
|
||||
generator.Init(mozilla::gfx::IntSize(640, 480));
|
||||
nsTArray<RefPtr<EncodedFrame>> frames;
|
||||
EncodedFrameContainer container;
|
||||
TimeStamp now = TimeStamp::Now();
|
||||
|
||||
encoder.SetStartOffset(now);
|
||||
@ -1494,19 +1504,20 @@ TEST(VP8VideoTrackEncoder, NullImageResets)
|
||||
|
||||
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300));
|
||||
encoder.NotifyEndOfStream();
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
|
||||
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
|
||||
EXPECT_TRUE(encoder.IsEncodingComplete());
|
||||
|
||||
const nsTArray<RefPtr<EncodedFrame>>& frames = container.GetEncodedFrames();
|
||||
ASSERT_EQ(3UL, frames.Length());
|
||||
|
||||
// [0, 100ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration());
|
||||
|
||||
// [100ms, 250ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[1]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[1]->GetDuration());
|
||||
|
||||
// [250ms, 300ms)
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[2]->mDuration);
|
||||
EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[2]->GetDuration());
|
||||
}
|
||||
|
||||
// EOS test
@ -1520,8 +1531,8 @@ TEST(VP8VideoTrackEncoder, EncodeComplete)
// Pull Encoded Data back from encoder. Since we have sent
// EOS to encoder, encoder.GetEncodedTrack should return
// NS_OK immediately.
nsTArray<RefPtr<EncodedFrame>> frames;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
EncodedFrameContainer container;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

EXPECT_TRUE(encoder.IsEncodingComplete());
}
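For orientation, the churn in the encoder tests above is purely an API-shape change that this backout reverts. A minimal sketch of the two retrieval styles, using only names visible in the diff (illustration only, not a definitive API reference):

// Post-bug-1014393 shape (being backed out): caller supplies the array.
nsTArray<RefPtr<EncodedFrame>> frames;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));

// Restored shape: frames travel inside an EncodedFrameContainer.
EncodedFrameContainer container;
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));
const nsTArray<RefPtr<EncodedFrame>>& restored = container.GetEncodedFrames();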
@ -40,30 +40,28 @@ class WebMVP8TrackEncoder : public VP8TrackEncoder {
}
};

static void GetOpusMetadata(int aChannels, int aSampleRate,
TrackRate aTrackRate,
nsTArray<RefPtr<TrackMetadataBase>>& aMeta) {
WebMOpusTrackEncoder opusEncoder(aTrackRate);
EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels, aSampleRate));
aMeta.AppendElement(opusEncoder.GetMetadata());
}

static void GetVP8Metadata(int32_t aWidth, int32_t aHeight,
int32_t aDisplayWidth, int32_t aDisplayHeight,
TrackRate aTrackRate,
nsTArray<RefPtr<TrackMetadataBase>>& aMeta) {
WebMVP8TrackEncoder vp8Encoder;
EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth,
aDisplayHeight));
aMeta.AppendElement(vp8Encoder.GetMetadata());
}

const uint64_t FIXED_DURATION = 1000000;
const uint32_t FIXED_FRAMESIZE = 500;

class TestWebMWriter : public WebMWriter {
public:
TestWebMWriter() : WebMWriter(), mTimestamp(0) {}
explicit TestWebMWriter(int aTrackTypes)
: WebMWriter(aTrackTypes), mTimestamp(0) {}

void SetOpusMetadata(int aChannels, int aSampleRate, TrackRate aTrackRate) {
WebMOpusTrackEncoder opusEncoder(aTrackRate);
EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels, aSampleRate));
RefPtr<TrackMetadataBase> opusMeta = opusEncoder.GetMetadata();
SetMetadata(opusMeta);
}
void SetVP8Metadata(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
int32_t aDisplayHeight, TrackRate aTrackRate) {
WebMVP8TrackEncoder vp8Encoder;
EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth,
aDisplayHeight));
RefPtr<TrackMetadataBase> vp8Meta = vp8Encoder.GetMetadata();
SetMetadata(vp8Meta);
}

// When we append an I-Frame into WebM muxer, the muxer will treat previous
// data as "a cluster".
@ -71,22 +69,22 @@ class TestWebMWriter : public WebMWriter {
// previous cluster so that we can retrieve data by |GetContainerData|.
void AppendDummyFrame(EncodedFrame::FrameType aFrameType,
uint64_t aDuration) {
nsTArray<RefPtr<EncodedFrame>> encodedVideoData;
EncodedFrameContainer encodedVideoData;
nsTArray<uint8_t> frameData;
RefPtr<EncodedFrame> videoData = new EncodedFrame();
// Create dummy frame data.
frameData.SetLength(FIXED_FRAMESIZE);
videoData->mFrameType = aFrameType;
videoData->mTime = mTimestamp;
videoData->mDuration = aDuration;
videoData->SetFrameType(aFrameType);
videoData->SetTimeStamp(mTimestamp);
videoData->SetDuration(aDuration);
videoData->SwapInFrameData(frameData);
encodedVideoData.AppendElement(videoData);
encodedVideoData.AppendEncodedFrame(videoData);
WriteEncodedTrack(encodedVideoData, 0);
mTimestamp += aDuration;
}

bool HaveValidCluster() {
nsTArray<nsTArray<uint8_t>> encodedBuf;
nsTArray<nsTArray<uint8_t> > encodedBuf;
GetContainerData(&encodedBuf, 0);
return (encodedBuf.Length() > 0) ? true : false;
}
@ -98,32 +96,35 @@ class TestWebMWriter : public WebMWriter {

TEST(WebMWriter, Metadata)
{
TestWebMWriter writer;
TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK |
ContainerWriter::CREATE_VIDEO_TRACK);

// The output should be empty since we didn't set any metadata in writer.
nsTArray<nsTArray<uint8_t>> encodedBuf;
nsTArray<nsTArray<uint8_t> > encodedBuf;
writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
EXPECT_TRUE(encodedBuf.Length() == 0);
writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED);
EXPECT_TRUE(encodedBuf.Length() == 0);

nsTArray<RefPtr<TrackMetadataBase>> meta;

// Get opus metadata.
// Set opus metadata.
int channel = 1;
int sampleRate = 44100;
TrackRate aTrackRate = 90000;
GetOpusMetadata(channel, sampleRate, aTrackRate, meta);
writer.SetOpusMetadata(channel, sampleRate, aTrackRate);

// Get vp8 metadata
// No output data since we didn't set both audio/video
// metadata in writer.
writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
EXPECT_TRUE(encodedBuf.Length() == 0);
writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED);
EXPECT_TRUE(encodedBuf.Length() == 0);

// Set vp8 metadata
int32_t width = 640;
int32_t height = 480;
int32_t displayWidth = 640;
int32_t displayHeight = 480;
GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta);

// Set metadata
writer.SetMetadata(meta);
writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate);

writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
EXPECT_TRUE(encodedBuf.Length() > 0);
@ -131,22 +132,21 @@ TEST(WebMWriter, Metadata)

TEST(WebMWriter, Cluster)
{
TestWebMWriter writer;
nsTArray<RefPtr<TrackMetadataBase>> meta;
// Get opus metadata.
TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK |
ContainerWriter::CREATE_VIDEO_TRACK);
// Set opus metadata.
int channel = 1;
int sampleRate = 48000;
TrackRate aTrackRate = 90000;
GetOpusMetadata(channel, sampleRate, aTrackRate, meta);
// Get vp8 metadata
writer.SetOpusMetadata(channel, sampleRate, aTrackRate);
// Set vp8 metadata
int32_t width = 320;
int32_t height = 240;
int32_t displayWidth = 320;
int32_t displayHeight = 240;
GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta);
writer.SetMetadata(meta);
writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate);

nsTArray<nsTArray<uint8_t>> encodedBuf;
nsTArray<nsTArray<uint8_t> > encodedBuf;
writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
EXPECT_TRUE(encodedBuf.Length() > 0);
encodedBuf.Clear();
@ -174,20 +174,19 @@ TEST(WebMWriter, Cluster)

TEST(WebMWriter, FLUSH_NEEDED)
{
TestWebMWriter writer;
nsTArray<RefPtr<TrackMetadataBase>> meta;
// Get opus metadata.
TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK |
ContainerWriter::CREATE_VIDEO_TRACK);
// Set opus metadata.
int channel = 2;
int sampleRate = 44100;
TrackRate aTrackRate = 100000;
GetOpusMetadata(channel, sampleRate, aTrackRate, meta);
// Get vp8 metadata
writer.SetOpusMetadata(channel, sampleRate, aTrackRate);
// Set vp8 metadata
int32_t width = 176;
int32_t height = 352;
int32_t displayWidth = 176;
int32_t displayHeight = 352;
GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta);
writer.SetMetadata(meta);
writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate);

// write the first I-Frame.
writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
@ -200,7 +199,7 @@ TEST(WebMWriter, FLUSH_NEEDED)
// retrieved
EXPECT_FALSE(writer.HaveValidCluster());

nsTArray<nsTArray<uint8_t>> encodedBuf;
nsTArray<nsTArray<uint8_t> > encodedBuf;
// Have data because the flag ContainerWriter::FLUSH_NEEDED
writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED);
EXPECT_TRUE(encodedBuf.Length() > 0);
@ -295,20 +294,19 @@ static int64_t webm_tell(void* aUserData) {

TEST(WebMWriter, bug970774_aspect_ratio)
{
TestWebMWriter writer;
nsTArray<RefPtr<TrackMetadataBase>> meta;
// Get opus metadata.
TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK |
ContainerWriter::CREATE_VIDEO_TRACK);
// Set opus metadata.
int channel = 1;
int sampleRate = 44100;
TrackRate aTrackRate = 90000;
GetOpusMetadata(channel, sampleRate, aTrackRate, meta);
writer.SetOpusMetadata(channel, sampleRate, aTrackRate);
// Set vp8 metadata
int32_t width = 640;
int32_t height = 480;
int32_t displayWidth = 1280;
int32_t displayHeight = 960;
GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta);
writer.SetMetadata(meta);
writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate);

// write the first I-Frame.
writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);
@ -317,7 +315,7 @@ TEST(WebMWriter, bug970774_aspect_ratio)
writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION);

// Get the metadata and the first cluster.
nsTArray<nsTArray<uint8_t>> encodedBuf;
nsTArray<nsTArray<uint8_t> > encodedBuf;
writer.GetContainerData(&encodedBuf, 0);
// Flatten the encodedBuf.
WebMioData ioData;
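The four WebMWriter tests above share one setup pattern under the restored API: construct the writer with the track types it must expect, hand it per-track metadata, then pull container data. A condensed sketch (identifiers from the diff; the concrete numbers are arbitrary):

TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK |
                      ContainerWriter::CREATE_VIDEO_TRACK);
writer.SetOpusMetadata(1, 44100, 90000);           // satisfies the audio track
writer.SetVP8Metadata(640, 480, 640, 480, 90000);  // satisfies the video track

nsTArray<nsTArray<uint8_t>> encodedBuf;
writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER);
// Only once both expected tracks have metadata does GET_HEADER yield bytes.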
@ -14,7 +14,6 @@ LOCAL_INCLUDES += [
]

UNIFIED_SOURCES += [
'AudioGenerator.cpp',
'MockMediaResource.cpp',
'TestAudioBuffers.cpp',
'TestAudioCallbackDriver.cpp',
@ -38,7 +37,6 @@ UNIFIED_SOURCES += [
'TestMediaSpan.cpp',
'TestMP3Demuxer.cpp',
'TestMP4Demuxer.cpp',
'TestMuxer.cpp',
'TestOpusParser.cpp',
'TestRust.cpp',
'TestTimeUnit.cpp',
@ -1675,6 +1675,4 @@ bool SkeletonState::DecodeHeader(OggPacketPtr aPacket) {
return true;
}

#undef LOG

} // namespace mozilla

@ -1886,5 +1886,5 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType, int64_t aTarget,
}

#undef OGG_DEBUG
#undef SEEK_LOG
#undef SEEK_DEBUG
} // namespace mozilla

@ -6,6 +6,7 @@
#include "prtime.h"
#include "GeckoProfiler.h"

#undef LOG
#define LOG(args, ...)

namespace mozilla {
@ -45,20 +46,22 @@ nsresult OggWriter::Init() {
return (rc == 0) ? NS_OK : NS_ERROR_NOT_INITIALIZED;
}

nsresult OggWriter::WriteEncodedTrack(
const nsTArray<RefPtr<EncodedFrame>>& aData, uint32_t aFlags) {
nsresult OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags) {
AUTO_PROFILER_LABEL("OggWriter::WriteEncodedTrack", OTHER);

uint32_t len = aData.Length();
uint32_t len = aData.GetEncodedFrames().Length();
for (uint32_t i = 0; i < len; i++) {
if (aData[i]->mFrameType != EncodedFrame::OPUS_AUDIO_FRAME) {
if (aData.GetEncodedFrames()[i]->GetFrameType() !=
EncodedFrame::OPUS_AUDIO_FRAME) {
LOG("[OggWriter] wrong encoded data type!");
return NS_ERROR_FAILURE;
}

// only pass END_OF_STREAM on the last frame!
nsresult rv = WriteEncodedData(
aData[i]->GetFrameData(), aData[i]->mDuration,
aData.GetEncodedFrames()[i]->GetFrameData(),
aData.GetEncodedFrames()[i]->GetDuration(),
i < len - 1 ? (aFlags & ~ContainerWriter::END_OF_STREAM) : aFlags);
if (NS_FAILED(rv)) {
LOG("%p Failed to WriteEncodedTrack!", this);
@ -108,7 +111,7 @@ nsresult OggWriter::WriteEncodedData(const nsTArray<uint8_t>& aBuffer,
return NS_OK;
}

void OggWriter::ProduceOggPage(nsTArray<nsTArray<uint8_t>>* aOutputBufs) {
void OggWriter::ProduceOggPage(nsTArray<nsTArray<uint8_t> >* aOutputBufs) {
aOutputBufs->AppendElement();
aOutputBufs->LastElement().SetLength(mOggPage.header_len + mOggPage.body_len);
memcpy(aOutputBufs->LastElement().Elements(), mOggPage.header,
@ -117,7 +120,7 @@ void OggWriter::ProduceOggPage(nsTArray<nsTArray<uint8_t>>* aOutputBufs) {
mOggPage.body, mOggPage.body_len);
}

nsresult OggWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
nsresult OggWriter::GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
uint32_t aFlags) {
int rc = -1;
AUTO_PROFILER_LABEL("OggWriter::GetContainerData", OTHER);
@ -141,13 +144,12 @@ nsresult OggWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
rc = ogg_stream_flush(&mOggStreamState, &mOggPage);
NS_ENSURE_TRUE(rc > 0, NS_ERROR_FAILURE);

ProduceOggPage(aOutputBufs);
return NS_OK;

// Force generate a page even if the amount of packet data is not enough.
// Usually do so after a header packet.

ProduceOggPage(aOutputBufs);
}

if (aFlags & ContainerWriter::FLUSH_NEEDED) {
} else if (aFlags & ContainerWriter::FLUSH_NEEDED) {
// rc = 0 means no packet to put into a page, or an internal error.
rc = ogg_stream_flush(&mOggStreamState, &mOggPage);
} else {
@ -162,25 +164,20 @@ nsresult OggWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
if (aFlags & ContainerWriter::FLUSH_NEEDED) {
mIsWritingComplete = true;
}
// We always return NS_OK here since it's OK to call this without having
// enough data to fill a page. It's the more common case compared to internal
// errors, and we cannot distinguish the two.
return NS_OK;
return (rc > 0) ? NS_OK : NS_ERROR_FAILURE;
}

nsresult OggWriter::SetMetadata(
const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) {
MOZ_ASSERT(aMetadata.Length() == 1);
MOZ_ASSERT(aMetadata[0]);
nsresult OggWriter::SetMetadata(TrackMetadataBase* aMetadata) {
MOZ_ASSERT(aMetadata);

AUTO_PROFILER_LABEL("OggWriter::SetMetadata", OTHER);

if (aMetadata[0]->GetKind() != TrackMetadataBase::METADATA_OPUS) {
if (aMetadata->GetKind() != TrackMetadataBase::METADATA_OPUS) {
LOG("wrong meta data type!");
return NS_ERROR_FAILURE;
}
// Validate each field of METADATA
mMetadata = static_cast<OpusMetadata*>(aMetadata[0].get());
mMetadata = static_cast<OpusMetadata*>(aMetadata);
if (mMetadata->mIdHeader.Length() == 0) {
LOG("miss mIdHeader!");
return NS_ERROR_FAILURE;
@ -194,5 +191,3 @@ nsresult OggWriter::SetMetadata(
}

} // namespace mozilla

#undef LOG
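One subtlety in the restored OggWriter::WriteEncodedTrack above is the flag masking: ContainerWriter::END_OF_STREAM may only accompany the last packet of a batch. A standalone sketch of that loop shape (frames is a hypothetical stand-in for the encoded batch; the masking expression is verbatim from the diff):

// Strip END_OF_STREAM from every frame but the last, so that only the
// final packet closes the logical Ogg stream.
for (uint32_t i = 0; i < len; i++) {
  uint32_t frameFlags =
      i < len - 1 ? (aFlags & ~ContainerWriter::END_OF_STREAM) : aFlags;
  nsresult rv = WriteEncodedData(frames[i]->GetFrameData(),
                                 frames[i]->GetDuration(), frameFlags);
  NS_ENSURE_SUCCESS(rv, rv);
}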
@ -23,17 +23,14 @@ class OggWriter : public ContainerWriter {
OggWriter();
~OggWriter();

// Write frames into the ogg container. aFlags should be set to END_OF_STREAM
// for the final set of frames.
nsresult WriteEncodedTrack(const nsTArray<RefPtr<EncodedFrame>>& aData,
nsresult WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags = 0) override;

nsresult GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
nsresult GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
uint32_t aFlags = 0) override;

// Check metadata type integrity and reject unacceptable track encoder.
nsresult SetMetadata(
const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) override;
nsresult SetMetadata(TrackMetadataBase* aMetadata) override;

private:
nsresult Init();
@ -41,7 +38,7 @@ class OggWriter : public ContainerWriter {
nsresult WriteEncodedData(const nsTArray<uint8_t>& aBuffer, int aDuration,
uint32_t aFlags = 0);

void ProduceOggPage(nsTArray<nsTArray<uint8_t>>* aOutputBufs);
void ProduceOggPage(nsTArray<nsTArray<uint8_t> >* aOutputBufs);
// Store the Metadata from track encoder
RefPtr<OpusMetadata> mMetadata;

@ -212,6 +212,4 @@ bool OpusParser::IsValidMapping2ChannelsCount(uint8_t aChannels) {
return val == valInt || valInt * valInt + 2 == aChannels;
}

#undef OPUS_LOG

} // namespace mozilla

@ -56,8 +56,7 @@ function startTest() {
}
totalBlobSize += e.data.size;
ok(totalBlobSize > 0, 'check the totalBlobSize');
is(e.data.type, expectedMimeType, 'blob should have expected mimetype');
is(mMediaRecorder.mimeType, expectedMimeType, 'recorder should have expected mimetype');
is(mMediaRecorder.mimeType, expectedMimeType, 'blob should has mimetype, return ' + mMediaRecorder.mimeType);
if (!stopTriggered) {
mMediaRecorder.stop();
stopTriggered = true;

@ -65,9 +65,7 @@ async function testRecord(source, mimeType) {

const chunks = [];
let {data} = await new Promise(r => recorder.ondataavailable = r);
if (!isOffline) {
is(recorder.state, "recording", "Expected to still be recording");
}
is(recorder.state, "recording", "Expected to still be recording");
is(data.type, recorder.mimeType, "Blob has recorder mimetype");
if (mimeType != "") {
is(data.type, mimeType, "Blob has given mimetype");

@ -38,13 +38,13 @@ function startTest(test, token) {
info('onstart fired successfully');
hasonstart = true;
// On audio only case, we produce audio/ogg as mimeType.
is('audio/ogg', mMediaRecorder.mimeType, "MediaRecorder mimetype as expected");
is('audio/ogg', mMediaRecorder.mimeType, "check the record mimetype return " + mMediaRecorder.mimeType);
mMediaRecorder.requestData();
};

mMediaRecorder.onstop = function() {
info('onstop fired successfully');
ok(hasondataavailable, "should have ondataavailable before onstop");
ok (hasondataavailable, "should have ondataavailable before onstop");
is(mMediaRecorder.state, 'inactive', 'check recording status is inactive');
SimpleTest.finish();
};
@ -53,9 +53,8 @@ function startTest(test, token) {
info('ondataavailable fired successfully');
if (mMediaRecorder.state == 'recording') {
hasondataavailable = true;
ok(hasonstart, "should have had start event first");
is(e.data.type, mMediaRecorder.mimeType,
"blob's mimeType matches the recorder's");
ok(hasonstart, "should has onstart event first");
ok(e.data.size > 0, 'check blob has data');
mMediaRecorder.stop();
}
};
@ -55,15 +55,14 @@ void EbmlComposer::GenerateHeader() {
if (mCodecPrivateData.Length() > 0) {
// Extract the pre-skip from mCodecPrivateData
// then convert it to nanoseconds.
// For more details see
// https://tools.ietf.org/html/rfc7845#section-4.2
uint64_t codecDelay = (uint64_t)LittleEndian::readUint16(
mCodecPrivateData.Elements() + 10) *
PR_NSEC_PER_SEC / 48000;
// Details in OpusTrackEncoder.cpp.
mCodecDelay = (uint64_t)LittleEndian::readUint16(
mCodecPrivateData.Elements() + 10) *
PR_NSEC_PER_SEC / 48000;
// Fixed 80ms, convert into nanoseconds.
uint64_t seekPreRoll = 80 * PR_NSEC_PER_MSEC;
writeAudioTrack(&ebml, 0x2, 0x0, "A_OPUS", mSampleFreq, mChannels,
codecDelay, seekPreRoll,
mCodecDelay, seekPreRoll,
mCodecPrivateData.Elements(),
mCodecPrivateData.Length());
}
@ -115,7 +114,7 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
EbmlGlobal ebml;
ebml.offset = 0;

auto frameType = aFrame->mFrameType;
auto frameType = aFrame->GetFrameType();
const bool isVP8IFrame = (frameType == EncodedFrame::FrameType::VP8_I_FRAME);
const bool isVP8PFrame = (frameType == EncodedFrame::FrameType::VP8_P_FRAME);
const bool isOpus = (frameType == EncodedFrame::FrameType::OPUS_AUDIO_FRAME);
@ -129,7 +128,11 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
return;
}

int64_t timeCode = aFrame->mTime / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
int64_t timeCode =
aFrame->GetTimeStamp() / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
if (isOpus) {
timeCode += mCodecDelay / PR_NSEC_PER_MSEC;
}

if (!mHasVideo && timeCode >= FLUSH_AUDIO_ONLY_AFTER_MS) {
MOZ_ASSERT(mHasAudio);
@ -154,11 +157,15 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
mClusterHeaderIndex = mClusters.Length() - 1;
mClusterLengthLoc = ebmlLoc.offset;
// if timeCode didn't under/overflow before, it shouldn't after this
mClusterTimecode = aFrame->mTime / PR_USEC_PER_MSEC;
mClusterTimecode = aFrame->GetTimeStamp() / PR_USEC_PER_MSEC;
Ebml_SerializeUnsigned(&ebml, Timecode, mClusterTimecode);

// Can't under-/overflow now
timeCode = aFrame->mTime / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
timeCode =
aFrame->GetTimeStamp() / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
if (isOpus) {
timeCode += mCodecDelay / PR_NSEC_PER_MSEC;
}

mWritingCluster = true;
}
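The codec-delay computation restored above reads the Opus "pre-skip" field: a little-endian uint16 at byte offset 10 of the ID header, counted in 48 kHz samples (see the RFC 7845 link in the diff), scaled to nanoseconds. A self-contained sketch of the same arithmetic; the helper name is hypothetical:

// Pre-skip of e.g. 312 samples maps to 312 * PR_NSEC_PER_SEC / 48000
// = 6,500,000 ns (6.5 ms).
static uint64_t OpusPreSkipNs(const nsTArray<uint8_t>& aIdHeader) {
  MOZ_ASSERT(aIdHeader.Length() >= 12);
  uint16_t preSkip = LittleEndian::readUint16(aIdHeader.Elements() + 10);
  return uint64_t(preSkip) * PR_NSEC_PER_SEC / 48000;
}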
@ -38,8 +38,7 @@ class EbmlComposer {
/*
* Insert media encoded buffer into muxer and it would be package
* into SimpleBlock. If no cluster is opened, new cluster will start for
* writing. Frames passed to this function should already have any codec delay
* applied.
* writing.
*/
void WriteSimpleBlock(EncodedFrame* aFrame);
/*
@ -69,6 +68,8 @@ class EbmlComposer {
uint64_t mClusterLengthLoc = 0;
// Audio codec specific header data.
nsTArray<uint8_t> mCodecPrivateData;
// Codec delay in nanoseconds.
uint64_t mCodecDelay = 0;

// The timecode of the cluster.
uint64_t mClusterTimecode = 0;

@ -1254,6 +1254,6 @@ int64_t WebMTrackDemuxer::GetEvictionOffset(const TimeUnit& aTime) {

return offset;
}
} // namespace mozilla

#undef WEBM_DEBUG
} // namespace mozilla

@ -10,7 +10,8 @@

namespace mozilla {

WebMWriter::WebMWriter() : ContainerWriter() {
WebMWriter::WebMWriter(uint32_t aTrackTypes) : ContainerWriter() {
mMetadataRequiredFlag = aTrackTypes;
mEbmlComposer = new EbmlComposer();
}

@ -18,16 +19,17 @@ WebMWriter::~WebMWriter() {
// Out-of-line dtor so mEbmlComposer nsAutoPtr can delete a complete type.
}

nsresult WebMWriter::WriteEncodedTrack(
const nsTArray<RefPtr<EncodedFrame>>& aData, uint32_t aFlags) {
nsresult WebMWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags) {
AUTO_PROFILER_LABEL("WebMWriter::WriteEncodedTrack", OTHER);
for (uint32_t i = 0; i < aData.Length(); i++) {
mEbmlComposer->WriteSimpleBlock(aData.ElementAt(i).get());
for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
mEbmlComposer->WriteSimpleBlock(
aData.GetEncodedFrames().ElementAt(i).get());
}
return NS_OK;
}

nsresult WebMWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
nsresult WebMWriter::GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
uint32_t aFlags) {
AUTO_PROFILER_LABEL("WebMWriter::GetContainerData", OTHER);
mEbmlComposer->ExtractBuffer(aOutputBufs, aFlags);
@ -37,75 +39,40 @@ nsresult WebMWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
return NS_OK;
}

nsresult WebMWriter::SetMetadata(
const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) {
nsresult WebMWriter::SetMetadata(TrackMetadataBase* aMetadata) {
MOZ_ASSERT(aMetadata);
AUTO_PROFILER_LABEL("WebMWriter::SetMetadata", OTHER);
MOZ_DIAGNOSTIC_ASSERT(!aMetadata.IsEmpty());

// Integrity checks
bool bad = false;
for (const RefPtr<TrackMetadataBase>& metadata : aMetadata) {
MOZ_ASSERT(metadata);

if (metadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
VP8Metadata* meta = static_cast<VP8Metadata*>(metadata.get());
if (meta->mWidth == 0 || meta->mHeight == 0 || meta->mDisplayWidth == 0 ||
meta->mDisplayHeight == 0) {
bad = true;
}
}

if (metadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
VorbisMetadata* meta = static_cast<VorbisMetadata*>(metadata.get());
if (meta->mSamplingFrequency == 0 || meta->mChannels == 0 ||
meta->mData.IsEmpty()) {
bad = true;
}
}

if (metadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
OpusMetadata* meta = static_cast<OpusMetadata*>(metadata.get());
if (meta->mSamplingFrequency == 0 || meta->mChannels == 0 ||
meta->mIdHeader.IsEmpty()) {
bad = true;
}
}
}
if (bad) {
return NS_ERROR_FAILURE;
if (aMetadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
VP8Metadata* meta = static_cast<VP8Metadata*>(aMetadata);
MOZ_ASSERT(meta, "Cannot find vp8 encoder metadata");
mEbmlComposer->SetVideoConfig(meta->mWidth, meta->mHeight,
meta->mDisplayWidth, meta->mDisplayHeight);
mMetadataRequiredFlag =
mMetadataRequiredFlag & ~ContainerWriter::CREATE_VIDEO_TRACK;
}

// Storing
bool hasAudio = false;
bool hasVideo = false;
for (const RefPtr<TrackMetadataBase>& metadata : aMetadata) {
MOZ_ASSERT(metadata);

if (metadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
MOZ_DIAGNOSTIC_ASSERT(!hasVideo);
VP8Metadata* meta = static_cast<VP8Metadata*>(metadata.get());
mEbmlComposer->SetVideoConfig(meta->mWidth, meta->mHeight,
meta->mDisplayWidth, meta->mDisplayHeight);
hasVideo = true;
}

if (metadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
MOZ_DIAGNOSTIC_ASSERT(!hasAudio);
VorbisMetadata* meta = static_cast<VorbisMetadata*>(metadata.get());
mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
mEbmlComposer->SetAudioCodecPrivateData(meta->mData);
hasAudio = true;
}

if (metadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
MOZ_DIAGNOSTIC_ASSERT(!hasAudio);
OpusMetadata* meta = static_cast<OpusMetadata*>(metadata.get());
mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
mEbmlComposer->SetAudioCodecPrivateData(meta->mIdHeader);
hasAudio = true;
}
if (aMetadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
VorbisMetadata* meta = static_cast<VorbisMetadata*>(aMetadata);
MOZ_ASSERT(meta, "Cannot find vorbis encoder metadata");
mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
mEbmlComposer->SetAudioCodecPrivateData(meta->mData);
mMetadataRequiredFlag =
mMetadataRequiredFlag & ~ContainerWriter::CREATE_AUDIO_TRACK;
}

if (aMetadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
OpusMetadata* meta = static_cast<OpusMetadata*>(aMetadata);
MOZ_ASSERT(meta, "Cannot find Opus encoder metadata");
mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
mEbmlComposer->SetAudioCodecPrivateData(meta->mIdHeader);
mMetadataRequiredFlag =
mMetadataRequiredFlag & ~ContainerWriter::CREATE_AUDIO_TRACK;
}

if (!mMetadataRequiredFlag) {
mEbmlComposer->GenerateHeader();
}
mEbmlComposer->GenerateHeader();
return NS_OK;
}
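The restored WebMWriter::SetMetadata above drives header emission off a small bit mask: the constructor seeds mMetadataRequiredFlag from aTrackTypes, each accepted metadata kind clears its bit, and the EBML header is generated only once no bits remain. Sketched in isolation (names from the diff):

// Constructor: remember which tracks still owe metadata.
mMetadataRequiredFlag = aTrackTypes;  // e.g. CREATE_AUDIO_TRACK | CREATE_VIDEO_TRACK

// In SetMetadata(): clear the bit for the track just configured.
mMetadataRequiredFlag &= ~ContainerWriter::CREATE_VIDEO_TRACK;  // after VP8
mMetadataRequiredFlag &= ~ContainerWriter::CREATE_AUDIO_TRACK;  // after Opus/Vorbis

// The header can be written only when every required bit is cleared.
if (!mMetadataRequiredFlag) {
  mEbmlComposer->GenerateHeader();
}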
@ -41,28 +41,30 @@ class VP8Metadata : public TrackMetadataBase {
*/
class WebMWriter : public ContainerWriter {
public:
// Run in MediaRecorder thread
WebMWriter();
// aTrackTypes indicates this muxer should multiplex into Video only or A/V
// format. Run in MediaRecorder thread
explicit WebMWriter(uint32_t aTrackTypes);
virtual ~WebMWriter();

// WriteEncodedTrack inserts raw packets into WebM stream. Does not accept
// any flags: any specified will be ignored. Writing is finalized via
// flushing via GetContainerData().
nsresult WriteEncodedTrack(const nsTArray<RefPtr<EncodedFrame>>& aData,
// WriteEncodedTrack inserts raw packets into WebM stream.
nsresult WriteEncodedTrack(const EncodedFrameContainer& aData,
uint32_t aFlags = 0) override;

// GetContainerData outputs multiplexing data.
// aFlags indicates the muxer should enter into finished stage and flush out
// queue data.
nsresult GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
nsresult GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
uint32_t aFlags = 0) override;

// Assign metadata into muxer
nsresult SetMetadata(
const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) override;
nsresult SetMetadata(TrackMetadataBase* aMetadata) override;

private:
nsAutoPtr<EbmlComposer> mEbmlComposer;

// Indicate what kind of meta data needed in the writer.
// If this value become 0, it means writer can start to generate header.
uint8_t mMetadataRequiredFlag;
};

} // namespace mozilla