gecko-dev/dom/media/encoder/MediaEncoder.cpp
Nicholas Nethercote 58786e1ea7 Bug 1375392 - Tweak the PROFILER_LABEL* macros. r=mstange.
This patch makes the following changes to the macros.

- Removes PROFILER_LABEL_FUNC. It's only suitable for use in functions outside
  classes, due to PROFILER_FUNCTION_NAME not getting class names, and it was
  mostly misused.

- Removes PROFILER_FUNCTION_NAME. It's no longer used, and __func__ is
  universally available now anyway.

- Combines the first two string literal arguments of PROFILER_LABEL and
  PROFILER_LABEL_DYNAMIC into a single argument. There was no good reason for
  them to be separate, and it forced a '::' in the label, which isn't always
  appropriate. Also, the meaning of the "name_space" argument was interpreted
  in an interesting variety of ways.

- Adds an "AUTO_" prefix to PROFILER_LABEL and PROFILER_LABEL_DYNAMIC, to make
  it clearer they construct RAII objects rather than just being function calls.
  (I myself have screwed up the scoping because of this in the past.)

- Fills in the 'js::ProfileEntry::Category::' qualifier within the macro, so
  the caller doesn't need to. This makes a *lot* more of the uses fit onto a
  single line.

The patch also makes the following changes to the macro uses (beyond those
required by the changes described above).

- Fixes a bunch of labels that had gotten out of sync with the name of the
  class and/or function that encloses them.

- Removes a useless PROFILER_LABEL use within a trivial scope in
  EventStateManager::DispatchMouseOrPointerEvent(). It clearly wasn't serving
  any useful purpose. It also serves as extra evidence that the AUTO_ prefix is
  a good idea.

- Tweaks DecodePool::SyncRunIf{Preferred,Possible} so that the labelling is
  done within them, instead of at their callsites, because that's a more
  standard way of doing things.

--HG--
extra : rebase_source : 318d1bc6fc1425a94aacbf489dd46e4f83211de4
2017-06-22 17:08:53 +10:00

431 lines
14 KiB
C++

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaEncoder.h"
#include "MediaDecoder.h"
#include "nsIPrincipal.h"
#include "nsMimeTypes.h"
#include "TimeUnits.h"
#include "mozilla/Logging.h"
#include "mozilla/Preferences.h"
#include "mozilla/StaticPtr.h"
#include "mozilla/gfx/Point.h" // IntSize
#include"GeckoProfiler.h"
#include "OggWriter.h"
#include "OpusTrackEncoder.h"
#ifdef MOZ_WEBM_ENCODER
#include "VP8TrackEncoder.h"
#include "WebMWriter.h"
#endif
#ifdef LOG
#undef LOG
#endif
mozilla::LazyLogModule gMediaEncoderLog("MediaEncoder");
#define LOG(type, msg) MOZ_LOG(gMediaEncoderLog, type, msg)
namespace mozilla {
void
MediaStreamVideoRecorderSink::SetCurrentFrames(const VideoSegment& aSegment)
{
  MOZ_ASSERT(mVideoEncoder);
  // While suspended (paused) incoming frames are dropped, not forwarded.
  if (mSuspended) {
    return;
  }
  mVideoEncoder->SetCurrentFrames(aSegment);
}
// Pauses recording: remembers when the pause started (consumed by Resume()
// to accumulate total paused time) and tells the video sink to stop
// forwarding frames. Main-thread only.
void
MediaEncoder::Suspend()
{
  MOZ_ASSERT(NS_IsMainThread());
  // Stamp the pause start first so Resume() can measure the paused interval.
  mLastPauseStartTime = TimeStamp::Now();
  mSuspended = true;
  mVideoSink->Suspend();
}
// Resumes recording after Suspend(): accumulates the time spent paused into
// mMicrosecondsSpentPaused (used by WriteEncodedDataToMuxer to shift frame
// timestamps) and unsuspends the video sink. Main-thread only; a no-op if
// not currently suspended.
void
MediaEncoder::Resume()
{
  MOZ_ASSERT(NS_IsMainThread());
  if (!mSuspended) {
    return;
  }
  media::TimeUnit timeSpentPaused =
    media::TimeUnit::FromTimeDuration(
      TimeStamp::Now() - mLastPauseStartTime);
  MOZ_ASSERT(timeSpentPaused.ToMicroseconds() >= 0);
  MOZ_RELEASE_ASSERT(timeSpentPaused.IsValid());
  // Fixed: removed a stray second semicolon (empty statement) here.
  mMicrosecondsSpentPaused += timeSpentPaused.ToMicroseconds();
  mSuspended = false;
  mVideoSink->Resume();
}
// Records whether the encoder is directly connected to its source. When
// direct-connected, media arrives via SetCurrentFrames and the queued-data
// notification paths only forward track events (see NotifyQueuedTrackChanges).
void
MediaEncoder::SetDirectConnect(bool aConnected)
{
  mDirectConnected = aConnected;
}
// Routes raw realtime track data from MediaStreamGraph (called on its
// thread) to the matching track encoder. Does nothing while suspended.
void
MediaEncoder::NotifyRealtimeData(MediaStreamGraph* aGraph,
                                 TrackID aID,
                                 StreamTime aTrackOffset,
                                 uint32_t aTrackEvents,
                                 const MediaSegment& aRealtimeMedia)
{
  if (mSuspended) {
    return;
  }

  switch (aRealtimeMedia.GetType()) {
    case MediaSegment::AUDIO:
      if (mAudioEncoder) {
        mAudioEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                                aTrackOffset, aTrackEvents,
                                                aRealtimeMedia);
      }
      break;
    case MediaSegment::VIDEO:
      // Video is only forwarded when there is an actual track event.
      if (mVideoEncoder &&
          aTrackEvents != TrackEventCommand::TRACK_EVENT_NONE) {
        mVideoEncoder->NotifyQueuedTrackChanges(aGraph, aID,
                                                aTrackOffset, aTrackEvents,
                                                aRealtimeMedia);
      }
      break;
    default:
      break;
  }
}
// Receives queued track changes from MediaStreamGraph. Without a direct
// connection the queued media itself is encoded; with one, the media comes
// in through SetCurrentFrames instead, so only track events (e.g.
// TRACK_EVENT_ENDED) are forwarded, carried by an empty segment of the
// matching type.
void
MediaEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
                                       TrackID aID,
                                       StreamTime aTrackOffset,
                                       TrackEventCommand aTrackEvents,
                                       const MediaSegment& aQueuedMedia,
                                       MediaStream* aInputStream,
                                       TrackID aInputTrackID)
{
  if (!mDirectConnected) {
    NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, aQueuedMedia);
    return;
  }

  if (aTrackEvents == TrackEventCommand::TRACK_EVENT_NONE) {
    return;
  }

  // forward events (TRACK_EVENT_ENDED) but not the media
  if (aQueuedMedia.GetType() == MediaSegment::VIDEO) {
    VideoSegment emptySegment;
    NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, emptySegment);
  } else {
    AudioSegment emptySegment;
    NotifyRealtimeData(aGraph, aID, aTrackOffset, aTrackEvents, emptySegment);
  }
}
// Receives queued audio from MediaStreamGraph. Direct-connected encoders get
// their media through SetCurrentFrames, so this only feeds the realtime path
// when there is no direct connection; 0 means no track event.
void
MediaEncoder::NotifyQueuedAudioData(MediaStreamGraph* aGraph, TrackID aID,
                                    StreamTime aTrackOffset,
                                    const AudioSegment& aQueuedMedia,
                                    MediaStream* aInputStream,
                                    TrackID aInputTrackID)
{
  if (mDirectConnected) {
    return;
  }
  NotifyRealtimeData(aGraph, aID, aTrackOffset, 0, aQueuedMedia);
}
// Forwards a graph-level event to both track encoders so they can wind down
// even in case MediaEncoder never receives a TRACK_EVENT_ENDED event.
void
MediaEncoder::NotifyEvent(MediaStreamGraph* aGraph,
                          MediaStreamGraphEvent event)
{
  // Fixed: the log message previously said "NotifyRemoved", out of sync
  // with this function's actual name.
  LOG(LogLevel::Debug, ("NotifyEvent in [MediaEncoder]."));
  if (mAudioEncoder) {
    mAudioEncoder->NotifyEvent(aGraph, event);
  }
  if (mVideoEncoder) {
    mVideoEncoder->NotifyEvent(aGraph, event);
  }
}
/* static */
/**
 * Factory: picks a container writer plus audio/video track encoders matching
 * aMIMEType and aTrackTypes, and returns a MediaEncoder owning them, or
 * nullptr when no suitable combination is available. A bitrate of 0 leaves
 * the corresponding encoder at its default.
 */
already_AddRefed<MediaEncoder>
MediaEncoder::CreateEncoder(const nsAString& aMIMEType, uint32_t aAudioBitrate,
                            uint32_t aVideoBitrate, uint32_t aBitrate,
                            uint8_t aTrackTypes,
                            TrackRate aTrackRate)
{
  AUTO_PROFILER_LABEL("MediaEncoder::CreateEncoder", OTHER);

  nsAutoPtr<ContainerWriter> writer;
  nsAutoPtr<AudioTrackEncoder> audioEncoder;
  nsAutoPtr<VideoTrackEncoder> videoEncoder;
  RefPtr<MediaEncoder> encoder;
  nsString mimeType;
  // At least one track type must be requested.
  if (!aTrackTypes) {
    LOG(LogLevel::Error, ("NO TrackTypes!!!"));
    return nullptr;
  }
#ifdef MOZ_WEBM_ENCODER
  // WebM (VP8 video + optional Opus audio): chosen when the caller asked for
  // video/webm or requested a video track, provided the pref allows it.
  else if (MediaEncoder::IsWebMEncoderEnabled() &&
           (aMIMEType.EqualsLiteral(VIDEO_WEBM) ||
            (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK))) {
    if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK
        && MediaDecoder::IsOpusEnabled()) {
      audioEncoder = new OpusTrackEncoder();
      NS_ENSURE_TRUE(audioEncoder, nullptr);
    }
    videoEncoder = new VP8TrackEncoder(aTrackRate);
    writer = new WebMWriter(aTrackTypes);
    NS_ENSURE_TRUE(writer, nullptr);
    NS_ENSURE_TRUE(videoEncoder, nullptr);
    mimeType = NS_LITERAL_STRING(VIDEO_WEBM);
  }
#endif //MOZ_WEBM_ENCODER
  // Ogg/Opus: the audio-only alternative.
  else if (MediaDecoder::IsOggEnabled() && MediaDecoder::IsOpusEnabled() &&
           (aMIMEType.EqualsLiteral(AUDIO_OGG) ||
            (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK))) {
    writer = new OggWriter();
    audioEncoder = new OpusTrackEncoder();
    NS_ENSURE_TRUE(writer, nullptr);
    NS_ENSURE_TRUE(audioEncoder, nullptr);
    mimeType = NS_LITERAL_STRING(AUDIO_OGG);
  }
  else {
    LOG(LogLevel::Error, ("Can not find any encoder to record this media stream"));
    return nullptr;
  }
  LOG(LogLevel::Debug, ("Create encoder result:a[%d] v[%d] w[%d] mimeType = %s.",
                        audioEncoder != nullptr, videoEncoder != nullptr,
                        writer != nullptr, NS_ConvertUTF16toUTF8(mimeType).get()));
  // Zero bitrates mean "use the encoder's default".
  if (videoEncoder && aVideoBitrate != 0) {
    videoEncoder->SetBitrate(aVideoBitrate);
  }
  if (audioEncoder && aAudioBitrate != 0) {
    audioEncoder->SetBitrate(aAudioBitrate);
  }
  encoder = new MediaEncoder(writer.forget(), audioEncoder.forget(),
                             videoEncoder.forget(), mimeType, aAudioBitrate,
                             aVideoBitrate, aBitrate);
  return encoder.forget();
}
/**
 * GetEncodedData() runs as a state machine, starting with mState set to
 * ENCODE_METADDATA, the procedure should be as follow:
 *
 * While non-stop
 *   If mState is ENCODE_METADDATA
 *     Get the meta data from audio/video encoder
 *     If a meta data is generated
 *       Get meta data from audio/video encoder
 *       Set mState to ENCODE_TRACK
 *       Return the final container data
 *
 *   If mState is ENCODE_TRACK
 *     Get encoded track data from audio/video encoder
 *     If a packet of track data is generated
 *       Insert encoded track data into the container stream of writer
 *       If the final container data is copied to aOutput
 *         Return the copy of final container data
 *       If this is the last packet of input stream
 *         Set mState to ENCODE_DONE
 *
 *   If mState is ENCODE_DONE or ENCODE_ERROR
 *     Stop the loop
 */
void
MediaEncoder::GetEncodedData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
                             nsAString& aMIMEType)
{
  // Runs on the encoder thread, never the main thread.
  MOZ_ASSERT(!NS_IsMainThread());
  aMIMEType = mMIMEType;
  AUTO_PROFILER_LABEL("MediaEncoder::GetEncodedData", OTHER);

  bool reloop = true;
  while (reloop) {
    switch (mState) {
      case ENCODE_METADDATA: {
        // Push both encoders' metadata into the muxer, then emit the
        // container header before moving on to track data.
        LOG(LogLevel::Debug, ("ENCODE_METADDATA TimeStamp = %f", GetEncodeTimeStamp()));
        nsresult rv = CopyMetadataToMuxer(mAudioEncoder.get());
        if (NS_FAILED(rv)) {
          LOG(LogLevel::Error, ("Error! Fail to Set Audio Metadata"));
          break;
        }
        rv = CopyMetadataToMuxer(mVideoEncoder.get());
        if (NS_FAILED(rv)) {
          LOG(LogLevel::Error, ("Error! Fail to Set Video Metadata"));
          break;
        }
        rv = mWriter->GetContainerData(aOutputBufs,
                                       ContainerWriter::GET_HEADER);
        if (aOutputBufs != nullptr) {
          // Track buffer size for memory reporting (see SizeOfExcludingThis).
          mSizeOfBuffer = aOutputBufs->ShallowSizeOfExcludingThis(MallocSizeOf);
        }
        if (NS_FAILED(rv)) {
          LOG(LogLevel::Error,("Error! writer fail to generate header!"));
          mState = ENCODE_ERROR;
          break;
        }
        LOG(LogLevel::Debug, ("Finish ENCODE_METADDATA TimeStamp = %f", GetEncodeTimeStamp()));
        mState = ENCODE_TRACK;
        break;
      }

      case ENCODE_TRACK: {
        LOG(LogLevel::Debug, ("ENCODE_TRACK TimeStamp = %f", GetEncodeTimeStamp()));
        EncodedFrameContainer encodedData;
        nsresult rv = NS_OK;
        // We're most likely to actually wait for a video frame, so do that first to minimize
        // capture offset/lipsync issues
        rv = WriteEncodedDataToMuxer(mVideoEncoder.get());
        if (NS_FAILED(rv)) {
          LOG(LogLevel::Error, ("Fail to write video encoder data to muxer"));
          break;
        }
        rv = WriteEncodedDataToMuxer(mAudioEncoder.get());
        if (NS_FAILED(rv)) {
          LOG(LogLevel::Error, ("Error! Fail to write audio encoder data to muxer"));
          break;
        }
        LOG(LogLevel::Debug, ("Audio encoded TimeStamp = %f", GetEncodeTimeStamp()));
        LOG(LogLevel::Debug, ("Video encoded TimeStamp = %f", GetEncodeTimeStamp()));
        // In audio only or video only case, let unavailable track's flag to be true.
        bool isAudioCompleted = (mAudioEncoder && mAudioEncoder->IsEncodingComplete()) || !mAudioEncoder;
        bool isVideoCompleted = (mVideoEncoder && mVideoEncoder->IsEncodingComplete()) || !mVideoEncoder;
        // Only request a flush once every track has finished encoding.
        rv = mWriter->GetContainerData(aOutputBufs,
                                       isAudioCompleted && isVideoCompleted ?
                                       ContainerWriter::FLUSH_NEEDED : 0);
        if (aOutputBufs != nullptr) {
          mSizeOfBuffer = aOutputBufs->ShallowSizeOfExcludingThis(MallocSizeOf);
        }
        if (NS_SUCCEEDED(rv)) {
          // Successfully get the copy of final container data from writer.
          reloop = false;
        }
        mState = (mWriter->IsWritingComplete()) ? ENCODE_DONE : ENCODE_TRACK;
        LOG(LogLevel::Debug, ("END ENCODE_TRACK TimeStamp = %f "
                              "mState = %d aComplete %d vComplete %d",
                              GetEncodeTimeStamp(), mState, isAudioCompleted, isVideoCompleted));
        break;
      }

      case ENCODE_DONE:
      case ENCODE_ERROR:
        LOG(LogLevel::Debug, ("MediaEncoder has been shutdown."));
        mSizeOfBuffer = 0;
        mShutdown = true;
        reloop = false;
        break;

      default:
        MOZ_CRASH("Invalid encode state");
    }
  }
}
// Pulls the next batch of encoded frames from aTrackEncoder, rewrites their
// timestamps so time spent paused is removed from the output timeline, and
// hands them to the container writer. A null encoder (audio-only or
// video-only recording) or an already-finished encoder is a successful no-op.
// On failure mState is set to ENCODE_ERROR and the error is returned.
nsresult
MediaEncoder::WriteEncodedDataToMuxer(TrackEncoder *aTrackEncoder)
{
  if (aTrackEncoder == nullptr) {
    return NS_OK;
  }
  if (aTrackEncoder->IsEncodingComplete()) {
    return NS_OK;
  }

  AUTO_PROFILER_LABEL("MediaEncoder::WriteEncodedDataToMuxer", OTHER);

  EncodedFrameContainer encodedVideoData;
  nsresult rv = aTrackEncoder->GetEncodedTrack(encodedVideoData);
  if (NS_FAILED(rv)) {
    // Encoding might be canceled.
    LOG(LogLevel::Error, ("Error! Fail to get encoded data from video encoder."));
    mState = ENCODE_ERROR;
    return rv;
  }

  // Update timestamps to accommodate pauses: each frame is shifted back by
  // the total paused time accumulated in Resume().
  const nsTArray<RefPtr<EncodedFrame> >& encodedFrames =
    encodedVideoData.GetEncodedFrames();
  // Take a copy of the atomic so we don't continually access it
  uint64_t microsecondsSpentPaused = mMicrosecondsSpentPaused;
  for (size_t i = 0; i < encodedFrames.Length(); ++i) {
    RefPtr<EncodedFrame> frame = encodedFrames[i];
    if (frame->GetTimeStamp() > microsecondsSpentPaused &&
        frame->GetTimeStamp() - microsecondsSpentPaused > mLastMuxedTimestamp) {
      // Use the adjusted timestamp if it's after the last timestamp
      frame->SetTimeStamp(frame->GetTimeStamp() - microsecondsSpentPaused);
    } else {
      // If not, we force the last time stamp. We do this so the frames are
      // still around and in order in case the codec needs to reference them.
      // Dropping them here may result in artifacts in playback.
      frame->SetTimeStamp(mLastMuxedTimestamp);
    }
    MOZ_ASSERT(mLastMuxedTimestamp <= frame->GetTimeStamp(),
               "Our frames should be ordered by this point!");
    mLastMuxedTimestamp = frame->GetTimeStamp();
  }

  // Tell the writer when this is the encoder's final batch so it can close
  // out the stream.
  rv = mWriter->WriteEncodedTrack(encodedVideoData,
                                  aTrackEncoder->IsEncodingComplete() ?
                                  ContainerWriter::END_OF_STREAM : 0);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error, ("Error! Fail to write encoded video track to the media container."));
    mState = ENCODE_ERROR;
  }
  return rv;
}
// Fetches aTrackEncoder's metadata and installs it on the container writer.
// A null encoder (audio-only or video-only recording) is a successful no-op.
// Missing metadata or a writer failure puts the state machine into
// ENCODE_ERROR and returns the failure code.
nsresult
MediaEncoder::CopyMetadataToMuxer(TrackEncoder *aTrackEncoder)
{
  if (!aTrackEncoder) {
    return NS_OK;
  }

  AUTO_PROFILER_LABEL("MediaEncoder::CopyMetadataToMuxer", OTHER);

  RefPtr<TrackMetadataBase> meta = aTrackEncoder->GetMetadata();
  if (!meta) {
    LOG(LogLevel::Error, ("Error! metadata = null"));
    mState = ENCODE_ERROR;
    return NS_ERROR_ABORT;
  }

  const nsresult rv = mWriter->SetMetadata(meta);
  if (NS_FAILED(rv)) {
    LOG(LogLevel::Error, ("Error! SetMetadata fail"));
    mState = ENCODE_ERROR;
  }
  return rv;
}
#ifdef MOZ_WEBM_ENCODER
// WebM encoding is gated on a user-settable preference.
bool
MediaEncoder::IsWebMEncoderEnabled()
{
  static const char* const kWebMEncoderPref = "media.encoder.webm.enabled";
  return Preferences::GetBool(kWebMEncoderPref);
}
#endif
/*
 * SizeOfExcludingThis measures memory being used by the Media Encoder.
 * Currently it measures the size of the Encoder buffer and memory occupied
 * by mAudioEncoder and mVideoEncoder.
 */
size_t
MediaEncoder::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
{
  // Memory is only reported while actively encoding track data.
  if (mState != ENCODE_TRACK) {
    return 0;
  }

  size_t total = mSizeOfBuffer;
  if (mAudioEncoder != nullptr) {
    total += mAudioEncoder->SizeOfExcludingThis(aMallocSizeOf);
  }
  if (mVideoEncoder != nullptr) {
    total += mVideoEncoder->SizeOfExcludingThis(aMallocSizeOf);
  }
  return total;
}
} // namespace mozilla