Backed out 5 changesets 507a508aea70, 16669eed518d, 6f6fc1a91d07, 1e983ccb61cc, c5afa29ea85a (bug 1195073) for M2 and W5 on OSX and Linux and R(R2, Ru2) bustage on Linux. r=backout

Backed out changeset 507a508aea70 (bug 1195073)
Backed out changeset 16669eed518d (bug 1195073)
Backed out changeset 6f6fc1a91d07 (bug 1195073)
Backed out changeset 1e983ccb61cc (bug 1195073)
Backed out changeset c5afa29ea85a (bug 1195073)
This commit is contained in:
Sebastian Hengst 2015-08-21 10:46:05 +02:00
parent 5cec624cad
commit 0276165cfa
8 changed files with 50 additions and 236 deletions

View File

@ -178,6 +178,16 @@ public:
(*aData)[3] == 0x6b) {
return true;
}
// 0xa3 // SimpleBlock
if (aData->Length() >= 1 &&
(*aData)[0] == 0xa3) {
return true;
}
// 0xa1 // Block
if (aData->Length() >= 1 &&
(*aData)[0] == 0xa1) {
return true;
}
return false;
}
@ -186,17 +196,6 @@ public:
{
bool initSegment = IsInitSegmentPresent(aData);
if (initSegment) {
if (mLastMapping) {
// The last data contained a complete cluster but we can only detect it
// now that a new one is starting.
// We use mOffset as end position to ensure that any blocks not reported
// by WebMBufferParser are properly skipped.
mCompleteMediaSegmentRange = MediaByteRange(mLastMapping.ref().mSyncOffset,
mOffset);
mLastMapping.reset();
MSE_DEBUG(WebMContainerParser, "New cluster found at start, ending previous one");
return false;
}
mOffset = 0;
mParser = WebMBufferedParser(0);
mOverlappedMapping.Clear();
@ -244,71 +243,38 @@ public:
return false;
}
if (mLastMapping &&
mLastMapping.ref().mSyncOffset != mapping[0].mSyncOffset) {
// The last data contained a complete cluster but we can only detect it
// now that a new one is starting.
// We use the start of the next cluster as end position to ensure that any
// blocks not reported by WebMBufferParser is properly skipped.
mCompleteMediaSegmentRange = MediaByteRange(mLastMapping.ref().mSyncOffset,
mapping[0].mSyncOffset);
mOverlappedMapping.AppendElements(mapping);
mLastMapping.reset();
MSE_DEBUG(WebMContainerParser, "New cluster found at start, ending previous one");
return false;
}
// Calculate media range for first media segment.
// Check if we have a cluster finishing in the current data.
uint32_t endIdx = mapping.Length() - 1;
bool foundNewCluster = false;
while (mapping[0].mSyncOffset != mapping[endIdx].mSyncOffset) {
endIdx -= 1;
foundNewCluster = true;
// Calculate media range for first media segment
uint32_t segmentEndIdx = endIdx;
while (mapping[0].mSyncOffset != mapping[segmentEndIdx].mSyncOffset) {
segmentEndIdx -= 1;
}
int32_t completeIdx = endIdx;
while (completeIdx >= 0 && mOffset < mapping[completeIdx].mEndOffset) {
MSE_DEBUG(WebMContainerParser, "block is incomplete, missing: %lld",
mapping[completeIdx].mEndOffset - mOffset);
completeIdx -= 1;
}
// Save parsed blocks for which we do not have all data yet.
mOverlappedMapping.AppendElements(mapping.Elements() + completeIdx + 1,
mapping.Length() - completeIdx - 1);
if (completeIdx < 0) {
mLastMapping.reset();
return false;
}
if (mCompleteMediaHeaderRange.IsNull()) {
mCompleteMediaHeaderRange = MediaByteRange(mapping[0].mSyncOffset,
if (segmentEndIdx > 0 && mOffset >= mapping[segmentEndIdx].mEndOffset) {
mCompleteMediaHeaderRange = MediaByteRange(mParser.mInitEndOffset,
mapping[0].mEndOffset);
}
mLastMapping = Some(mapping[completeIdx]);
if (foundNewCluster && mOffset >= mapping[endIdx].mEndOffset) {
// We now have all information required to delimit a complete cluster.
mCompleteMediaSegmentRange = MediaByteRange(mapping[endIdx].mSyncOffset,
mapping[endIdx].mEndOffset);
mCompleteMediaSegmentRange = MediaByteRange(mParser.mInitEndOffset,
mapping[segmentEndIdx].mEndOffset);
}
if (!completeIdx) {
// Exclude frames that we don't have enough data to cover the end of.
while (mOffset < mapping[endIdx].mEndOffset && endIdx > 0) {
endIdx -= 1;
}
if (endIdx == 0) {
return false;
}
uint64_t frameDuration =
mapping[completeIdx].mTimecode - mapping[completeIdx - 1].mTimecode;
uint64_t frameDuration = mapping[endIdx].mTimecode - mapping[endIdx - 1].mTimecode;
aStart = mapping[0].mTimecode / NS_PER_USEC;
aEnd = (mapping[completeIdx].mTimecode + frameDuration) / NS_PER_USEC;
aEnd = (mapping[endIdx].mTimecode + frameDuration) / NS_PER_USEC;
MSE_DEBUG(WebMContainerParser, "[%lld, %lld] [fso=%lld, leo=%lld, l=%u processedIdx=%u fs=%lld]",
aStart, aEnd, mapping[0].mSyncOffset,
mapping[completeIdx].mEndOffset, mapping.Length(), completeIdx,
mCompleteMediaSegmentRange.mEnd);
MSE_DEBUG(WebMContainerParser, "[%lld, %lld] [fso=%lld, leo=%lld, l=%u endIdx=%u]",
aStart, aEnd, mapping[0].mSyncOffset, mapping[endIdx].mEndOffset, mapping.Length(), endIdx);
mapping.RemoveElementsAt(0, endIdx + 1);
mOverlappedMapping.AppendElements(mapping);
return true;
}
@ -323,7 +289,6 @@ private:
WebMBufferedParser mParser;
nsTArray<WebMTimeDataOffset> mOverlappedMapping;
int64_t mOffset;
Maybe<WebMTimeDataOffset> mLastMapping;
};
#ifdef MOZ_FMP4

View File

@ -100,11 +100,6 @@ public:
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
virtual bool IsExpectingMoreData() override
{
return false;
}
// Used by SourceBuffer.
void AppendData(MediaByteBuffer* aData);
void Ended();

View File

@ -97,7 +97,6 @@ TrackBuffersManager::TrackBuffersManager(dom::SourceBufferAttributes* aAttribute
, mAppendState(AppendState::WAITING_FOR_SEGMENT)
, mBufferFull(false)
, mFirstInitializationSegmentReceived(false)
, mNewSegmentStarted(false)
, mActiveTrack(false)
, mType(aType)
, mParser(ContainerParser::CreateForMIMEType(aType))
@ -656,12 +655,10 @@ TrackBuffersManager::SegmentParserLoop()
// This is a new initialization segment. Obsolete the old one.
RecreateParser(false);
}
mNewSegmentStarted = true;
continue;
}
if (mParser->IsMediaSegmentPresent(mInputBuffer)) {
SetAppendState(AppendState::PARSING_MEDIA_SEGMENT);
mNewSegmentStarted = true;
continue;
}
// We have neither an init segment nor a media segment, this is either
@ -672,7 +669,7 @@ TrackBuffersManager::SegmentParserLoop()
}
int64_t start, end;
bool newData = mParser->ParseStartAndEndTimestamps(mInputBuffer, start, end);
mParser->ParseStartAndEndTimestamps(mInputBuffer, start, end);
mProcessedInput += mInputBuffer->Length();
// 5. If the append state equals PARSING_INIT_SEGMENT, then run the
@ -699,22 +696,6 @@ TrackBuffersManager::SegmentParserLoop()
NeedMoreData();
return;
}
// We can't feed some demuxers (WebMDemuxer) with data that do not have
// monotonizally increasing timestamps. So we check if we have a
// discontinuity from the previous segment parsed.
// If so, recreate a new demuxer to ensure that the demuxer is only fed
// monotonically increasing data.
if (newData) {
if (mNewSegmentStarted && mLastParsedEndTime.isSome() &&
start < mLastParsedEndTime.ref().ToMicroseconds()) {
ResetDemuxingState();
return;
}
mNewSegmentStarted = false;
mLastParsedEndTime = Some(TimeUnit::FromMicroseconds(end));
}
// 3. If the input buffer contains one or more complete coded frames, then run the coded frame processing algorithm.
nsRefPtr<TrackBuffersManager> self = this;
mProcessingRequest.Begin(CodedFrameProcessing()
@ -775,7 +756,6 @@ TrackBuffersManager::ShutdownDemuxers()
mAudioTracks.mDemuxer = nullptr;
}
mInputDemuxer = nullptr;
mLastParsedEndTime.reset();
}
void
@ -800,58 +780,6 @@ TrackBuffersManager::CreateDemuxerforMIMEType()
return;
}
// We reset the demuxer by creating a new one and initializing it.
void
TrackBuffersManager::ResetDemuxingState()
{
MOZ_ASSERT(mParser && mParser->HasInitData());
RecreateParser(true);
mCurrentInputBuffer = new SourceBufferResource(mType);
// The demuxer isn't initialized yet ; we don't want to notify it
// that data has been appended yet ; so we simply append the init segment
// to the resource.
mCurrentInputBuffer->AppendData(mParser->InitData());
CreateDemuxerforMIMEType();
if (!mInputDemuxer) {
RejectAppend(NS_ERROR_FAILURE, __func__);
return;
}
mDemuxerInitRequest.Begin(mInputDemuxer->Init()
->Then(GetTaskQueue(), __func__,
this,
&TrackBuffersManager::OnDemuxerResetDone,
&TrackBuffersManager::OnDemuxerInitFailed));
}
void
TrackBuffersManager::OnDemuxerResetDone(nsresult)
{
MOZ_ASSERT(OnTaskQueue());
MSE_DEBUG("mAbort:%d", static_cast<bool>(mAbort));
mDemuxerInitRequest.Complete();
if (mAbort) {
RejectAppend(NS_ERROR_ABORT, __func__);
return;
}
// Recreate track demuxers.
uint32_t numVideos = mInputDemuxer->GetNumberTracks(TrackInfo::kVideoTrack);
if (numVideos) {
// We currently only handle the first video track.
mVideoTracks.mDemuxer = mInputDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
MOZ_ASSERT(mVideoTracks.mDemuxer);
}
uint32_t numAudios = mInputDemuxer->GetNumberTracks(TrackInfo::kAudioTrack);
if (numAudios) {
// We currently only handle the first audio track.
mAudioTracks.mDemuxer = mInputDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
MOZ_ASSERT(mAudioTracks.mDemuxer);
}
SegmentParserLoop();
}
void
TrackBuffersManager::AppendDataToCurrentInputBuffer(MediaByteBuffer* aData)
{
@ -1128,14 +1056,6 @@ TrackBuffersManager::CodedFrameProcessing()
// The mediaRange is offset by the init segment position previously added.
uint32_t length =
mediaRange.mEnd - (mProcessedInput - mInputBuffer->Length());
if (!length) {
// We've completed our earlier media segment and no new data is to be
// processed. This happens with some containers that can't detect that a
// media segment is ending until a new one starts.
nsRefPtr<CodedFrameProcessingPromise> p = mProcessingPromise.Ensure(__func__);
CompleteCodedFrameProcessing();
return p;
}
nsRefPtr<MediaByteBuffer> segment = new MediaByteBuffer;
if (!segment->AppendElements(mInputBuffer->Elements(), length, fallible)) {
return CodedFrameProcessingPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__);

View File

@ -113,7 +113,6 @@ private:
void InitializationSegmentReceived();
void ShutdownDemuxers();
void CreateDemuxerforMIMEType();
void ResetDemuxingState();
void NeedMoreData();
void RejectAppend(nsresult aRejectValue, const char* aName);
// Will return a promise that will be resolved once all frames of the current
@ -152,8 +151,6 @@ private:
// TODO: Unused for now.
Atomic<bool> mBufferFull;
bool mFirstInitializationSegmentReceived;
// Set to true once a new segment is started.
bool mNewSegmentStarted;
bool mActiveTrack;
Maybe<media::TimeUnit> mGroupStartTimestamp;
media::TimeUnit mGroupEndTimestamp;
@ -174,11 +171,9 @@ private:
nsRefPtr<MediaDataDemuxer> mInputDemuxer;
// Length already processed in current media segment.
uint32_t mProcessedInput;
Maybe<media::TimeUnit> mLastParsedEndTime;
void OnDemuxerInitDone(nsresult);
void OnDemuxerInitFailed(DemuxerFailureReason aFailure);
void OnDemuxerResetDone(nsresult);
MozPromiseRequestHolder<MediaDataDemuxer::InitPromise> mDemuxerInitRequest;
bool mEncrypted;

View File

@ -344,22 +344,6 @@ void WebMBufferedState::NotifyDataArrived(const unsigned char* aBuffer, uint32_t
i += 1;
}
}
mLastEndOffset = std::max<int64_t>(aOffset + aLength, mLastEndOffset);
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
if (mTimeMapping.IsEmpty()) {
return;
}
int32_t endIdx = mTimeMapping.Length() - 1;
while (endIdx >= 0 && mLastEndOffset < mTimeMapping[endIdx].mEndOffset) {
endIdx -= 1;
}
if (endIdx < 0) {
return;
}
mLastBlockOffset = mTimeMapping[endIdx].mEndOffset;
}
void WebMBufferedState::Reset() {
@ -414,13 +398,6 @@ int64_t WebMBufferedState::GetInitEndOffset()
return mRangeParsers[0].mInitEndOffset;
}
int64_t WebMBufferedState::GetLastBlockOffset()
{
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
return mLastBlockOffset;
}
bool WebMBufferedState::GetStartTime(uint64_t *aTime)
{
ReentrantMonitorAutoEnter mon(mReentrantMonitor);

View File

@ -225,11 +225,7 @@ class WebMBufferedState final
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebMBufferedState)
public:
WebMBufferedState()
: mReentrantMonitor("WebMBufferedState")
, mLastBlockOffset(-1)
, mLastEndOffset(-1)
{
WebMBufferedState() : mReentrantMonitor("WebMBufferedState") {
MOZ_COUNT_CTOR(WebMBufferedState);
}
@ -246,8 +242,6 @@ public:
// Returns end offset of init segment or -1 if none found.
int64_t GetInitEndOffset();
// Returns the end offset of the last complete block or -1 if none found.
int64_t GetLastBlockOffset();
// Returns start time
bool GetStartTime(uint64_t *aTime);
@ -261,16 +255,12 @@ private:
MOZ_COUNT_DTOR(WebMBufferedState);
}
// Synchronizes access to the mTimeMapping array and mLastBlockOffset.
// Synchronizes access to the mTimeMapping array.
ReentrantMonitor mReentrantMonitor;
// Sorted (by offset) map of data offsets to timecodes. Populated
// on the main thread as data is received and parsed by WebMBufferedParsers.
nsTArray<WebMTimeDataOffset> mTimeMapping;
// The last complete block parsed. -1 if not set.
int64_t mLastBlockOffset;
// The last seen data end offset. -1 if not set.
int64_t mLastEndOffset;
// Sorted (by offset) live parser instances. Main thread only.
nsTArray<WebMBufferedParser> mRangeParsers;

View File

@ -35,39 +35,40 @@ extern PRLogModuleInfo* gNesteggLog;
// Functions for reading and seeking using WebMDemuxer required for
// nestegg_io. The 'user data' passed to these functions is the
// demuxer.
// demuxer's MediaResourceIndex
static int webmdemux_read(void* aBuffer, size_t aLength, void* aUserData)
{
MOZ_ASSERT(aUserData);
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
int64_t length = resource->GetLength();
MOZ_ASSERT(aLength < UINT32_MAX);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
int64_t length = demuxer->GetEndDataOffset();
uint32_t count = aLength;
int64_t position = demuxer->GetResource()->Tell();
if (length >= 0 && count + position > length) {
count = length - position;
if (length >= 0 && count + resource->Tell() > length) {
count = uint32_t(length - resource->Tell());
}
uint32_t bytes = 0;
nsresult rv =
demuxer->GetResource()->Read(static_cast<char*>(aBuffer), count, &bytes);
bool eof = bytes < aLength;
nsresult rv = resource->Read(static_cast<char*>(aBuffer), count, &bytes);
bool eof = !bytes;
return NS_FAILED(rv) ? -1 : eof ? 0 : 1;
}
static int webmdemux_seek(int64_t aOffset, int aWhence, void* aUserData)
{
MOZ_ASSERT(aUserData);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
nsresult rv = demuxer->GetResource()->Seek(aWhence, aOffset);
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
nsresult rv = resource->Seek(aWhence, aOffset);
return NS_SUCCEEDED(rv) ? 0 : -1;
}
static int64_t webmdemux_tell(void* aUserData)
{
MOZ_ASSERT(aUserData);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
return demuxer->GetResource()->Tell();
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
return resource->Tell();
}
static void webmdemux_log(nestegg* aContext,
@ -121,15 +122,12 @@ WebMDemuxer::WebMDemuxer(MediaResource* aResource)
, mVideoTrack(0)
, mAudioTrack(0)
, mSeekPreroll(0)
, mLastAudioFrameTime(0)
, mLastVideoFrameTime(0)
, mAudioCodec(-1)
, mVideoCodec(-1)
, mHasVideo(false)
, mHasAudio(false)
, mNeedReIndex(true)
, mLastWebMBlockOffset(-1)
, mIsExpectingMoreData(true)
{
if (!gNesteggLog) {
gNesteggLog = PR_NewLogModule("Nestegg");
@ -254,7 +252,7 @@ WebMDemuxer::ReadMetadata()
io.read = webmdemux_read;
io.seek = webmdemux_seek;
io.tell = webmdemux_tell;
io.userdata = this;
io.userdata = &mResource;
int64_t maxOffset = mBufferedState->GetInitEndOffset();
if (maxOffset == -1) {
maxOffset = mResource.GetLength();
@ -430,8 +428,6 @@ WebMDemuxer::EnsureUpToDateIndex()
if (!mInitData && mBufferedState->GetInitEndOffset() != -1) {
mInitData = mResource.MediaReadAt(0, mBufferedState->GetInitEndOffset());
}
mLastWebMBlockOffset = mBufferedState->GetLastBlockOffset();
mIsExpectingMoreData = mResource.GetResource()->IsExpectingMoreData();
mNeedReIndex = false;
}
@ -458,8 +454,6 @@ WebMDemuxer::GetCrypto()
bool
WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSamples)
{
EnsureUpToDateIndex();
nsRefPtr<NesteggPacketHolder> holder(NextPacket(aType));
if (!holder) {
@ -695,10 +689,6 @@ WebMDemuxer::SeekInternal(const media::TimeUnit& aTarget)
}
WEBM_DEBUG("got offset from buffered state: %" PRIu64 "", offset);
}
mLastAudioFrameTime = 0;
mLastVideoFrameTime = 0;
return NS_OK;
}

View File

@ -85,18 +85,6 @@ public:
// Pushes a packet to the front of the video packet queue.
virtual void PushVideoPacket(NesteggPacketHolder* aItem);
// Public accessor for nestegg callbacks
MediaResourceIndex* GetResource()
{
return &mResource;
}
int64_t GetEndDataOffset()
{
return mLastWebMBlockOffset < 0 || mIsExpectingMoreData
? mResource.GetLength() : mLastWebMBlockOffset;
}
private:
friend class WebMTrackDemuxer;
@ -164,12 +152,6 @@ private:
bool mHasVideo;
bool mHasAudio;
bool mNeedReIndex;
// The last complete block parsed by the WebMBufferedState. -1 if not set.
// We cache those values rather than retrieving them for performance reasons
// as nestegg only performs 1-byte read at a time.
int64_t mLastWebMBlockOffset;
bool mIsExpectingMoreData;
};
class WebMTrackDemuxer : public MediaTrackDemuxer