Bug 1356530 - Change the type of MediaData::mTime to TimeUnit since int64_t is ambiguous. r=kaku

MozReview-Commit-ID: 4bVeqIuWO2O

--HG--
extra : rebase_source : d504ac15a6dc59ad42f3ab80faf23f629d74315f
extra : intermediate-source : 6e52995b6c8146451d98dffc62f6907755dc856e
extra : source : 82d2649cdafb5a6389f6858c23578811933580c9
JW Wang 2017-04-14 17:13:36 +08:00
parent 5f606637f1
commit 302d82c85a
42 changed files with 170 additions and 178 deletions
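
For context, a minimal sketch of the idea behind media::TimeUnit (an illustration only; the real class in dom/media/TimeUnits.h is richer, e.g. it is backed by a checked integer so overflowing arithmetic yields an Invalid() value, and it also provides IsValid(), IsPositive(), FromInfinity() and ToTimeDuration(), all of which appear in the hunks below). The point of the change is that a typed time value cannot be silently confused with a raw microsecond count, a frame index, or a timecode:

    // Sketch only; the method set is limited to what the hunks below rely on,
    // and details such as constructor visibility are assumptions.
    class TimeUnit
    {
    public:
      static TimeUnit FromMicroseconds(int64_t aValue) { return TimeUnit(aValue); }
      static TimeUnit Zero() { return TimeUnit(0); }

      int64_t ToMicroseconds() const { return mValue; }
      double ToSeconds() const { return double(mValue) / 1000000.0; }
      bool IsNegative() const { return mValue < 0; }

      TimeUnit operator+(const TimeUnit& aOther) const { return TimeUnit(mValue + aOther.mValue); }
      TimeUnit operator-(const TimeUnit& aOther) const { return TimeUnit(mValue - aOther.mValue); }
      bool operator>=(const TimeUnit& aOther) const { return mValue >= aOther.mValue; }

    private:
      explicit TimeUnit(int64_t aValue = 0) : mValue(aValue) {}
      int64_t mValue; // always microseconds; callers never touch the raw unit
    };

Because a TimeUnit only compares against another TimeUnit, every remaining raw-microsecond boundary (IPC structs, PRId64 logging, plugin APIs) has to go through an explicit ToMicroseconds() call, which is the shape of most hunks below.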

View File

@@ -752,12 +752,12 @@ ADTSTrackDemuxer::GetNextFrame(const adts::Frame& aFrame)
   UpdateState(aFrame);
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
   ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64

View File

@@ -604,12 +604,12 @@ MP3TrackDemuxer::GetNextFrame(const MediaByteRange& aRange)
   UpdateState(aRange);
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
   if (mNumParsedFrames == 1) {

View File

@@ -237,7 +237,7 @@ VideoData::UpdateTimestamp(const TimeUnit& aTimestamp)
   auto updatedDuration = GetEndTime() - aTimestamp;
   MOZ_ASSERT(!updatedDuration.IsNegative());
-  mTime = aTimestamp.ToMicroseconds();
+  mTime = aTimestamp;
   mDuration = updatedDuration;
 }

View File

@@ -294,7 +294,7 @@ public:
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
-    , mTime(aTimestamp)
+    , mTime(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mTimecode(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
     , mFrames(aFrames)
@@ -308,8 +308,8 @@ public:
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
-  // Start time of sample, in microseconds.
-  int64_t mTime;
+  // Start time of sample.
+  media::TimeUnit mTime;
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
@@ -325,13 +325,13 @@ public:
   media::TimeUnit GetEndTime() const
   {
-    return media::TimeUnit::FromMicroseconds(mTime) + mDuration;
+    return mTime + mDuration;
   }
   bool AdjustForStartTime(int64_t aStartTime)
   {
-    mTime = mTime - aStartTime;
-    return mTime >= 0;
+    mTime = mTime - media::TimeUnit::FromMicroseconds(aStartTime);
+    return !mTime.IsNegative();
   }
   template <typename ReturnType>
@@ -352,7 +352,6 @@ protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
-    , mTime(0)
     , mFrames(aFrames)
     , mKeyframe(false)
   {
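
The effect of the new field type is visible at call sites throughout the rest of the patch, e.g. in MediaDecoderStateMachine.cpp below: a comparison that previously relied on the caller converting one side by hand now compiles only when both sides are TimeUnit, so a forgotten or doubled conversion becomes a type error instead of a silently wrong comparison.

    // Before: both sides are bare int64_t microseconds by convention only.
    if (aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) { ... }

    // After: both sides are media::TimeUnit; mismatched units do not compile.
    if (aSample->mTime < mCurrentTimeBeforeSeek) { ... }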

View File

@@ -1237,8 +1237,8 @@ private:
       return seekTime;
     }
-    const int64_t audioStart = audio ? audio->mTime : INT64_MAX;
-    const int64_t videoStart = video ? video->mTime : INT64_MAX;
+    const int64_t audioStart = audio ? audio->mTime.ToMicroseconds() : INT64_MAX;
+    const int64_t videoStart = video ? video->mTime.ToMicroseconds() : INT64_MAX;
     const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds());
     const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds());
     return TimeUnit::FromMicroseconds(
@@ -1314,7 +1314,7 @@ private:
   {
     if (mSeekJob.mTarget->IsFast()
         && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
-        && aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
+        && aSample->mTime < mCurrentTimeBeforeSeek) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
@@ -1335,7 +1335,7 @@ private:
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
-    auto audioTime = TimeUnit::FromMicroseconds(aAudio->mTime);
+    auto audioTime = aAudio->mTime;
     if (audioTime + sampleDuration <= mSeekJob.mTarget->GetTime()) {
       // Our seek target lies after the frames in this AudioData. Don't
       // push it onto the audio queue, and keep decoding forwards.
@@ -1405,18 +1405,18 @@ private:
   {
     MOZ_ASSERT(aVideo);
     SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
-         aVideo->mTime, aVideo->GetEndTime().ToMicroseconds());
+         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
     const auto target = mSeekJob.mTarget->GetTime();
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
     if (target >= aVideo->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 "] target=%" PRId64,
-           aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+           aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
            target.ToMicroseconds());
       mFirstVideoFrameAfterSeek = aVideo;
     } else {
-      if (target.ToMicroseconds() >= aVideo->mTime &&
+      if (target >= aVideo->mTime &&
           aVideo->GetEndTime() >= target) {
         // The seek target lies inside this frame's time slice. Adjust the
         // frame's start time to match the seek target.
@@ -1426,7 +1426,7 @@ private:
         SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 "] "
              "containing target=%" PRId64,
-             aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+             aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
              target.ToMicroseconds());
         MOZ_ASSERT(VideoQueue().GetSize() == 0,
@@ -1475,7 +1475,7 @@ static void
 DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare)
 {
   while(aQueue.GetSize() > 0) {
-    if (aCompare(aQueue.PeekFront()->mTime)) {
+    if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) {
       RefPtr<Type> releaseMe = aQueue.PopFront();
       continue;
     }
@@ -1575,7 +1575,7 @@ private:
     MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
     MOZ_ASSERT(NeedMoreVideo());
-    if (aVideo->mTime > mCurrentTime.ToMicroseconds()) {
+    if (aVideo->mTime > mCurrentTime) {
       mMaster->PushVideo(aVideo);
       FinishSeek();
     } else {
@@ -1667,7 +1667,7 @@ private:
   {
     RefPtr<VideoData> data = VideoQueue().PeekFront();
     if (data) {
-      mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
+      mSeekJob.mTarget->SetTime(data->mTime);
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
@@ -3177,7 +3177,8 @@ MediaDecoderStateMachine::RequestAudioData()
       // audio->GetEndTime() is not always mono-increasing in chained ogg.
       mDecodedAudioEndTime = std::max(
         aAudio->GetEndTime(), mDecodedAudioEndTime);
-      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
+      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
+           aAudio->mTime.ToMicroseconds(),
            aAudio->GetEndTime().ToMicroseconds());
       mStateObj->HandleAudioDecoded(aAudio);
     },
@@ -3223,7 +3224,8 @@ MediaDecoderStateMachine::RequestVideoData(bool aSkipToNextKeyframe,
       // Handle abnormal or negative timestamps.
       mDecodedVideoEndTime = std::max(
         mDecodedVideoEndTime, aVideo->GetEndTime());
-      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
+      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
+           aVideo->mTime.ToMicroseconds(),
           aVideo->GetEndTime().ToMicroseconds());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },

View File

@@ -1724,7 +1724,7 @@ MediaFormatReader::NotifyNewOutput(
   auto& decoder = GetDecoderData(aTrack);
   for (auto& sample : aResults) {
     LOGV("Received new %s sample time:%" PRId64 " duration:%" PRId64,
-         TrackTypeToStr(aTrack), sample->mTime,
+         TrackTypeToStr(aTrack), sample->mTime.ToMicroseconds(),
         sample->mDuration.ToMicroseconds());
     decoder.mOutput.AppendElement(sample);
     decoder.mNumSamplesOutput++;
@@ -2011,19 +2011,19 @@ MediaFormatReader::HandleDemuxedSamples(
       if (sample->mKeyframe) {
         ScheduleUpdate(aTrack);
       } else {
-        auto time = TimeInterval(
-          TimeUnit::FromMicroseconds(sample->mTime), sample->GetEndTime());
+        auto time = TimeInterval(sample->mTime, sample->GetEndTime());
         InternalSeekTarget seekTarget =
           decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
         LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64,
-            sample->mTime);
+            sample->mTime.ToMicroseconds());
         InternalSeek(aTrack, seekTarget);
       }
       return;
     }
     LOGV("Input:%" PRId64 " (dts:%" PRId64 " kf:%d)",
-         sample->mTime, sample->mTimecode.ToMicroseconds(), sample->mKeyframe);
+         sample->mTime.ToMicroseconds(), sample->mTimecode.ToMicroseconds(),
+         sample->mKeyframe);
     decoder.mNumSamplesInput++;
     decoder.mSizeOfQueue++;
     if (aTrack == TrackInfo::kVideoTrack) {
@@ -2186,7 +2186,7 @@ MediaFormatReader::Update(TrackType aTrack)
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
     RefPtr<MediaData>& output = decoder.mOutput[0];
     InternalSeekTarget target = decoder.mTimeThreshold.ref();
-    media::TimeUnit time = media::TimeUnit::FromMicroseconds(output->mTime);
+    media::TimeUnit time = output->mTime;
     if (time >= target.Time()) {
       // We have reached our internal seek target.
       decoder.mTimeThreshold.reset();
@@ -2196,7 +2196,7 @@ MediaFormatReader::Update(TrackType aTrack)
     if (time < target.Time() || (target.mDropTarget && target.Contains(time))) {
       LOGV("Internal Seeking: Dropping %s frame time:%f wanted:%f (kf:%d)",
            TrackTypeToStr(aTrack),
-           media::TimeUnit::FromMicroseconds(output->mTime).ToSeconds(),
+           output->mTime.ToSeconds(),
            target.Time().ToSeconds(),
            output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
@@ -2206,7 +2206,8 @@ MediaFormatReader::Update(TrackType aTrack)
   while (decoder.mOutput.Length()
          && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
-    LOGV("Dropping null data. Time: %" PRId64, decoder.mOutput[0]->mTime);
+    LOGV("Dropping null data. Time: %" PRId64,
+         decoder.mOutput[0]->mTime.ToMicroseconds());
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
@@ -2218,8 +2219,7 @@ MediaFormatReader::Update(TrackType aTrack)
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
     decoder.mLastSampleTime =
-      Some(TimeInterval(TimeUnit::FromMicroseconds(output->mTime),
-                        output->GetEndTime()));
+      Some(TimeInterval(output->mTime, output->GetEndTime()));
     decoder.mNumSamplesOutputTotal++;
     ReturnOutput(output, aTrack);
     // We have a decoded sample ready to be returned.
@@ -2229,17 +2229,17 @@ MediaFormatReader::Update(TrackType aTrack)
       a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
       mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
       if (output->mKeyframe) {
-        if (mPreviousDecodedKeyframeTime_us < output->mTime) {
+        if (mPreviousDecodedKeyframeTime_us < output->mTime.ToMicroseconds()) {
           // There is a previous keyframe -> Record inter-keyframe stats.
           uint64_t segment_us =
-            output->mTime - mPreviousDecodedKeyframeTime_us;
+            output->mTime.ToMicroseconds() - mPreviousDecodedKeyframeTime_us;
           a.mStats.mInterKeyframeSum_us += segment_us;
           a.mStats.mInterKeyframeCount += 1;
           if (a.mStats.mInterKeyFrameMax_us < segment_us) {
             a.mStats.mInterKeyFrameMax_us = segment_us;
           }
         }
-        mPreviousDecodedKeyframeTime_us = output->mTime;
+        mPreviousDecodedKeyframeTime_us = output->mTime.ToMicroseconds();
       }
       nsCString error;
       mVideo.mIsHardwareAccelerated =
@@ -2379,7 +2379,7 @@ MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack)
   MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::NULL_DATA);
   LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]", TrackTypeToStr(aTrack),
-      aData->mTime, aData->GetEndTime().ToMicroseconds());
+      aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
   if (aTrack == TrackInfo::kAudioTrack) {
     AudioData* audioData = static_cast<AudioData*>(aData);
@@ -2506,8 +2506,7 @@ MediaFormatReader::DropDecodedSamples(TrackType aTrack)
   auto& decoder = GetDecoderData(aTrack);
   size_t lengthDecodedQueue = decoder.mOutput.Length();
   if (lengthDecodedQueue && decoder.mTimeThreshold.isSome()) {
-    TimeUnit time =
-      TimeUnit::FromMicroseconds(decoder.mOutput.LastElement()->mTime);
+    TimeUnit time = decoder.mOutput.LastElement()->mTime;
     if (time >= decoder.mTimeThreshold.ref().Time()) {
       // We would have reached our internal seek target.
       decoder.mTimeThreshold.reset();
@@ -3102,8 +3101,7 @@ MediaFormatReader::OnFirstDemuxCompleted(
   auto& decoder = GetDecoderData(aType);
   MOZ_ASSERT(decoder.mFirstDemuxedSampleTime.isNothing());
-  decoder.mFirstDemuxedSampleTime.emplace(
-    TimeUnit::FromMicroseconds(aSamples->mSamples[0]->mTime));
+  decoder.mFirstDemuxedSampleTime.emplace(aSamples->mSamples[0]->mTime);
   MaybeResolveMetadataPromise();
 }

View File

@@ -47,7 +47,7 @@ public:
     MOZ_ASSERT(!mEndOfStream);
     MOZ_ASSERT(aItem);
     NS_ADDREF(aItem);
-    MOZ_ASSERT(aItem->GetEndTime().ToMicroseconds() >= aItem->mTime);
+    MOZ_ASSERT(aItem->GetEndTime() >= aItem->mTime);
     nsDeque::Push(aItem);
     mPushEvent.Notify(RefPtr<T>(aItem));
   }
@@ -104,7 +104,7 @@ public:
     }
     T* last = static_cast<T*>(nsDeque::Peek());
     T* first = static_cast<T*>(nsDeque::PeekFront());
-    return last->GetEndTime().ToMicroseconds() - first->mTime;
+    return (last->GetEndTime() - first->mTime).ToMicroseconds();
   }
   void LockedForEach(nsDequeFunctor& aFunctor) const {

View File

@@ -141,7 +141,8 @@ bool AndroidMediaReader::DecodeVideoFrame(bool& aKeyframeSkip,
     if (mLastVideoFrame) {
       int64_t durationUs;
       mPlugin->GetDuration(mPlugin, &durationUs);
-      durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0);
+      durationUs = std::max<int64_t>(
+        durationUs - mLastVideoFrame->mTime.ToMicroseconds(), 0);
       mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(durationUs));
       mVideoQueue.Push(mLastVideoFrame);
       mLastVideoFrame = nullptr;
@@ -247,8 +248,8 @@ bool AndroidMediaReader::DecodeVideoFrame(bool& aKeyframeSkip,
       // Calculate the duration as the timestamp of the current frame minus the
       // timestamp of the previous frame. We can then return the previously
       // decoded frame, and it will have a valid timestamp.
-      int64_t duration = v->mTime - mLastVideoFrame->mTime;
-      mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(duration));
+      auto duration = v->mTime - mLastVideoFrame->mTime;
+      mLastVideoFrame->UpdateDuration(duration);
      // We have the start time of the next frame, so we can push the previous
      // frame into the queue, except if the end time is below the threshold,
@@ -320,7 +321,7 @@ AndroidMediaReader::Seek(const SeekTarget& aTarget)
     RefPtr<AndroidMediaReader> self = this;
     DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
       self->mSeekRequest.Complete();
-      self->mAudioSeekTimeUs = v->mTime;
+      self->mAudioSeekTimeUs = v->mTime.ToMicroseconds();
       self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
     }, [self, aTarget] () {
       self->mSeekRequest.Complete();

View File

@@ -980,13 +980,13 @@ FlacTrackDemuxer::GetNextFrame(const flac::Frame& aFrame)
     return nullptr;
   }
-  frame->mTime = aFrame.Time().ToMicroseconds();
+  frame->mTime = aFrame.Time();
   frame->mDuration = aFrame.Duration();
-  frame->mTimecode = TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mOffset = aFrame.Offset();
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(!frame->mDuration.IsNegative());
   return frame.forget();

View File

@@ -411,10 +411,10 @@ MP4TrackDemuxer::EnsureUpToDateIndex()
 RefPtr<MP4TrackDemuxer::SeekPromise>
 MP4TrackDemuxer::Seek(const media::TimeUnit& aTime)
 {
-  int64_t seekTime = aTime.ToMicroseconds();
+  auto seekTime = aTime;
   mQueuedSample = nullptr;
-  mIterator->Seek(seekTime);
+  mIterator->Seek(seekTime.ToMicroseconds());
   // Check what time we actually seeked to.
   RefPtr<MediaRawData> sample;
@@ -436,8 +436,7 @@ MP4TrackDemuxer::Seek(const media::TimeUnit& aTime)
   SetNextKeyFrameTime();
-  return SeekPromise::CreateAndResolve(
-    media::TimeUnit::FromMicroseconds(seekTime), __func__);
+  return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 already_AddRefed<MediaRawData>
@@ -461,7 +460,8 @@ MP4TrackDemuxer::GetNextSample()
       NS_WARNING(nsPrintfCString("Frame incorrectly marked as %skeyframe "
                                  "@ pts:%" PRId64 " dur:%" PRId64
                                  " dts:%" PRId64,
-                                 keyframe ? "" : "non-", sample->mTime,
+                                 keyframe ? "" : "non-",
+                                 sample->mTime.ToMicroseconds(),
                                  sample->mDuration.ToMicroseconds(),
                                  sample->mTimecode.ToMicroseconds())
                    .get());
@@ -473,7 +473,8 @@ MP4TrackDemuxer::GetNextSample()
       NS_WARNING(
         nsPrintfCString("Invalid H264 frame @ pts:%" PRId64 " dur:%" PRId64
                         " dts:%" PRId64,
-                        sample->mTime, sample->mDuration.ToMicroseconds(),
+                        sample->mTime.ToMicroseconds(),
+                        sample->mDuration.ToMicroseconds(),
                         sample->mTimecode.ToMicroseconds())
           .get());
       // We could reject the sample now, however demuxer errors are fatal.
@@ -540,7 +541,7 @@ MP4TrackDemuxer::GetSamples(int32_t aNumSamples)
   if (mNextKeyframeTime.isNothing()
       || samples->mSamples.LastElement()->mTime
-      >= mNextKeyframeTime.value().ToMicroseconds()) {
+      >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
   return SamplesPromise::CreateAndResolve(samples, __func__);
@@ -590,7 +591,7 @@ MP4TrackDemuxer::SkipToNextRandomAccessPoint(
   RefPtr<MediaRawData> sample;
   while (!found && (sample = GetNextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }

View File

@@ -190,7 +190,7 @@ ChromiumCDMParent::InitCDMInputBuffer(gmp::CDMInputBuffer& aBuffer,
   aBuffer = gmp::CDMInputBuffer(shmem,
                                 crypto.mKeyId,
                                 crypto.mIV,
-                                aSample->mTime,
+                                aSample->mTime.ToMicroseconds(),
                                 aSample->mDuration.ToMicroseconds(),
                                 crypto.mPlainSizes,
                                 crypto.mEncryptedSizes,
@@ -835,7 +835,7 @@ ChromiumCDMParent::DecryptAndDecodeFrame(MediaRawData* aSample)
   }
   GMP_LOG("ChromiumCDMParent::DecryptAndDecodeFrame t=%" PRId64,
-          aSample->mTime);
+          aSample->mTime.ToMicroseconds());
   CDMInputBuffer buffer;

View File

@@ -63,7 +63,7 @@ public:
     RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
     RefPtr<MP4DemuxerBinding> binding = this;
-    int64_t time = -1;
+    auto time = media::TimeUnit::Invalid();
     while (mIndex < mSamples.Length()) {
       uint32_t i = mIndex++;
       if (mSamples[i]->mKeyframe) {
@@ -74,7 +74,7 @@ public:
     RefPtr<GenericPromise> p = mCheckTrackKeyFramePromise.Ensure(__func__);
-    if (time == -1) {
+    if (!time.IsValid()) {
       mCheckTrackKeyFramePromise.Resolve(true, __func__);
       return p;
     }
@@ -82,7 +82,7 @@ public:
     DispatchTask(
       [track, time, binding] () {
-        track->Seek(media::TimeUnit::FromMicroseconds(time))->Then(binding->mTaskQueue, __func__,
+        track->Seek(time)->Then(binding->mTaskQueue, __func__,
          [track, time, binding] () {
            track->GetSamples()->Then(binding->mTaskQueue, __func__,
              [track, time, binding] (RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {

View File

@@ -230,7 +230,7 @@ VideoDecoderChild::Decode(MediaRawData* aSample)
   memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
   MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
-                                        aSample->mTime,
+                                        aSample->mTime.ToMicroseconds(),
                                         aSample->mTimecode.ToMicroseconds(),
                                         aSample->mDuration.ToMicroseconds(),
                                         aSample->mFrames,

View File

@@ -137,7 +137,7 @@ VideoDecoderParent::RecvInput(const MediaRawDataIPDL& aData)
     return IPC_OK();
   }
   data->mOffset = aData.base().offset();
-  data->mTime = aData.base().time();
+  data->mTime = media::TimeUnit::FromMicroseconds(aData.base().time());
   data->mTimecode = media::TimeUnit::FromMicroseconds(aData.base().timecode());
   data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
   data->mKeyframe = aData.base().keyframe();
@@ -191,7 +191,8 @@ VideoDecoderParent::ProcessDecodedData(
     }
     VideoDataIPDL output(
-      MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode.ToMicroseconds(),
+      MediaDataIPDL(data->mOffset, data->mTime.ToMicroseconds(),
+                    data->mTimecode.ToMicroseconds(),
                     data->mDuration.ToMicroseconds(),
                     data->mFrames, data->mKeyframe),
       video->mDisplay,

View File

@@ -285,7 +285,8 @@ AudioSink::PopFrames(uint32_t aFrames)
   auto framesToPop = std::min(aFrames, mCursor->Available());
   SINK_LOG_V("playing audio at time=%" PRId64 " offset=%u length=%u",
-             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
+             mCurrentData->mTime.ToMicroseconds(),
+             mCurrentData->mFrames - mCursor->Available(), framesToPop);
   UniquePtr<AudioStream::Chunk> chunk =
     MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
@@ -406,8 +407,8 @@ AudioSink::NotifyAudioNeeded()
     // audio hardware, so we can play across the gap.
     // Calculate the timestamp of the next chunk of audio in numbers of
     // samples.
-    CheckedInt64 sampleTime = TimeUnitToFrames(
-      TimeUnit::FromMicroseconds(data->mTime) - mStartTime, data->mRate);
+    CheckedInt64 sampleTime =
+      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
     // Calculate the number of frames that have been pushed onto the audio hardware.
     CheckedInt64 missingFrames = sampleTime - mFramesParsed;
@@ -501,7 +502,7 @@ AudioSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
   }
   RefPtr<AudioData> data =
     new AudioData(aReference->mOffset,
-                  aReference->mTime,
+                  aReference->mTime.ToMicroseconds(),
                   duration.value(),
                   frames,
                   Move(aBuffer),

View File

@@ -461,7 +461,7 @@ SendStreamAudio(DecodedStreamData* aStream, const media::TimeUnit& aStartTime,
   // the exact same silences
   CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
     + TimeUnitToFrames(aStartTime, aRate);
-  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
+  CheckedInt64 frameOffset = TimeUnitToFrames(audio->mTime, aRate);
   if (!audioWrittenOffset.isValid() ||
       !frameOffset.isValid() ||
@@ -595,7 +595,7 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i];
-    if (mData->mNextVideoTime.ToMicroseconds() < v->mTime) {
+    if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
@@ -605,12 +605,11 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
-      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
-        FromMicroseconds(v->mTime),
+      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
         mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
-        tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
+        tracksStartTimeStamp + v->mTime.ToTimeDuration(),
         &output, aPrincipalHandle);
-      mData->mNextVideoTime = FromMicroseconds(v->mTime);
+      mData->mNextVideoTime = v->mTime;
     }
     if (mData->mNextVideoTime < v->GetEndTime()) {
@@ -746,7 +745,7 @@ DecodedStream::NotifyOutput(int64_t aTime)
 {
   AssertOwnerThread();
   mLastOutputTime = FromMicroseconds(aTime);
-  int64_t currentTime = GetPosition().ToMicroseconds();
+  auto currentTime = GetPosition();
   // Remove audio samples that have been played by MSG from the queue.
   RefPtr<AudioData> a = mAudioQueue.PeekFront();

View File

@@ -365,8 +365,7 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
       continue;
     }
-    int64_t frameTime = frame->mTime;
-    if (frameTime < 0) {
+    if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
@@ -374,7 +373,7 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
     TimeStamp t;
     if (aMaxFrames > 1) {
       MOZ_ASSERT(!aClockTimeStamp.IsNull());
-      int64_t delta = frame->mTime - aClockTime;
+      int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
       t = aClockTimeStamp +
           TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
       if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
@@ -394,7 +393,8 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
     img->mProducerID = mProducerID;
     VSINK_LOG_V("playing video frame %" PRId64 " (id=%x) (vq-queued=%" PRIuSIZE ")",
-                frame->mTime, frame->mFrameID, VideoQueue().GetSize());
+                frame->mTime.ToMicroseconds(), frame->mFrameID,
+                VideoQueue().GetSize());
   }
   if (images.Length() > 0) {
@@ -424,7 +424,7 @@ VideoSink::UpdateRenderedVideoFrames()
     } else {
       mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
       VSINK_LOG_V("discarding video frame mTime=%" PRId64 " clock_time=%" PRId64,
-                  frame->mTime, clockTime.ToMicroseconds());
+                  frame->mTime.ToMicroseconds(), clockTime.ToMicroseconds());
     }
   }
@@ -450,7 +450,7 @@ VideoSink::UpdateRenderedVideoFrames()
     return;
   }
-  int64_t nextFrameTime = frames[1]->mTime;
+  int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(
     nextFrameTime - clockTime.ToMicroseconds(), MIN_UPDATE_INTERVAL_US);
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(

View File

@@ -479,7 +479,7 @@ MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples)
   }
   RefPtr<SamplesHolder> samples = new SamplesHolder;
   samples->mSamples.AppendElement(sample);
-  if (mNextRandomAccessPoint.ToMicroseconds() <= sample->mTime) {
+  if (mNextRandomAccessPoint <= sample->mTime) {
     MonitorAutoLock mon(mMonitor);
     mNextRandomAccessPoint =
       mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);

View File

@@ -459,8 +459,8 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
     MSE_DEBUG("Step1. Evicting %" PRId64 " bytes prior currentTime",
               aSizeToEvict - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(0),
-                   TimeUnit::FromMicroseconds(buffer[lastKeyFrameIndex]->mTime - 1)));
+      TimeInterval(TimeUnit::Zero(),
+                   buffer[lastKeyFrameIndex]->mTime - TimeUnit::FromMicroseconds(1)));
   }
   if (mSizeSourceBuffer <= finalSize) {
@@ -487,7 +487,7 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
   uint32_t evictedFramesStartIndex = buffer.Length();
   for (int32_t i = buffer.Length() - 1; i >= 0; i--) {
     const auto& frame = buffer[i];
-    if (frame->mTime <= upperLimit.ToMicroseconds() || toEvict < 0) {
+    if (frame->mTime <= upperLimit || toEvict < 0) {
       // We've reached a frame that shouldn't be evicted -> Evict after it -> i+1.
       // Or the previous loop reached the eviction threshold -> Evict from it -> i+1.
       evictedFramesStartIndex = i + 1;
@@ -499,7 +499,7 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
     MSE_DEBUG("Step2. Evicting %" PRId64 " bytes from trailing data",
               mSizeSourceBuffer - finalSize - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(buffer[evictedFramesStartIndex]->mTime),
+      TimeInterval(buffer[evictedFramesStartIndex]->mTime,
                    TimeUnit::FromInfinity()));
   }
 }
@@ -559,8 +559,8 @@ TrackBuffersManager::CodedFrameRemoval(TimeInterval aInterval)
     // then update remove end timestamp to that random access point timestamp.
     if (end < track->mBufferedRanges.GetEnd()) {
       for (auto& frame : track->GetTrackBuffer()) {
-        if (frame->mKeyframe && frame->mTime >= end.ToMicroseconds()) {
-          removeEndTimestamp = TimeUnit::FromMicroseconds(frame->mTime);
+        if (frame->mKeyframe && frame->mTime >= end) {
+          removeEndTimestamp = frame->mTime;
           break;
         }
       }
@@ -1420,14 +1420,12 @@ TimeInterval
 TrackBuffersManager::PresentationInterval(const TrackBuffer& aSamples) const
 {
   TimeInterval presentationInterval =
-    TimeInterval(TimeUnit::FromMicroseconds(aSamples[0]->mTime),
-                 aSamples[0]->GetEndTime());
+    TimeInterval(aSamples[0]->mTime, aSamples[0]->GetEndTime());
   for (uint32_t i = 1; i < aSamples.Length(); i++) {
     auto& sample = aSamples[i];
     presentationInterval = presentationInterval.Span(
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime()));
+      TimeInterval(sample->mTime, sample->GetEndTime()));
   }
   return presentationInterval;
 }
@@ -1445,8 +1443,8 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
   // Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
   TimeUnit presentationTimestamp =
     mSourceBufferAttributes->mGenerateTimestamps
-    ? TimeUnit()
-    : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
+    ? TimeUnit::Zero()
+    : aSamples[0]->mTime;
   // 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
   CheckSequenceDiscontinuity(presentationTimestamp);
@@ -1488,7 +1486,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
                  "kf:%d)",
                  aTrackData.mInfo->mMimeType.get(),
-                 sample->mTime,
+                 sample->mTime.ToMicroseconds(),
                  sample->GetEndTime().ToMicroseconds(),
                  sample->mTimecode.ToMicroseconds(),
                  sample->mDuration.ToMicroseconds(),
@@ -1524,7 +1522,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     // Step 3 is performed earlier or when a discontinuity has been detected.
     // 4. If timestampOffset is not 0, then run the following steps:
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     TimeUnit sampleTimecode = sample->mTimecode;
     TimeUnit sampleDuration = sample->mDuration;
     TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
@@ -1618,7 +1616,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     samplesRange += sampleInterval;
     sizeNewSamples += sample->ComputedSizeOfIncludingThis();
-    sample->mTime = sampleInterval.mStart.ToMicroseconds();
+    sample->mTime = sampleInterval.mStart;
     sample->mTimecode = decodeTimestamp;
     sample->mTrackInfo = trackBuffer.mLastInfo;
     samples.AppendElement(sample);
@@ -1694,7 +1692,7 @@ TrackBuffersManager::CheckNextInsertionIndex(TrackData& aTrackData,
   // We will insert our new frames right before.
   for (uint32_t i = 0; i < data.Length(); i++) {
     const RefPtr<MediaRawData>& sample = data[i];
-    if (sample->mTime >= target.mStart.ToMicroseconds() ||
+    if (sample->mTime >= target.mStart ||
         sample->GetEndTime() > target.mStart) {
       aTrackData.mNextInsertionIndex = Some(i);
       return true;
@@ -1764,8 +1762,7 @@ TrackBuffersManager::InsertFrames(TrackBuffer& aSamples,
   }
   // 16. Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
-  if (!CheckNextInsertionIndex(aTrackData,
-                               TimeUnit::FromMicroseconds(aSamples[0]->mTime))) {
+  if (!CheckNextInsertionIndex(aTrackData, aSamples[0]->mTime)) {
    RejectProcessing(NS_ERROR_FAILURE, __func__);
    return;
  }
@@ -1837,8 +1834,7 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   for (uint32_t i = aStartIndex; i < data.Length(); i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     if (aIntervals.Contains(sampleInterval)) {
       if (firstRemovedIndex.isNothing()) {
         firstRemovedIndex = Some(i);
@@ -1875,8 +1871,7 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   for (uint32_t i = firstRemovedIndex.ref(); i <= lastRemovedIndex; i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     removedIntervals += sampleInterval;
     if (sample->mDuration > maxSampleDuration) {
       maxSampleDuration = sample->mDuration;
@@ -1938,15 +1933,14 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   if (aIntervals.GetEnd() >= aTrackData.mHighestStartTimestamp) {
     // The sample with the highest presentation time got removed.
     // Rescan the trackbuffer to determine the new one.
-    int64_t highestStartTime = 0;
+    TimeUnit highestStartTime;
     for (const auto& sample : data) {
       if (sample->mTime > highestStartTime) {
         highestStartTime = sample->mTime;
       }
     }
     MonitorAutoLock mon(mMonitor);
-    aTrackData.mHighestStartTimestamp =
-      TimeUnit::FromMicroseconds(highestStartTime);
+    aTrackData.mHighestStartTimestamp = highestStartTime;
   }
   return firstRemovedIndex.ref();
@@ -2116,7 +2110,7 @@ uint32_t TrackBuffersManager::FindSampleIndex(const TrackBuffer& aTrackBuffer,
   for (uint32_t i = 0; i < aTrackBuffer.Length(); i++) {
     const RefPtr<MediaRawData>& sample = aTrackBuffer[i];
-    if (sample->mTime >= target.ToMicroseconds() ||
+    if (sample->mTime >= target ||
         sample->GetEndTime() > target) {
       return i;
     }
@@ -2165,7 +2159,7 @@ TrackBuffersManager::Seek(TrackInfo::TrackType aTrack,
   uint32_t lastKeyFrameIndex = 0;
   for (; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     if (sampleTime > aTime && lastKeyFrameTime.isSome()) {
       break;
     }
@@ -2238,7 +2232,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
       break;
     }
     if (sample->mKeyframe &&
-        sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
+        sample->mTime >= aTimeThreadshold) {
       aFound = true;
       break;
     }
@@ -2252,8 +2246,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
   // skipped.
   if (aFound) {
     trackData.mNextSampleTimecode = track[i]->mTimecode;
-    trackData.mNextSampleTime =
-      TimeUnit::FromMicroseconds(track[i]->mTime);
+    trackData.mNextSampleTime = track[i]->mTime;
     trackData.mNextGetSampleIndex = Some(i);
   } else if (i > 0) {
     // Go back to the previous keyframe or the original position so the next
@@ -2262,7 +2255,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
       const RefPtr<MediaRawData>& sample = track[j];
       if (sample->mKeyframe) {
         trackData.mNextSampleTimecode = sample->mTimecode;
-        trackData.mNextSampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+        trackData.mNextSampleTime = sample->mTime;
         trackData.mNextGetSampleIndex = Some(uint32_t(j));
         // We are unable to skip to a keyframe past aTimeThreshold, however
         // we are speeding up decoding by dropping the unplayable frames.
@@ -2298,7 +2291,7 @@ TrackBuffersManager::GetSample(TrackInfo::TrackType aTrack,
   const RefPtr<MediaRawData>& sample = track[aIndex];
   if (!aIndex || sample->mTimecode <= aExpectedDts + aFuzz ||
-      sample->mTime <= (aExpectedPts + aFuzz).ToMicroseconds()) {
+      sample->mTime <= aExpectedPts + aFuzz) {
     return sample;
   }
@@ -2366,8 +2359,7 @@ TrackBuffersManager::GetSample(TrackInfo::TrackType aTrack,
   if (nextSample) {
     // We have a valid next sample, can use exact values.
     trackData.mNextSampleTimecode = nextSample->mTimecode;
-    trackData.mNextSampleTime =
-      TimeUnit::FromMicroseconds(nextSample->mTime);
+    trackData.mNextSampleTime = nextSample->mTime;
   } else {
     // Next sample isn't available yet. Use estimates.
     trackData.mNextSampleTimecode = nextSampleTimecode;
@@ -2462,7 +2454,7 @@ TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
   for (uint32_t i = 0; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeInterval sampleInterval{
-      TimeUnit::FromMicroseconds(sample->mTime),
+      sample->mTime,
       sample->GetEndTime(),
       aFuzz};
@@ -2502,7 +2494,7 @@ TrackBuffersManager::GetNextRandomAccessPoint(TrackInfo::TrackType aTrack,
       break;
     }
     if (sample->mKeyframe) {
-      return TimeUnit::FromMicroseconds(sample->mTime);
+      return sample->mTime;
     }
     nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();

View File

@@ -260,7 +260,7 @@ OggCodecState::PacketOutAsMediaRawData()
   NS_ASSERTION(duration >= 0, "duration invalid");
   sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
-  sample->mTime = end_tstamp - duration;
+  sample->mTime = media::TimeUnit::FromMicroseconds(end_tstamp - duration);
   sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
   sample->mKeyframe = IsKeyframe(packet.get());
   sample->mEOS = packet->e_o_s;

View File

@@ -1317,7 +1317,7 @@ OggTrackDemuxer::Seek(const TimeUnit& aTime)
   // Check what time we actually seeked to.
   if (sample != nullptr) {
-    seekTime = TimeUnit::FromMicroseconds(sample->mTime);
+    seekTime = sample->mTime;
     OGG_DEBUG("%p seeked to time %" PRId64, this, seekTime.ToMicroseconds());
   }
   mQueuedSample = sample;
@@ -1403,15 +1403,14 @@ OggTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold)
   OGG_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && (sample = NextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }
   }
   if (found) {
     OGG_DEBUG("next sample: %f (parsed: %d)",
-              TimeUnit::FromMicroseconds(sample->mTime).ToSeconds(),
-              parsed);
+              sample->mTime.ToSeconds(), parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);

View File

@@ -75,11 +75,11 @@ BlankVideoDataCreator::Create(MediaRawData* aSample)
   return VideoData::CreateAndCopyData(mInfo,
                                       mImageContainer,
                                       aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration,
                                       buffer,
                                       aSample->mKeyframe,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       mPicture);
 }
@@ -116,7 +116,7 @@ BlankAudioDataCreator::Create(MediaRawData* aSample)
     mFrameSum++;
   }
   RefPtr<AudioData> data(new AudioData(aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration.ToMicroseconds(),
                                        uint32_t(frames.value()),
                                        Move(samples),

View File

@@ -17,7 +17,7 @@ public:
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration.ToMicroseconds(),
                                       aSample->mKeyframe,
                                       aSample->mTimecode.ToMicroseconds(),

View File

@@ -167,10 +167,11 @@ OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
                                            __func__);
   }
-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }
   // Maximum value is 63*2880, so there's no chance of overflow.
@@ -231,7 +232,7 @@ OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
                                            __func__);
   }
   NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
-  CheckedInt64 startTime = aSample->mTime;
+  CheckedInt64 startTime = aSample->mTime.ToMicroseconds();
   // Trim the initial frames while the decoder is settling.
   if (mSkip > 0) {

@@ -172,7 +172,7 @@ TheoraDecoder::ProcessDecode(MediaRawData* aSample)
     VideoData::CreateAndCopyData(info,
                                  mImageContainer,
                                  aSample->mOffset,
-                                 aSample->mTime,
+                                 aSample->mTime.ToMicroseconds(),
                                  aSample->mDuration,
                                  b,
                                  aSample->mKeyframe,

@@ -207,7 +207,7 @@ VPXDecoder::ProcessDecode(MediaRawData* aSample)
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration,
                                        b,
                                        aSample->mKeyframe,
@@ -224,7 +224,7 @@ VPXDecoder::ProcessDecode(MediaRawData* aSample)
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                        aSample->mDuration,
                                        b,
                                        alpha_plane,

@@ -141,15 +141,16 @@ VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
   int64_t aOffset = aSample->mOffset;
-  int64_t aTstampUsecs = aSample->mTime;
+  int64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
   int64_t aTotalFrames = 0;

   MOZ_ASSERT(mPacketCount >= 3);

-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }

   ogg_packet pkt = InitVorbisPacket(

@@ -79,7 +79,7 @@ WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
   size_t aLength = aSample->Size();
   ByteReader aReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
-  uint64_t aTstampUsecs = aSample->mTime;
+  uint64_t aTstampUsecs = aSample->mTime.ToMicroseconds();

   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;

@@ -200,7 +200,7 @@ GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
   frame->SetEncodedWidth(mConfig.mDisplay.width);
   frame->SetEncodedHeight(mConfig.mDisplay.height);
-  frame->SetTimeStamp(aSample->mTime);
+  frame->SetTimeStamp(aSample->mTime.ToMicroseconds());
   frame->SetCompleteFrame(true);
   frame->SetDuration(aSample->mDuration.ToMicroseconds());
   frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);

@@ -226,7 +226,7 @@ public:
     InputInfo info(
       aSample->mDuration.ToMicroseconds(), config->mImage, config->mDisplay);
-    mInputInfos.Insert(aSample->mTime, info);
+    mInputInfos.Insert(aSample->mTime.ToMicroseconds(), info);
     return RemoteDataDecoder::Decode(aSample);
   }
@@ -537,7 +537,7 @@ RemoteDataDecoder::Decode(MediaRawData* aSample)
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
   }
-  bufferInfo->Set(0, sample->Size(), sample->mTime, 0);
+  bufferInfo->Set(0, sample->Size(), sample->mTime.ToMicroseconds(), 0);

   mDrainStatus = DrainStatus::DRAINABLE;
   return mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(sample))

@@ -67,7 +67,7 @@ RefPtr<MediaDataDecoder::DecodePromise>
 AppleATDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
-      aSample->mDuration.ToMicroseconds(), aSample->mTime,
+      aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
       aSample->mKeyframe ? " keyframe" : "",
       (unsigned long long)aSample->Size());
   RefPtr<AppleATDecoder> self = this;
@@ -270,7 +270,7 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
     LOG("Error decoding audio sample: %d\n", static_cast<int>(rv));
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
-                                     static_cast<int>(rv), aSample->mTime));
+                                     static_cast<int>(rv), aSample->mTime.ToMicroseconds()));
   }

   if (numFrames) {
@@ -323,7 +323,7 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
   }

   RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
-                                          aSample->mTime,
+                                          aSample->mTime.ToMicroseconds(),
                                           duration.ToMicroseconds(),
                                           numFrames,
                                           data.Forget(),

@@ -78,7 +78,7 @@ AppleVTDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p pts %lld duration %lld us%s %" PRIuSIZE " bytes",
       aSample,
-      aSample->mTime,
+      aSample->mTime.ToMicroseconds(),
       aSample->mDuration.ToMicroseconds(),
       aSample->mKeyframe ? " keyframe" : "",
       aSample->Size());
@@ -132,7 +132,7 @@ TimingInfoFromSample(MediaRawData* aSample)
   timestamp.duration = CMTimeMake(
     aSample->mDuration.ToMicroseconds(), USECS_PER_S);
   timestamp.presentationTimeStamp =
-    CMTimeMake(aSample->mTime, USECS_PER_S);
+    CMTimeMake(aSample->mTime.ToMicroseconds(), USECS_PER_S);
   timestamp.decodeTimeStamp =
     CMTimeMake(aSample->mTimecode.ToMicroseconds(), USECS_PER_S);

@@ -33,7 +33,7 @@ public:
   explicit AppleFrameRef(const MediaRawData& aSample)
     : decode_timestamp(aSample.mTimecode)
-    , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+    , composition_timestamp(aSample.mTime)
     , duration(aSample.mDuration)
     , byte_offset(aSample.mOffset)
     , is_sync_point(aSample.mKeyframe)

@@ -137,7 +137,7 @@ FFmpegAudioDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
   }

   int64_t samplePosition = aSample->mOffset;
-  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
+  media::TimeUnit pts = aSample->mTime;

   DecodedData results;
   while (packet.size > 0) {

@@ -197,7 +197,8 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame,
     int size;
     int len = mLib->av_parser_parse2(
       mCodecParser, mCodecContext, &data, &size, inputData, inputSize,
-      aSample->mTime, aSample->mTimecode.ToMicroseconds(), aSample->mOffset);
+      aSample->mTime.ToMicroseconds(), aSample->mTimecode.ToMicroseconds(),
+      aSample->mOffset);
     if (size_t(len) > inputSize) {
       return NS_ERROR_DOM_MEDIA_DECODE_ERR;
     }
@@ -232,7 +233,7 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
   packet.data = aData;
   packet.size = aSize;
   packet.dts = mLastInputDts = aSample->mTimecode.ToMicroseconds();
-  packet.pts = aSample->mTime;
+  packet.pts = aSample->mTime.ToMicroseconds();
   packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
   packet.pos = aSample->mOffset;

@@ -445,7 +445,7 @@ OmxDataDecoder::FillAndEmptyBuffers()
     inbuf->mBuffer->nOffset = 0;
     inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->Size() ?
                              OMX_BUFFERFLAG_ENDOFFRAME : 0;
-    inbuf->mBuffer->nTimeStamp = data->mTime;
+    inbuf->mBuffer->nTimeStamp = data->mTime.ToMicroseconds();
     if (data->Size()) {
       inbuf->mRawData = mMediaRawDatas[0];
     } else {

@@ -123,7 +123,7 @@ already_AddRefed<MediaRawData>
 OmxPromiseLayer::FindAndRemoveRawData(OMX_TICKS aTimecode)
 {
   for (auto raw : mRawDatas) {
-    if (raw->mTime == aTimecode) {
+    if (raw->mTime.ToMicroseconds() == aTimecode) {
      mRawDatas.RemoveElement(raw);
      return raw.forget();
    }

@@ -193,7 +193,7 @@ WMFAudioMFTManager::Input(MediaRawData* aSample)
 {
   return mDecoder->Input(aSample->Data(),
                          uint32_t(aSample->Size()),
-                         aSample->mTime);
+                         aSample->mTime.ToMicroseconds());
 }

 HRESULT

@@ -668,12 +668,12 @@ WMFVideoMFTManager::Input(MediaRawData* aSample)
   RefPtr<IMFSample> inputSample;
   HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
                                            uint32_t(aSample->Size()),
-                                           aSample->mTime,
+                                           aSample->mTime.ToMicroseconds(),
                                            &inputSample);
   NS_ENSURE_TRUE(SUCCEEDED(hr) && inputSample != nullptr, hr);

   mLastDuration = aSample->mDuration.ToMicroseconds();
-  mLastTime = aSample->mTime;
+  mLastTime = aSample->mTime.ToMicroseconds();
   mSamplesCount++;

   // Forward sample data to the decoder.
@@ -1032,7 +1032,7 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
   aOutData = frame;

   // Set the potentially corrected pts and duration.
-  aOutData->mTime = pts.ToMicroseconds();
+  aOutData->mTime = pts;
   aOutData->mDuration = duration;

   if (mNullOutputCount) {

@@ -531,7 +531,7 @@ WAVTrackDemuxer::GetNextChunk(const MediaByteRange& aRange)
   ++mNumParsedChunks;
   ++mChunkIndex;

-  datachunk->mTime = Duration(mChunkIndex - 1).ToMicroseconds();
+  datachunk->mTime = Duration(mChunkIndex - 1);

   if (static_cast<uint32_t>(mChunkIndex) * DATA_CHUNK_SIZE < mDataLength) {
     datachunk->mDuration = Duration(1);
@@ -540,10 +540,10 @@ WAVTrackDemuxer::GetNextChunk(const MediaByteRange& aRange)
       mDataLength - mChunkIndex * DATA_CHUNK_SIZE;
     datachunk->mDuration = DurationFromBytes(mBytesRemaining);
   }
-  datachunk->mTimecode = media::TimeUnit::FromMicroseconds(datachunk->mTime);
+  datachunk->mTimecode = datachunk->mTime;
   datachunk->mKeyframe = true;

-  MOZ_ASSERT(datachunk->mTime >= 0);
+  MOZ_ASSERT(!datachunk->mTime.IsNegative());
   MOZ_ASSERT(!datachunk->mDuration.IsNegative());

   return datachunk.forget();

@@ -723,7 +723,7 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
     }
   }
   sample->mTimecode = media::TimeUnit::FromMicroseconds(tstamp);
-  sample->mTime = tstamp;
+  sample->mTime = media::TimeUnit::FromMicroseconds(tstamp);
   sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
   sample->mOffset = holder->Offset();
   sample->mKeyframe = isKeyframe;
@@ -1082,7 +1082,7 @@ WebMTrackDemuxer::Seek(const media::TimeUnit& aTime)
   // Check what time we actually seeked to.
   if (mSamples.GetSize() > 0) {
     const RefPtr<MediaRawData>& sample = mSamples.First();
-    seekTime = media::TimeUnit::FromMicroseconds(sample->mTime);
+    seekTime = sample->mTime;
   }
   SetNextKeyFrameTime();
@@ -1140,7 +1140,7 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
     return;
   }

-  int64_t frameTime = -1;
+  auto frameTime = media::TimeUnit::Invalid();

   mNextKeyframeTime.reset();
@@ -1181,8 +1181,8 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
   // in the right order.
   mSamples.PushFront(Move(skipSamplesQueue));

-  if (frameTime != -1) {
-    mNextKeyframeTime.emplace(media::TimeUnit::FromMicroseconds(frameTime));
+  if (frameTime.IsValid()) {
+    mNextKeyframeTime.emplace(frameTime);
     WEBM_DEBUG("Next Keyframe %f (%u queued %.02fs)",
                mNextKeyframeTime.value().ToSeconds(),
                uint32_t(mSamples.GetSize()),
@@ -1220,8 +1220,7 @@ WebMTrackDemuxer::UpdateSamples(nsTArray<RefPtr<MediaRawData>>& aSamples)
     }
   }
   if (mNextKeyframeTime.isNothing()
-      || aSamples.LastElement()->mTime
-         >= mNextKeyframeTime.value().ToMicroseconds()) {
+      || aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
 }
@@ -1247,13 +1246,13 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(
   bool found = false;
   RefPtr<MediaRawData> sample;
   nsresult rv = NS_OK;
-  int64_t sampleTime;

   WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
     parsed++;
-    sampleTime = sample->mTime;
-    if (sample->mKeyframe && sampleTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
+      WEBM_DEBUG("next sample: %f (parsed: %d)",
+                 sample->mTime.ToSeconds(), parsed);
       found = true;
       mSamples.Reset();
       mSamples.PushFront(sample.forget());
@@ -1263,9 +1262,6 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(
     SetNextKeyFrameTime();
   }
   if (found) {
-    WEBM_DEBUG("next sample: %f (parsed: %d)",
-               media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(),
-               parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);

@@ -101,7 +101,7 @@ already_AddRefed<MediaRawData> SampleIterator::GetNext()
   RefPtr<MediaRawData> sample = new MediaRawData();
   sample->mTimecode= TimeUnit::FromMicroseconds(s->mDecodeTime);
-  sample->mTime = s->mCompositionRange.start;
+  sample->mTime = TimeUnit::FromMicroseconds(s->mCompositionRange.start);
   sample->mDuration = TimeUnit::FromMicroseconds(s->mCompositionRange.Length());
   sample->mOffset = s->mByteRange.mStart;
   sample->mKeyframe = s->mSync;