Bug 1356530 - Change the type of MediaData::mTime to TimeUnit since int64_t is ambiguous. r=kaku

MozReview-Commit-ID: 4bVeqIuWO2O

--HG--
extra : rebase_source : d504ac15a6dc59ad42f3ab80faf23f629d74315f
extra : intermediate-source : 6e52995b6c8146451d98dffc62f6907755dc856e
extra : source : 82d2649cdafb5a6389f6858c23578811933580c9
JW Wang 2017-04-14 17:13:36 +08:00
parent 5f606637f1
commit 302d82c85a
42 changed files with 170 additions and 178 deletions
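Note on the shape of the change before the per-file hunks: MediaData::mTime switches from a raw int64_t microsecond count to the typed media::TimeUnit. Each hunk below therefore either drops a TimeUnit::FromMicroseconds(...) wrapper (the value is already a TimeUnit) or adds a .ToMicroseconds() call at the remaining integer boundaries (logging, IPC, platform decoder APIs). As a rough sketch of why the typed wrapper removes the ambiguity the commit message mentions — an illustration only, not the real mozilla::media::TimeUnit from TimeUnit.h, which is backed by a CheckedInt64 and also models validity and infinity:

    #include <cstdint>

    // Hypothetical, simplified stand-in for media::TimeUnit.
    class TimeUnit
    {
    public:
      static TimeUnit Zero() { return TimeUnit(0); }
      static TimeUnit FromMicroseconds(int64_t aUs) { return TimeUnit(aUs); }
      int64_t ToMicroseconds() const { return mUs; }
      double ToSeconds() const { return double(mUs) / 1e6; }
      bool IsNegative() const { return mUs < 0; }
      bool IsPositive() const { return mUs > 0; }
      TimeUnit operator+(const TimeUnit& aOther) const { return TimeUnit(mUs + aOther.mUs); }
      TimeUnit operator-(const TimeUnit& aOther) const { return TimeUnit(mUs - aOther.mUs); }
      bool operator<(const TimeUnit& aOther) const { return mUs < aOther.mUs; }
      bool operator>=(const TimeUnit& aOther) const { return mUs >= aOther.mUs; }

    private:
      explicit TimeUnit(int64_t aUs) : mUs(aUs) { }
      int64_t mUs;
    };

With the wrapper, a mixed-unit comparison such as sample->mTime >= threshold.ToMicroseconds() no longer compiles once mTime is a TimeUnit; the units have to be reconciled explicitly on one side or the other.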


@@ -752,12 +752,12 @@ ADTSTrackDemuxer::GetNextFrame(const adts::Frame& aFrame)
   UpdateState(aFrame);
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
   ADTSLOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64


@@ -604,12 +604,12 @@ MP3TrackDemuxer::GetNextFrame(const MediaByteRange& aRange)
   UpdateState(aRange);
-  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
+  frame->mTime = Duration(mFrameIndex - 1);
   frame->mDuration = Duration(1);
-  frame->mTimecode = media::TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(frame->mDuration.IsPositive());
   if (mNumParsedFrames == 1) {


@@ -237,7 +237,7 @@ VideoData::UpdateTimestamp(const TimeUnit& aTimestamp)
   auto updatedDuration = GetEndTime() - aTimestamp;
   MOZ_ASSERT(!updatedDuration.IsNegative());
-  mTime = aTimestamp.ToMicroseconds();
+  mTime = aTimestamp;
   mDuration = updatedDuration;
 }


@@ -294,7 +294,7 @@ public:
             uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
-    , mTime(aTimestamp)
+    , mTime(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mTimecode(media::TimeUnit::FromMicroseconds(aTimestamp))
     , mDuration(media::TimeUnit::FromMicroseconds(aDuration))
     , mFrames(aFrames)
@@ -308,8 +308,8 @@ public:
   // Approximate byte offset where this data was demuxed from its media.
   int64_t mOffset;
-  // Start time of sample, in microseconds.
-  int64_t mTime;
+  // Start time of sample.
+  media::TimeUnit mTime;
   // Codec specific internal time code. For Ogg based codecs this is the
   // granulepos.
@@ -325,13 +325,13 @@ public:
   media::TimeUnit GetEndTime() const
   {
-    return media::TimeUnit::FromMicroseconds(mTime) + mDuration;
+    return mTime + mDuration;
   }
   bool AdjustForStartTime(int64_t aStartTime)
   {
-    mTime = mTime - aStartTime;
-    return mTime >= 0;
+    mTime = mTime - media::TimeUnit::FromMicroseconds(aStartTime);
+    return !mTime.IsNegative();
   }
   template <typename ReturnType>
@@ -352,7 +352,6 @@ protected:
   MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
-    , mTime(0)
     , mFrames(aFrames)
     , mKeyframe(false)
   {
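With GetEndTime() and AdjustForStartTime() now expressed purely in TimeUnit, the invariant that the call sites below lean on reads directly in the time domain. A small hypothetical helper (invented name, not part of the patch) showing the post-patch idiom:

    // Hypothetical helper: does aTarget fall inside this sample's
    // [mTime, GetEndTime()) slice? All arithmetic stays in TimeUnit.
    bool ContainsTarget(const MediaData& aSample, const media::TimeUnit& aTarget)
    {
      MOZ_ASSERT(!aSample.mTime.IsNegative());
      return aTarget >= aSample.mTime && aSample.GetEndTime() > aTarget;
    }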


@@ -1237,8 +1237,8 @@ private:
       return seekTime;
     }
-    const int64_t audioStart = audio ? audio->mTime : INT64_MAX;
-    const int64_t videoStart = video ? video->mTime : INT64_MAX;
+    const int64_t audioStart = audio ? audio->mTime.ToMicroseconds() : INT64_MAX;
+    const int64_t videoStart = video ? video->mTime.ToMicroseconds() : INT64_MAX;
     const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds());
     const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds());
     return TimeUnit::FromMicroseconds(
@@ -1314,7 +1314,7 @@ private:
   {
     if (mSeekJob.mTarget->IsFast()
         && mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek
-        && aSample->mTime < mCurrentTimeBeforeSeek.ToMicroseconds()) {
+        && aSample->mTime < mCurrentTimeBeforeSeek) {
       // We are doing a fastSeek, but we ended up *before* the previous
       // playback position. This is surprising UX, so switch to an accurate
       // seek and decode to the seek target. This is not conformant to the
@@ -1335,7 +1335,7 @@ private:
       return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
     }
-    auto audioTime = TimeUnit::FromMicroseconds(aAudio->mTime);
+    auto audioTime = aAudio->mTime;
     if (audioTime + sampleDuration <= mSeekJob.mTarget->GetTime()) {
       // Our seek target lies after the frames in this AudioData. Don't
       // push it onto the audio queue, and keep decoding forwards.
@@ -1405,18 +1405,18 @@ private:
   {
     MOZ_ASSERT(aVideo);
     SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
-         aVideo->mTime, aVideo->GetEndTime().ToMicroseconds());
+         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
     const auto target = mSeekJob.mTarget->GetTime();
     // If the frame end time is less than the seek target, we won't want
     // to display this frame after the seek, so discard it.
     if (target >= aVideo->GetEndTime()) {
       SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 "] target=%" PRId64,
-           aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+           aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
           target.ToMicroseconds());
       mFirstVideoFrameAfterSeek = aVideo;
     } else {
-      if (target.ToMicroseconds() >= aVideo->mTime &&
+      if (target >= aVideo->mTime &&
           aVideo->GetEndTime() >= target) {
         // The seek target lies inside this frame's time slice. Adjust the
         // frame's start time to match the seek target.
@@ -1426,7 +1426,7 @@ private:
         SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 "] "
              "containing target=%" PRId64,
-             aVideo->mTime, aVideo->GetEndTime().ToMicroseconds(),
+             aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds(),
             target.ToMicroseconds());
         MOZ_ASSERT(VideoQueue().GetSize() == 0,
@@ -1475,7 +1475,7 @@ static void
 DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare)
 {
   while(aQueue.GetSize() > 0) {
-    if (aCompare(aQueue.PeekFront()->mTime)) {
+    if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) {
       RefPtr<Type> releaseMe = aQueue.PopFront();
       continue;
     }
@@ -1575,7 +1575,7 @@ private:
     MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
     MOZ_ASSERT(NeedMoreVideo());
-    if (aVideo->mTime > mCurrentTime.ToMicroseconds()) {
+    if (aVideo->mTime > mCurrentTime) {
       mMaster->PushVideo(aVideo);
       FinishSeek();
     } else {
@@ -1667,7 +1667,7 @@ private:
   {
     RefPtr<VideoData> data = VideoQueue().PeekFront();
     if (data) {
-      mSeekJob.mTarget->SetTime(TimeUnit::FromMicroseconds(data->mTime));
+      mSeekJob.mTarget->SetTime(data->mTime);
     } else {
       MOZ_ASSERT(VideoQueue().AtEndOfStream());
       mSeekJob.mTarget->SetTime(mDuration);
@@ -3177,7 +3177,8 @@ MediaDecoderStateMachine::RequestAudioData()
       // audio->GetEndTime() is not always mono-increasing in chained ogg.
       mDecodedAudioEndTime = std::max(
         aAudio->GetEndTime(), mDecodedAudioEndTime);
-      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]", aAudio->mTime,
+      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
+           aAudio->mTime.ToMicroseconds(),
           aAudio->GetEndTime().ToMicroseconds());
       mStateObj->HandleAudioDecoded(aAudio);
     },
@@ -3223,7 +3224,8 @@ MediaDecoderStateMachine::RequestVideoData(bool aSkipToNextKeyframe,
       // Handle abnormal or negative timestamps.
       mDecodedVideoEndTime = std::max(
         mDecodedVideoEndTime, aVideo->GetEndTime());
-      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", aVideo->mTime,
+      LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
+           aVideo->mTime.ToMicroseconds(),
           aVideo->GetEndTime().ToMicroseconds());
       mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime);
     },


@@ -1724,7 +1724,7 @@ MediaFormatReader::NotifyNewOutput(
   auto& decoder = GetDecoderData(aTrack);
   for (auto& sample : aResults) {
     LOGV("Received new %s sample time:%" PRId64 " duration:%" PRId64,
-         TrackTypeToStr(aTrack), sample->mTime,
+         TrackTypeToStr(aTrack), sample->mTime.ToMicroseconds(),
         sample->mDuration.ToMicroseconds());
     decoder.mOutput.AppendElement(sample);
     decoder.mNumSamplesOutput++;
@@ -2011,19 +2011,19 @@ MediaFormatReader::HandleDemuxedSamples(
     if (sample->mKeyframe) {
       ScheduleUpdate(aTrack);
     } else {
-      auto time = TimeInterval(
-        TimeUnit::FromMicroseconds(sample->mTime), sample->GetEndTime());
+      auto time = TimeInterval(sample->mTime, sample->GetEndTime());
       InternalSeekTarget seekTarget =
         decoder.mTimeThreshold.refOr(InternalSeekTarget(time, false));
       LOG("Stream change occurred on a non-keyframe. Seeking to:%" PRId64,
-          sample->mTime);
+          sample->mTime.ToMicroseconds());
       InternalSeek(aTrack, seekTarget);
     }
     return;
   }
   LOGV("Input:%" PRId64 " (dts:%" PRId64 " kf:%d)",
-       sample->mTime, sample->mTimecode.ToMicroseconds(), sample->mKeyframe);
+       sample->mTime.ToMicroseconds(), sample->mTimecode.ToMicroseconds(),
+       sample->mKeyframe);
   decoder.mNumSamplesInput++;
   decoder.mSizeOfQueue++;
   if (aTrack == TrackInfo::kVideoTrack) {
@@ -2186,7 +2186,7 @@ MediaFormatReader::Update(TrackType aTrack)
   while (decoder.mTimeThreshold && decoder.mOutput.Length()) {
     RefPtr<MediaData>& output = decoder.mOutput[0];
     InternalSeekTarget target = decoder.mTimeThreshold.ref();
-    media::TimeUnit time = media::TimeUnit::FromMicroseconds(output->mTime);
+    media::TimeUnit time = output->mTime;
     if (time >= target.Time()) {
       // We have reached our internal seek target.
       decoder.mTimeThreshold.reset();
@@ -2196,7 +2196,7 @@ MediaFormatReader::Update(TrackType aTrack)
     if (time < target.Time() || (target.mDropTarget && target.Contains(time))) {
       LOGV("Internal Seeking: Dropping %s frame time:%f wanted:%f (kf:%d)",
            TrackTypeToStr(aTrack),
-           media::TimeUnit::FromMicroseconds(output->mTime).ToSeconds(),
+           output->mTime.ToSeconds(),
           target.Time().ToSeconds(),
           output->mKeyframe);
       decoder.mOutput.RemoveElementAt(0);
@@ -2206,7 +2206,8 @@ MediaFormatReader::Update(TrackType aTrack)
   while (decoder.mOutput.Length()
          && decoder.mOutput[0]->mType == MediaData::NULL_DATA) {
-    LOGV("Dropping null data. Time: %" PRId64, decoder.mOutput[0]->mTime);
+    LOGV("Dropping null data. Time: %" PRId64,
+         decoder.mOutput[0]->mTime.ToMicroseconds());
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
   }
@@ -2218,8 +2219,7 @@ MediaFormatReader::Update(TrackType aTrack)
     decoder.mOutput.RemoveElementAt(0);
     decoder.mSizeOfQueue -= 1;
     decoder.mLastSampleTime =
-      Some(TimeInterval(TimeUnit::FromMicroseconds(output->mTime),
-                        output->GetEndTime()));
+      Some(TimeInterval(output->mTime, output->GetEndTime()));
     decoder.mNumSamplesOutputTotal++;
     ReturnOutput(output, aTrack);
     // We have a decoded sample ready to be returned.
@@ -2229,17 +2229,17 @@ MediaFormatReader::Update(TrackType aTrack)
       a.mStats.mDecodedFrames = static_cast<uint32_t>(delta);
       mLastReportedNumDecodedFrames = decoder.mNumSamplesOutputTotal;
       if (output->mKeyframe) {
-        if (mPreviousDecodedKeyframeTime_us < output->mTime) {
+        if (mPreviousDecodedKeyframeTime_us < output->mTime.ToMicroseconds()) {
          // There is a previous keyframe -> Record inter-keyframe stats.
          uint64_t segment_us =
-            output->mTime - mPreviousDecodedKeyframeTime_us;
+            output->mTime.ToMicroseconds() - mPreviousDecodedKeyframeTime_us;
          a.mStats.mInterKeyframeSum_us += segment_us;
          a.mStats.mInterKeyframeCount += 1;
          if (a.mStats.mInterKeyFrameMax_us < segment_us) {
            a.mStats.mInterKeyFrameMax_us = segment_us;
          }
        }
-        mPreviousDecodedKeyframeTime_us = output->mTime;
+        mPreviousDecodedKeyframeTime_us = output->mTime.ToMicroseconds();
      }
      nsCString error;
      mVideo.mIsHardwareAccelerated =
@@ -2379,7 +2379,7 @@ MediaFormatReader::ReturnOutput(MediaData* aData, TrackType aTrack)
   MOZ_ASSERT(GetDecoderData(aTrack).HasPromise());
   MOZ_DIAGNOSTIC_ASSERT(aData->mType != MediaData::NULL_DATA);
   LOG("Resolved data promise for %s [%" PRId64 ", %" PRId64 "]", TrackTypeToStr(aTrack),
-      aData->mTime, aData->GetEndTime().ToMicroseconds());
+      aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
   if (aTrack == TrackInfo::kAudioTrack) {
     AudioData* audioData = static_cast<AudioData*>(aData);
@@ -2506,8 +2506,7 @@ MediaFormatReader::DropDecodedSamples(TrackType aTrack)
   auto& decoder = GetDecoderData(aTrack);
   size_t lengthDecodedQueue = decoder.mOutput.Length();
   if (lengthDecodedQueue && decoder.mTimeThreshold.isSome()) {
-    TimeUnit time =
-      TimeUnit::FromMicroseconds(decoder.mOutput.LastElement()->mTime);
+    TimeUnit time = decoder.mOutput.LastElement()->mTime;
     if (time >= decoder.mTimeThreshold.ref().Time()) {
       // We would have reached our internal seek target.
       decoder.mTimeThreshold.reset();
@@ -3102,8 +3101,7 @@ MediaFormatReader::OnFirstDemuxCompleted(
   auto& decoder = GetDecoderData(aType);
   MOZ_ASSERT(decoder.mFirstDemuxedSampleTime.isNothing());
-  decoder.mFirstDemuxedSampleTime.emplace(
-    TimeUnit::FromMicroseconds(aSamples->mSamples[0]->mTime));
+  decoder.mFirstDemuxedSampleTime.emplace(aSamples->mSamples[0]->mTime);
   MaybeResolveMetadataPromise();
 }


@@ -47,7 +47,7 @@ public:
     MOZ_ASSERT(!mEndOfStream);
     MOZ_ASSERT(aItem);
     NS_ADDREF(aItem);
-    MOZ_ASSERT(aItem->GetEndTime().ToMicroseconds() >= aItem->mTime);
+    MOZ_ASSERT(aItem->GetEndTime() >= aItem->mTime);
     nsDeque::Push(aItem);
     mPushEvent.Notify(RefPtr<T>(aItem));
   }
@@ -104,7 +104,7 @@ public:
     }
     T* last = static_cast<T*>(nsDeque::Peek());
     T* first = static_cast<T*>(nsDeque::PeekFront());
-    return last->GetEndTime().ToMicroseconds() - first->mTime;
+    return (last->GetEndTime() - first->mTime).ToMicroseconds();
   }
   void LockedForEach(nsDequeFunctor& aFunctor) const {


@@ -141,7 +141,8 @@ bool AndroidMediaReader::DecodeVideoFrame(bool& aKeyframeSkip,
     if (mLastVideoFrame) {
       int64_t durationUs;
       mPlugin->GetDuration(mPlugin, &durationUs);
-      durationUs = std::max<int64_t>(durationUs - mLastVideoFrame->mTime, 0);
+      durationUs = std::max<int64_t>(
+        durationUs - mLastVideoFrame->mTime.ToMicroseconds(), 0);
       mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(durationUs));
       mVideoQueue.Push(mLastVideoFrame);
       mLastVideoFrame = nullptr;
@@ -247,8 +248,8 @@ bool AndroidMediaReader::DecodeVideoFrame(bool& aKeyframeSkip,
       // Calculate the duration as the timestamp of the current frame minus the
       // timestamp of the previous frame. We can then return the previously
       // decoded frame, and it will have a valid timestamp.
-      int64_t duration = v->mTime - mLastVideoFrame->mTime;
-      mLastVideoFrame->UpdateDuration(TimeUnit::FromMicroseconds(duration));
+      auto duration = v->mTime - mLastVideoFrame->mTime;
+      mLastVideoFrame->UpdateDuration(duration);
       // We have the start time of the next frame, so we can push the previous
       // frame into the queue, except if the end time is below the threshold,
@@ -320,7 +321,7 @@ AndroidMediaReader::Seek(const SeekTarget& aTarget)
   RefPtr<AndroidMediaReader> self = this;
   DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
     self->mSeekRequest.Complete();
-    self->mAudioSeekTimeUs = v->mTime;
+    self->mAudioSeekTimeUs = v->mTime.ToMicroseconds();
     self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
   }, [self, aTarget] () {
     self->mSeekRequest.Complete();


@@ -980,13 +980,13 @@ FlacTrackDemuxer::GetNextFrame(const flac::Frame& aFrame)
     return nullptr;
   }
-  frame->mTime = aFrame.Time().ToMicroseconds();
+  frame->mTime = aFrame.Time();
   frame->mDuration = aFrame.Duration();
-  frame->mTimecode = TimeUnit::FromMicroseconds(frame->mTime);
+  frame->mTimecode = frame->mTime;
   frame->mOffset = aFrame.Offset();
   frame->mKeyframe = true;
-  MOZ_ASSERT(frame->mTime >= 0);
+  MOZ_ASSERT(!frame->mTime.IsNegative());
   MOZ_ASSERT(!frame->mDuration.IsNegative());
   return frame.forget();


@@ -411,10 +411,10 @@ MP4TrackDemuxer::EnsureUpToDateIndex()
 RefPtr<MP4TrackDemuxer::SeekPromise>
 MP4TrackDemuxer::Seek(const media::TimeUnit& aTime)
 {
-  int64_t seekTime = aTime.ToMicroseconds();
+  auto seekTime = aTime;
   mQueuedSample = nullptr;
-  mIterator->Seek(seekTime);
+  mIterator->Seek(seekTime.ToMicroseconds());
   // Check what time we actually seeked to.
   RefPtr<MediaRawData> sample;
@@ -436,8 +436,7 @@ MP4TrackDemuxer::Seek(const media::TimeUnit& aTime)
   SetNextKeyFrameTime();
-  return SeekPromise::CreateAndResolve(
-    media::TimeUnit::FromMicroseconds(seekTime), __func__);
+  return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 already_AddRefed<MediaRawData>
@@ -461,7 +460,8 @@ MP4TrackDemuxer::GetNextSample()
       NS_WARNING(nsPrintfCString("Frame incorrectly marked as %skeyframe "
                                  "@ pts:%" PRId64 " dur:%" PRId64
                                  " dts:%" PRId64,
-                                 keyframe ? "" : "non-", sample->mTime,
+                                 keyframe ? "" : "non-",
+                                 sample->mTime.ToMicroseconds(),
                                 sample->mDuration.ToMicroseconds(),
                                 sample->mTimecode.ToMicroseconds())
                   .get());
@@ -473,7 +473,8 @@ MP4TrackDemuxer::GetNextSample()
       NS_WARNING(
         nsPrintfCString("Invalid H264 frame @ pts:%" PRId64 " dur:%" PRId64
                         " dts:%" PRId64,
-                        sample->mTime, sample->mDuration.ToMicroseconds(),
+                        sample->mTime.ToMicroseconds(),
+                        sample->mDuration.ToMicroseconds(),
                        sample->mTimecode.ToMicroseconds())
           .get());
       // We could reject the sample now, however demuxer errors are fatal.
@@ -540,7 +541,7 @@ MP4TrackDemuxer::GetSamples(int32_t aNumSamples)
   if (mNextKeyframeTime.isNothing()
       || samples->mSamples.LastElement()->mTime
-         >= mNextKeyframeTime.value().ToMicroseconds()) {
+         >= mNextKeyframeTime.value()) {
     SetNextKeyFrameTime();
   }
   return SamplesPromise::CreateAndResolve(samples, __func__);
@@ -590,7 +591,7 @@ MP4TrackDemuxer::SkipToNextRandomAccessPoint(
   RefPtr<MediaRawData> sample;
   while (!found && (sample = GetNextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }


@@ -190,7 +190,7 @@ ChromiumCDMParent::InitCDMInputBuffer(gmp::CDMInputBuffer& aBuffer,
   aBuffer = gmp::CDMInputBuffer(shmem,
                                 crypto.mKeyId,
                                 crypto.mIV,
-                                aSample->mTime,
+                                aSample->mTime.ToMicroseconds(),
                                aSample->mDuration.ToMicroseconds(),
                                crypto.mPlainSizes,
                                crypto.mEncryptedSizes,
@@ -835,7 +835,7 @@ ChromiumCDMParent::DecryptAndDecodeFrame(MediaRawData* aSample)
   }
   GMP_LOG("ChromiumCDMParent::DecryptAndDecodeFrame t=%" PRId64,
-          aSample->mTime);
+          aSample->mTime.ToMicroseconds());
   CDMInputBuffer buffer;


@@ -63,7 +63,7 @@ public:
     RefPtr<MediaTrackDemuxer> track = aTrackDemuxer;
     RefPtr<MP4DemuxerBinding> binding = this;
-    int64_t time = -1;
+    auto time = media::TimeUnit::Invalid();
     while (mIndex < mSamples.Length()) {
       uint32_t i = mIndex++;
       if (mSamples[i]->mKeyframe) {
@@ -74,7 +74,7 @@ public:
     RefPtr<GenericPromise> p = mCheckTrackKeyFramePromise.Ensure(__func__);
-    if (time == -1) {
+    if (!time.IsValid()) {
       mCheckTrackKeyFramePromise.Resolve(true, __func__);
       return p;
     }
@@ -82,7 +82,7 @@ public:
     DispatchTask(
       [track, time, binding] () {
-        track->Seek(media::TimeUnit::FromMicroseconds(time))->Then(binding->mTaskQueue, __func__,
+        track->Seek(time)->Then(binding->mTaskQueue, __func__,
          [track, time, binding] () {
            track->GetSamples()->Then(binding->mTaskQueue, __func__,
              [track, time, binding] (RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {


@@ -230,7 +230,7 @@ VideoDecoderChild::Decode(MediaRawData* aSample)
   memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size());
   MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset,
-                                        aSample->mTime,
+                                        aSample->mTime.ToMicroseconds(),
                                        aSample->mTimecode.ToMicroseconds(),
                                        aSample->mDuration.ToMicroseconds(),
                                        aSample->mFrames,


@@ -137,7 +137,7 @@ VideoDecoderParent::RecvInput(const MediaRawDataIPDL& aData)
     return IPC_OK();
   }
   data->mOffset = aData.base().offset();
-  data->mTime = aData.base().time();
+  data->mTime = media::TimeUnit::FromMicroseconds(aData.base().time());
   data->mTimecode = media::TimeUnit::FromMicroseconds(aData.base().timecode());
   data->mDuration = media::TimeUnit::FromMicroseconds(aData.base().duration());
   data->mKeyframe = aData.base().keyframe();
@@ -191,7 +191,8 @@ VideoDecoderParent::ProcessDecodedData(
   }
   VideoDataIPDL output(
-    MediaDataIPDL(data->mOffset, data->mTime, data->mTimecode.ToMicroseconds(),
+    MediaDataIPDL(data->mOffset, data->mTime.ToMicroseconds(),
+                  data->mTimecode.ToMicroseconds(),
                  data->mDuration.ToMicroseconds(),
                  data->mFrames, data->mKeyframe),
    video->mDisplay,


@@ -285,7 +285,8 @@ AudioSink::PopFrames(uint32_t aFrames)
   auto framesToPop = std::min(aFrames, mCursor->Available());
   SINK_LOG_V("playing audio at time=%" PRId64 " offset=%u length=%u",
-             mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
+             mCurrentData->mTime.ToMicroseconds(),
+             mCurrentData->mFrames - mCursor->Available(), framesToPop);
   UniquePtr<AudioStream::Chunk> chunk =
     MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
@@ -406,8 +407,8 @@ AudioSink::NotifyAudioNeeded()
     // audio hardware, so we can play across the gap.
     // Calculate the timestamp of the next chunk of audio in numbers of
     // samples.
-    CheckedInt64 sampleTime = TimeUnitToFrames(
-      TimeUnit::FromMicroseconds(data->mTime) - mStartTime, data->mRate);
+    CheckedInt64 sampleTime =
+      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
     // Calculate the number of frames that have been pushed onto the audio hardware.
     CheckedInt64 missingFrames = sampleTime - mFramesParsed;
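For reference, the TimeUnitToFrames helper used above converts a time to a whole number of audio frames at the given sample rate, carried in a CheckedInt64 so overflow surfaces as an invalid value rather than wrapping. Roughly (a simplified sketch; the real helpers live in VideoUtils.h):

    // Sketch: frames = microseconds * rate / 1e6, overflow-checked.
    CheckedInt64 TimeUnitToFramesSketch(const media::TimeUnit& aTime, uint32_t aRate)
    {
      return CheckedInt64(aTime.ToMicroseconds()) * aRate / USECS_PER_S;
    }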
@@ -501,7 +502,7 @@ AudioSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
   }
   RefPtr<AudioData> data =
     new AudioData(aReference->mOffset,
-                  aReference->mTime,
+                  aReference->mTime.ToMicroseconds(),
                  duration.value(),
                  frames,
                  Move(aBuffer),


@@ -461,7 +461,7 @@ SendStreamAudio(DecodedStreamData* aStream, const media::TimeUnit& aStartTime,
   // the exact same silences
   CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
     + TimeUnitToFrames(aStartTime, aRate);
-  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
+  CheckedInt64 frameOffset = TimeUnitToFrames(audio->mTime, aRate);
   if (!audioWrittenOffset.isValid() ||
       !frameOffset.isValid() ||
@@ -595,7 +595,7 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
   for (uint32_t i = 0; i < video.Length(); ++i) {
     VideoData* v = video[i];
-    if (mData->mNextVideoTime.ToMicroseconds() < v->mTime) {
+    if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here
       // which is fine, it just means there's no video.
@@ -605,12 +605,11 @@ DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHa
       // video frame). E.g. if we have a video frame that is 30 sec long
       // and capture happens at 15 sec, we'll have to append a black frame
       // that is 15 sec long.
-      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
-        FromMicroseconds(v->mTime),
+      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
        mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
-        tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
+        tracksStartTimeStamp + v->mTime.ToTimeDuration(),
        &output, aPrincipalHandle);
-      mData->mNextVideoTime = FromMicroseconds(v->mTime);
+      mData->mNextVideoTime = v->mTime;
     }
     if (mData->mNextVideoTime < v->GetEndTime()) {
@@ -746,7 +745,7 @@ DecodedStream::NotifyOutput(int64_t aTime)
 {
   AssertOwnerThread();
   mLastOutputTime = FromMicroseconds(aTime);
-  int64_t currentTime = GetPosition().ToMicroseconds();
+  auto currentTime = GetPosition();
   // Remove audio samples that have been played by MSG from the queue.
   RefPtr<AudioData> a = mAudioQueue.PeekFront();


@@ -365,8 +365,7 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
       continue;
     }
-    int64_t frameTime = frame->mTime;
-    if (frameTime < 0) {
+    if (frame->mTime.IsNegative()) {
       // Frame times before the start time are invalid; drop such frames
       continue;
     }
@@ -374,7 +373,7 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
     TimeStamp t;
     if (aMaxFrames > 1) {
       MOZ_ASSERT(!aClockTimeStamp.IsNull());
-      int64_t delta = frame->mTime - aClockTime;
+      int64_t delta = frame->mTime.ToMicroseconds() - aClockTime;
       t = aClockTimeStamp +
           TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
       if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
@@ -394,7 +393,8 @@ VideoSink::RenderVideoFrames(int32_t aMaxFrames,
     img->mProducerID = mProducerID;
     VSINK_LOG_V("playing video frame %" PRId64 " (id=%x) (vq-queued=%" PRIuSIZE ")",
-                frame->mTime, frame->mFrameID, VideoQueue().GetSize());
+                frame->mTime.ToMicroseconds(), frame->mFrameID,
+                VideoQueue().GetSize());
   }
   if (images.Length() > 0) {
@@ -424,7 +424,7 @@ VideoSink::UpdateRenderedVideoFrames()
     } else {
       mFrameStats.NotifyDecodedFrames({ 0, 0, 1 });
       VSINK_LOG_V("discarding video frame mTime=%" PRId64 " clock_time=%" PRId64,
-                  frame->mTime, clockTime.ToMicroseconds());
+                  frame->mTime.ToMicroseconds(), clockTime.ToMicroseconds());
     }
   }
@@ -450,7 +450,7 @@ VideoSink::UpdateRenderedVideoFrames()
     return;
   }
-  int64_t nextFrameTime = frames[1]->mTime;
+  int64_t nextFrameTime = frames[1]->mTime.ToMicroseconds();
   int64_t delta = std::max(
     nextFrameTime - clockTime.ToMicroseconds(), MIN_UPDATE_INTERVAL_US);
   TimeStamp target = nowTime + TimeDuration::FromMicroseconds(
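The frame-scheduling arithmetic above is one of the places where TimeUnit legitimately decays to int64_t: a frame's media-time delta from the clock is scaled by the playback rate to get a wall-clock offset. With invented values:

    // Invented values: frame pts 2.0 s, clock at 1.5 s, playing at 2x.
    const int64_t frameTimeUs = 2000000;
    const int64_t clockTimeUs = 1500000;
    const double playbackRate = 2.0;
    const int64_t deltaUs = frameTimeUs - clockTimeUs; // 500000 us of media time
    const double renderInUs = deltaUs / playbackRate;  // shown 250000 us from now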


@@ -479,7 +479,7 @@ MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples)
   }
   RefPtr<SamplesHolder> samples = new SamplesHolder;
   samples->mSamples.AppendElement(sample);
-  if (mNextRandomAccessPoint.ToMicroseconds() <= sample->mTime) {
+  if (mNextRandomAccessPoint <= sample->mTime) {
     MonitorAutoLock mon(mMonitor);
     mNextRandomAccessPoint =
       mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ);


@@ -459,8 +459,8 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
     MSE_DEBUG("Step1. Evicting %" PRId64 " bytes prior currentTime",
               aSizeToEvict - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(0),
-                   TimeUnit::FromMicroseconds(buffer[lastKeyFrameIndex]->mTime - 1)));
+      TimeInterval(TimeUnit::Zero(),
+                   buffer[lastKeyFrameIndex]->mTime - TimeUnit::FromMicroseconds(1)));
   }
   if (mSizeSourceBuffer <= finalSize) {
@@ -487,7 +487,7 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
   uint32_t evictedFramesStartIndex = buffer.Length();
   for (int32_t i = buffer.Length() - 1; i >= 0; i--) {
     const auto& frame = buffer[i];
-    if (frame->mTime <= upperLimit.ToMicroseconds() || toEvict < 0) {
+    if (frame->mTime <= upperLimit || toEvict < 0) {
       // We've reached a frame that shouldn't be evicted -> Evict after it -> i+1.
       // Or the previous loop reached the eviction threshold -> Evict from it -> i+1.
       evictedFramesStartIndex = i + 1;
@@ -499,7 +499,7 @@ TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
     MSE_DEBUG("Step2. Evicting %" PRId64 " bytes from trailing data",
               mSizeSourceBuffer - finalSize - toEvict);
     CodedFrameRemoval(
-      TimeInterval(TimeUnit::FromMicroseconds(buffer[evictedFramesStartIndex]->mTime),
+      TimeInterval(buffer[evictedFramesStartIndex]->mTime,
                   TimeUnit::FromInfinity()));
   }
 }
@@ -559,8 +559,8 @@ TrackBuffersManager::CodedFrameRemoval(TimeInterval aInterval)
   // then update remove end timestamp to that random access point timestamp.
   if (end < track->mBufferedRanges.GetEnd()) {
     for (auto& frame : track->GetTrackBuffer()) {
-      if (frame->mKeyframe && frame->mTime >= end.ToMicroseconds()) {
-        removeEndTimestamp = TimeUnit::FromMicroseconds(frame->mTime);
+      if (frame->mKeyframe && frame->mTime >= end) {
+        removeEndTimestamp = frame->mTime;
         break;
       }
     }
@@ -1420,14 +1420,12 @@ TimeInterval
 TrackBuffersManager::PresentationInterval(const TrackBuffer& aSamples) const
 {
   TimeInterval presentationInterval =
-    TimeInterval(TimeUnit::FromMicroseconds(aSamples[0]->mTime),
-                 aSamples[0]->GetEndTime());
+    TimeInterval(aSamples[0]->mTime, aSamples[0]->GetEndTime());
   for (uint32_t i = 1; i < aSamples.Length(); i++) {
     auto& sample = aSamples[i];
     presentationInterval = presentationInterval.Span(
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime()));
+      TimeInterval(sample->mTime, sample->GetEndTime()));
   }
   return presentationInterval;
 }
@@ -1445,8 +1443,8 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
   // Let presentation timestamp be a double precision floating point representation of the coded frame's presentation timestamp in seconds.
   TimeUnit presentationTimestamp =
     mSourceBufferAttributes->mGenerateTimestamps
-    ? TimeUnit()
-    : TimeUnit::FromMicroseconds(aSamples[0]->mTime);
+    ? TimeUnit::Zero()
+    : aSamples[0]->mTime;
   // 3. If mode equals "sequence" and group start timestamp is set, then run the following steps:
   CheckSequenceDiscontinuity(presentationTimestamp);
@@ -1488,7 +1486,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     SAMPLE_DEBUG("Processing %s frame(pts:%" PRId64 " end:%" PRId64 ", dts:%" PRId64 ", duration:%" PRId64 ", "
                  "kf:%d)",
                  aTrackData.mInfo->mMimeType.get(),
-                 sample->mTime,
+                 sample->mTime.ToMicroseconds(),
                 sample->GetEndTime().ToMicroseconds(),
                 sample->mTimecode.ToMicroseconds(),
                 sample->mDuration.ToMicroseconds(),
@@ -1524,7 +1522,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     // Step 3 is performed earlier or when a discontinuity has been detected.
     // 4. If timestampOffset is not 0, then run the following steps:
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     TimeUnit sampleTimecode = sample->mTimecode;
     TimeUnit sampleDuration = sample->mDuration;
     TimeUnit timestampOffset = mSourceBufferAttributes->GetTimestampOffset();
@@ -1618,7 +1616,7 @@ TrackBuffersManager::ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData)
     samplesRange += sampleInterval;
     sizeNewSamples += sample->ComputedSizeOfIncludingThis();
-    sample->mTime = sampleInterval.mStart.ToMicroseconds();
+    sample->mTime = sampleInterval.mStart;
     sample->mTimecode = decodeTimestamp;
     sample->mTrackInfo = trackBuffer.mLastInfo;
     samples.AppendElement(sample);
@@ -1694,7 +1692,7 @@ TrackBuffersManager::CheckNextInsertionIndex(TrackData& aTrackData,
   // We will insert our new frames right before.
   for (uint32_t i = 0; i < data.Length(); i++) {
     const RefPtr<MediaRawData>& sample = data[i];
-    if (sample->mTime >= target.mStart.ToMicroseconds() ||
+    if (sample->mTime >= target.mStart ||
        sample->GetEndTime() > target.mStart) {
      aTrackData.mNextInsertionIndex = Some(i);
      return true;
@@ -1764,8 +1762,7 @@ TrackBuffersManager::InsertFrames(TrackBuffer& aSamples,
   }
   // 16. Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
-  if (!CheckNextInsertionIndex(aTrackData,
-                               TimeUnit::FromMicroseconds(aSamples[0]->mTime))) {
+  if (!CheckNextInsertionIndex(aTrackData, aSamples[0]->mTime)) {
     RejectProcessing(NS_ERROR_FAILURE, __func__);
     return;
   }
@@ -1837,8 +1834,7 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   for (uint32_t i = aStartIndex; i < data.Length(); i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     if (aIntervals.Contains(sampleInterval)) {
       if (firstRemovedIndex.isNothing()) {
         firstRemovedIndex = Some(i);
@@ -1875,8 +1871,7 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   for (uint32_t i = firstRemovedIndex.ref(); i <= lastRemovedIndex; i++) {
     const RefPtr<MediaRawData> sample = data[i];
     TimeInterval sampleInterval =
-      TimeInterval(TimeUnit::FromMicroseconds(sample->mTime),
-                   sample->GetEndTime());
+      TimeInterval(sample->mTime, sample->GetEndTime());
     removedIntervals += sampleInterval;
     if (sample->mDuration > maxSampleDuration) {
       maxSampleDuration = sample->mDuration;
@@ -1938,15 +1933,14 @@ TrackBuffersManager::RemoveFrames(const TimeIntervals& aIntervals,
   if (aIntervals.GetEnd() >= aTrackData.mHighestStartTimestamp) {
     // The sample with the highest presentation time got removed.
     // Rescan the trackbuffer to determine the new one.
-    int64_t highestStartTime = 0;
+    TimeUnit highestStartTime;
     for (const auto& sample : data) {
       if (sample->mTime > highestStartTime) {
         highestStartTime = sample->mTime;
       }
     }
     MonitorAutoLock mon(mMonitor);
-    aTrackData.mHighestStartTimestamp =
-      TimeUnit::FromMicroseconds(highestStartTime);
+    aTrackData.mHighestStartTimestamp = highestStartTime;
   }
   return firstRemovedIndex.ref();
@@ -2116,7 +2110,7 @@ uint32_t TrackBuffersManager::FindSampleIndex(const TrackBuffer& aTrackBuffer,
   for (uint32_t i = 0; i < aTrackBuffer.Length(); i++) {
     const RefPtr<MediaRawData>& sample = aTrackBuffer[i];
-    if (sample->mTime >= target.ToMicroseconds() ||
+    if (sample->mTime >= target ||
        sample->GetEndTime() > target) {
      return i;
    }
@@ -2165,7 +2159,7 @@ TrackBuffersManager::Seek(TrackInfo::TrackType aTrack,
   uint32_t lastKeyFrameIndex = 0;
   for (; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
-    TimeUnit sampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+    TimeUnit sampleTime = sample->mTime;
     if (sampleTime > aTime && lastKeyFrameTime.isSome()) {
       break;
     }
@@ -2238,7 +2232,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
       break;
     }
     if (sample->mKeyframe &&
-        sample->mTime >= aTimeThreadshold.ToMicroseconds()) {
+        sample->mTime >= aTimeThreadshold) {
       aFound = true;
       break;
     }
@@ -2252,8 +2246,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
   // skipped.
   if (aFound) {
     trackData.mNextSampleTimecode = track[i]->mTimecode;
-    trackData.mNextSampleTime =
-      TimeUnit::FromMicroseconds(track[i]->mTime);
+    trackData.mNextSampleTime = track[i]->mTime;
    trackData.mNextGetSampleIndex = Some(i);
  } else if (i > 0) {
    // Go back to the previous keyframe or the original position so the next
@@ -2262,7 +2255,7 @@ TrackBuffersManager::SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
     const RefPtr<MediaRawData>& sample = track[j];
     if (sample->mKeyframe) {
       trackData.mNextSampleTimecode = sample->mTimecode;
-      trackData.mNextSampleTime = TimeUnit::FromMicroseconds(sample->mTime);
+      trackData.mNextSampleTime = sample->mTime;
       trackData.mNextGetSampleIndex = Some(uint32_t(j));
       // We are unable to skip to a keyframe past aTimeThreshold, however
       // we are speeding up decoding by dropping the unplayable frames.
@@ -2298,7 +2291,7 @@ TrackBuffersManager::GetSample(TrackInfo::TrackType aTrack,
   const RefPtr<MediaRawData>& sample = track[aIndex];
   if (!aIndex || sample->mTimecode <= aExpectedDts + aFuzz ||
-      sample->mTime <= (aExpectedPts + aFuzz).ToMicroseconds()) {
+      sample->mTime <= aExpectedPts + aFuzz) {
     return sample;
   }
@@ -2366,8 +2359,7 @@ TrackBuffersManager::GetSample(TrackInfo::TrackType aTrack,
   if (nextSample) {
     // We have a valid next sample, can use exact values.
     trackData.mNextSampleTimecode = nextSample->mTimecode;
-    trackData.mNextSampleTime =
-      TimeUnit::FromMicroseconds(nextSample->mTime);
+    trackData.mNextSampleTime = nextSample->mTime;
   } else {
     // Next sample isn't available yet. Use estimates.
     trackData.mNextSampleTimecode = nextSampleTimecode;
@@ -2462,7 +2454,7 @@ TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
   for (uint32_t i = 0; i < track.Length(); i++) {
     const RefPtr<MediaRawData>& sample = track[i];
     TimeInterval sampleInterval{
-      TimeUnit::FromMicroseconds(sample->mTime),
+      sample->mTime,
      sample->GetEndTime(),
      aFuzz};
@@ -2502,7 +2494,7 @@ TrackBuffersManager::GetNextRandomAccessPoint(TrackInfo::TrackType aTrack,
       break;
     }
     if (sample->mKeyframe) {
-      return TimeUnit::FromMicroseconds(sample->mTime);
+      return sample->mTime;
     }
     nextSampleTimecode = sample->mTimecode + sample->mDuration;
     nextSampleTime = sample->GetEndTime();
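Seek(), SkipToNextRandomAccessPoint() and GetNextRandomAccessPoint() above all share one scan: walk the presentation-ordered track buffer until a keyframe at or past a threshold turns up. A self-contained sketch of that scan in the new TimeUnit idiom (hypothetical helper, not part of the patch):

    // Hypothetical: start time of the first keyframe at or after aThreshold.
    Maybe<media::TimeUnit>
    FindNextKeyframeTime(const nsTArray<RefPtr<MediaRawData>>& aTrack,
                         const media::TimeUnit& aThreshold)
    {
      for (const auto& sample : aTrack) {
        if (sample->mKeyframe && sample->mTime >= aThreshold) {
          return Some(sample->mTime);
        }
      }
      return Nothing();
    }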


@@ -260,7 +260,7 @@ OggCodecState::PacketOutAsMediaRawData()
   NS_ASSERTION(duration >= 0, "duration invalid");
   sample->mTimecode = media::TimeUnit::FromMicroseconds(packet->granulepos);
-  sample->mTime = end_tstamp - duration;
+  sample->mTime = media::TimeUnit::FromMicroseconds(end_tstamp - duration);
   sample->mDuration = media::TimeUnit::FromMicroseconds(duration);
   sample->mKeyframe = IsKeyframe(packet.get());
   sample->mEOS = packet->e_o_s;
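Context for this hunk: an Ogg granulepos marks where a packet ends, so the start time is the end timestamp minus the packet's decoded duration, computed in int64_t microseconds and only then wrapped into a TimeUnit. With invented numbers:

    // Invented values: packet ends at 48000 us and carries 20000 us of
    // audio, so it starts at 48000 - 20000 = 28000 us.
    const int64_t end_tstamp = 48000;
    const int64_t duration = 20000;
    const media::TimeUnit start = media::TimeUnit::FromMicroseconds(end_tstamp - duration);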


@@ -1317,7 +1317,7 @@ OggTrackDemuxer::Seek(const TimeUnit& aTime)
   // Check what time we actually seeked to.
   if (sample != nullptr) {
-    seekTime = TimeUnit::FromMicroseconds(sample->mTime);
+    seekTime = sample->mTime;
     OGG_DEBUG("%p seeked to time %" PRId64, this, seekTime.ToMicroseconds());
   }
   mQueuedSample = sample;
@@ -1403,15 +1403,14 @@ OggTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold)
   OGG_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && (sample = NextSample())) {
     parsed++;
-    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
       found = true;
       mQueuedSample = sample;
     }
   }
   if (found) {
     OGG_DEBUG("next sample: %f (parsed: %d)",
-              TimeUnit::FromMicroseconds(sample->mTime).ToSeconds(),
-              parsed);
+              sample->mTime.ToSeconds(), parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);


@@ -75,11 +75,11 @@ BlankVideoDataCreator::Create(MediaRawData* aSample)
   return VideoData::CreateAndCopyData(mInfo,
                                       mImageContainer,
                                       aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                      aSample->mDuration,
                                      buffer,
                                      aSample->mKeyframe,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                      mPicture);
 }
@@ -116,7 +116,7 @@ BlankAudioDataCreator::Create(MediaRawData* aSample)
     mFrameSum++;
   }
   RefPtr<AudioData> data(new AudioData(aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration.ToMicroseconds(),
                                       uint32_t(frames.value()),
                                       Move(samples),


@@ -17,7 +17,7 @@ public:
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     RefPtr<VideoData> v(new VideoData(aSample->mOffset,
-                                      aSample->mTime,
+                                      aSample->mTime.ToMicroseconds(),
                                      aSample->mDuration.ToMicroseconds(),
                                      aSample->mKeyframe,
                                      aSample->mTimecode.ToMicroseconds(),


@@ -167,10 +167,11 @@ OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
                                    __func__);
   }
-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }
   // Maximum value is 63*2880, so there's no chance of overflow.
@@ -231,7 +232,7 @@ OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
                                    __func__);
   }
   NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
-  CheckedInt64 startTime = aSample->mTime;
+  CheckedInt64 startTime = aSample->mTime.ToMicroseconds();
   // Trim the initial frames while the decoder is settling.
   if (mSkip > 0) {


@@ -172,7 +172,7 @@ TheoraDecoder::ProcessDecode(MediaRawData* aSample)
       VideoData::CreateAndCopyData(info,
                                    mImageContainer,
                                    aSample->mOffset,
-                                   aSample->mTime,
+                                   aSample->mTime.ToMicroseconds(),
                                   aSample->mDuration,
                                   b,
                                   aSample->mKeyframe,


@@ -207,7 +207,7 @@ VPXDecoder::ProcessDecode(MediaRawData* aSample)
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration,
                                       b,
                                       aSample->mKeyframe,
@@ -224,7 +224,7 @@ VPXDecoder::ProcessDecode(MediaRawData* aSample)
       v = VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aSample->mOffset,
-                                       aSample->mTime,
+                                       aSample->mTime.ToMicroseconds(),
                                       aSample->mDuration,
                                       b,
                                       alpha_plane,


@@ -141,15 +141,16 @@ VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
   const unsigned char* aData = aSample->Data();
   size_t aLength = aSample->Size();
   int64_t aOffset = aSample->mOffset;
-  int64_t aTstampUsecs = aSample->mTime;
+  int64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
   int64_t aTotalFrames = 0;
   MOZ_ASSERT(mPacketCount >= 3);
-  if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+  if (!mLastFrameTime ||
+      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
     // We are starting a new block.
     mFrames = 0;
-    mLastFrameTime = Some(aSample->mTime);
+    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
   }
   ogg_packet pkt = InitVorbisPacket(


@@ -79,7 +79,7 @@ WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
   size_t aLength = aSample->Size();
   ByteReader aReader(aSample->Data(), aLength);
   int64_t aOffset = aSample->mOffset;
-  uint64_t aTstampUsecs = aSample->mTime;
+  uint64_t aTstampUsecs = aSample->mTime.ToMicroseconds();
   int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;


@@ -200,7 +200,7 @@ GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
   frame->SetEncodedWidth(mConfig.mDisplay.width);
   frame->SetEncodedHeight(mConfig.mDisplay.height);
-  frame->SetTimeStamp(aSample->mTime);
+  frame->SetTimeStamp(aSample->mTime.ToMicroseconds());
   frame->SetCompleteFrame(true);
   frame->SetDuration(aSample->mDuration.ToMicroseconds());
   frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);


@@ -226,7 +226,7 @@ public:
     InputInfo info(
       aSample->mDuration.ToMicroseconds(), config->mImage, config->mDisplay);
-    mInputInfos.Insert(aSample->mTime, info);
+    mInputInfos.Insert(aSample->mTime.ToMicroseconds(), info);
     return RemoteDataDecoder::Decode(aSample);
   }
@@ -537,7 +537,7 @@ RemoteDataDecoder::Decode(MediaRawData* aSample)
     return DecodePromise::CreateAndReject(
       MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
   }
-  bufferInfo->Set(0, sample->Size(), sample->mTime, 0);
+  bufferInfo->Set(0, sample->Size(), sample->mTime.ToMicroseconds(), 0);
   mDrainStatus = DrainStatus::DRAINABLE;
   return mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(sample))


@@ -67,7 +67,7 @@ RefPtr<MediaDataDecoder::DecodePromise>
 AppleATDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio", aSample,
-      aSample->mDuration.ToMicroseconds(), aSample->mTime,
+      aSample->mDuration.ToMicroseconds(), aSample->mTime.ToMicroseconds(),
      aSample->mKeyframe ? " keyframe" : "",
      (unsigned long long)aSample->Size());
   RefPtr<AppleATDecoder> self = this;
@@ -270,7 +270,7 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
     LOG("Error decoding audio sample: %d\n", static_cast<int>(rv));
     return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                        RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
-                                     static_cast<int>(rv), aSample->mTime));
+                                     static_cast<int>(rv), aSample->mTime.ToMicroseconds()));
   }
   if (numFrames) {
@@ -323,7 +323,7 @@ AppleATDecoder::DecodeSample(MediaRawData* aSample)
   }
   RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
-                                          aSample->mTime,
+                                          aSample->mTime.ToMicroseconds(),
                                          duration.ToMicroseconds(),
                                          numFrames,
                                          data.Forget(),


@@ -78,7 +78,7 @@ AppleVTDecoder::Decode(MediaRawData* aSample)
 {
   LOG("mp4 input sample %p pts %lld duration %lld us%s %" PRIuSIZE " bytes",
       aSample,
-      aSample->mTime,
+      aSample->mTime.ToMicroseconds(),
      aSample->mDuration.ToMicroseconds(),
      aSample->mKeyframe ? " keyframe" : "",
      aSample->Size());
@@ -132,7 +132,7 @@ TimingInfoFromSample(MediaRawData* aSample)
   timestamp.duration = CMTimeMake(
     aSample->mDuration.ToMicroseconds(), USECS_PER_S);
   timestamp.presentationTimeStamp =
-    CMTimeMake(aSample->mTime, USECS_PER_S);
+    CMTimeMake(aSample->mTime.ToMicroseconds(), USECS_PER_S);
   timestamp.decodeTimeStamp =
     CMTimeMake(aSample->mTimecode.ToMicroseconds(), USECS_PER_S);
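CMTimeMake(value, timescale) denotes value/timescale seconds, so feeding microseconds with a timescale of USECS_PER_S (1,000,000) round-trips the sample times losslessly into CoreMedia. For example:

    // 1500000 us at a 1e6 timescale is exactly 1.5 s.
    CMTime pts = CMTimeMake(1500000, USECS_PER_S);
    // CMTimeGetSeconds(pts) == 1.5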


@@ -33,7 +33,7 @@ public:
   explicit AppleFrameRef(const MediaRawData& aSample)
     : decode_timestamp(aSample.mTimecode)
-    , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+    , composition_timestamp(aSample.mTime)
     , duration(aSample.mDuration)
     , byte_offset(aSample.mOffset)
     , is_sync_point(aSample.mKeyframe)


@@ -137,7 +137,7 @@ FFmpegAudioDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
   }
   int64_t samplePosition = aSample->mOffset;
-  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
+  media::TimeUnit pts = aSample->mTime;
   DecodedData results;
   while (packet.size > 0) {


@@ -197,7 +197,8 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame,
   int size;
   int len = mLib->av_parser_parse2(
     mCodecParser, mCodecContext, &data, &size, inputData, inputSize,
-    aSample->mTime, aSample->mTimecode.ToMicroseconds(), aSample->mOffset);
+    aSample->mTime.ToMicroseconds(), aSample->mTimecode.ToMicroseconds(),
+    aSample->mOffset);
   if (size_t(len) > inputSize) {
     return NS_ERROR_DOM_MEDIA_DECODE_ERR;
   }
@@ -232,7 +233,7 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
   packet.data = aData;
   packet.size = aSize;
   packet.dts = mLastInputDts = aSample->mTimecode.ToMicroseconds();
-  packet.pts = aSample->mTime;
+  packet.pts = aSample->mTime.ToMicroseconds();
   packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
   packet.pos = aSample->mOffset;


@@ -445,7 +445,7 @@ OmxDataDecoder::FillAndEmptyBuffers()
     inbuf->mBuffer->nOffset = 0;
     inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->Size() ?
                              OMX_BUFFERFLAG_ENDOFFRAME : 0;
-    inbuf->mBuffer->nTimeStamp = data->mTime;
+    inbuf->mBuffer->nTimeStamp = data->mTime.ToMicroseconds();
     if (data->Size()) {
       inbuf->mRawData = mMediaRawDatas[0];
     } else {


@@ -123,7 +123,7 @@ already_AddRefed<MediaRawData>
 OmxPromiseLayer::FindAndRemoveRawData(OMX_TICKS aTimecode)
 {
   for (auto raw : mRawDatas) {
-    if (raw->mTime == aTimecode) {
+    if (raw->mTime.ToMicroseconds() == aTimecode) {
      mRawDatas.RemoveElement(raw);
      return raw.forget();
    }


@@ -193,7 +193,7 @@ WMFAudioMFTManager::Input(MediaRawData* aSample)
 {
   return mDecoder->Input(aSample->Data(),
                          uint32_t(aSample->Size()),
-                         aSample->mTime);
+                         aSample->mTime.ToMicroseconds());
 }
 HRESULT


@@ -668,12 +668,12 @@ WMFVideoMFTManager::Input(MediaRawData* aSample)
   RefPtr<IMFSample> inputSample;
   HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
                                            uint32_t(aSample->Size()),
-                                           aSample->mTime,
+                                           aSample->mTime.ToMicroseconds(),
                                           &inputSample);
   NS_ENSURE_TRUE(SUCCEEDED(hr) && inputSample != nullptr, hr);
   mLastDuration = aSample->mDuration.ToMicroseconds();
-  mLastTime = aSample->mTime;
+  mLastTime = aSample->mTime.ToMicroseconds();
   mSamplesCount++;
   // Forward sample data to the decoder.
@@ -1032,7 +1032,7 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
   aOutData = frame;
   // Set the potentially corrected pts and duration.
-  aOutData->mTime = pts.ToMicroseconds();
+  aOutData->mTime = pts;
   aOutData->mDuration = duration;
   if (mNullOutputCount) {


@@ -531,7 +531,7 @@ WAVTrackDemuxer::GetNextChunk(const MediaByteRange& aRange)
   ++mNumParsedChunks;
   ++mChunkIndex;
-  datachunk->mTime = Duration(mChunkIndex - 1).ToMicroseconds();
+  datachunk->mTime = Duration(mChunkIndex - 1);
   if (static_cast<uint32_t>(mChunkIndex) * DATA_CHUNK_SIZE < mDataLength) {
     datachunk->mDuration = Duration(1);
@@ -540,10 +540,10 @@ WAVTrackDemuxer::GetNextChunk(const MediaByteRange& aRange)
       mDataLength - mChunkIndex * DATA_CHUNK_SIZE;
     datachunk->mDuration = DurationFromBytes(mBytesRemaining);
   }
-  datachunk->mTimecode = media::TimeUnit::FromMicroseconds(datachunk->mTime);
+  datachunk->mTimecode = datachunk->mTime;
   datachunk->mKeyframe = true;
-  MOZ_ASSERT(datachunk->mTime >= 0);
+  MOZ_ASSERT(!datachunk->mTime.IsNegative());
   MOZ_ASSERT(!datachunk->mDuration.IsNegative());
   return datachunk.forget();


@@ -723,7 +723,7 @@ WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
       }
     }
     sample->mTimecode = media::TimeUnit::FromMicroseconds(tstamp);
-    sample->mTime = tstamp;
+    sample->mTime = media::TimeUnit::FromMicroseconds(tstamp);
     sample->mDuration = media::TimeUnit::FromMicroseconds(next_tstamp - tstamp);
     sample->mOffset = holder->Offset();
     sample->mKeyframe = isKeyframe;
@@ -1082,7 +1082,7 @@ WebMTrackDemuxer::Seek(const media::TimeUnit& aTime)
   // Check what time we actually seeked to.
   if (mSamples.GetSize() > 0) {
     const RefPtr<MediaRawData>& sample = mSamples.First();
-    seekTime = media::TimeUnit::FromMicroseconds(sample->mTime);
+    seekTime = sample->mTime;
   }
   SetNextKeyFrameTime();
@@ -1140,7 +1140,7 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
     return;
   }
-  int64_t frameTime = -1;
+  auto frameTime = media::TimeUnit::Invalid();
   mNextKeyframeTime.reset();
@@ -1181,8 +1181,8 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
   // in the right order.
   mSamples.PushFront(Move(skipSamplesQueue));
-  if (frameTime != -1) {
-    mNextKeyframeTime.emplace(media::TimeUnit::FromMicroseconds(frameTime));
+  if (frameTime.IsValid()) {
+    mNextKeyframeTime.emplace(frameTime);
     WEBM_DEBUG("Next Keyframe %f (%u queued %.02fs)",
                mNextKeyframeTime.value().ToSeconds(),
               uint32_t(mSamples.GetSize()),
@@ -1220,8 +1220,7 @@ WebMTrackDemuxer::UpdateSamples(nsTArray<RefPtr<MediaRawData>>& aSamples)
     }
   }
   if (mNextKeyframeTime.isNothing()
-      || aSamples.LastElement()->mTime
-         >= mNextKeyframeTime.value().ToMicroseconds()) {
+      || aSamples.LastElement()->mTime >= mNextKeyframeTime.value()) {
    SetNextKeyFrameTime();
  }
 }
@@ -1247,13 +1246,13 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(
   bool found = false;
   RefPtr<MediaRawData> sample;
   nsresult rv = NS_OK;
-  int64_t sampleTime;
   WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
   while (!found && NS_SUCCEEDED((rv = NextSample(sample)))) {
     parsed++;
-    sampleTime = sample->mTime;
-    if (sample->mKeyframe && sampleTime >= aTimeThreshold.ToMicroseconds()) {
+    if (sample->mKeyframe && sample->mTime >= aTimeThreshold) {
+      WEBM_DEBUG("next sample: %f (parsed: %d)",
+                 sample->mTime.ToSeconds(), parsed);
      found = true;
      mSamples.Reset();
      mSamples.PushFront(sample.forget());
@@ -1263,9 +1262,6 @@ WebMTrackDemuxer::SkipToNextRandomAccessPoint(
     SetNextKeyFrameTime();
   }
   if (found) {
-    WEBM_DEBUG("next sample: %f (parsed: %d)",
-               media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(),
-               parsed);
     return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
   } else {
     SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed);


@@ -101,7 +101,7 @@ already_AddRefed<MediaRawData> SampleIterator::GetNext()
   RefPtr<MediaRawData> sample = new MediaRawData();
   sample->mTimecode= TimeUnit::FromMicroseconds(s->mDecodeTime);
-  sample->mTime = s->mCompositionRange.start;
+  sample->mTime = TimeUnit::FromMicroseconds(s->mCompositionRange.start);
   sample->mDuration = TimeUnit::FromMicroseconds(s->mCompositionRange.Length());
   sample->mOffset = s->mByteRange.mStart;
   sample->mKeyframe = s->mSync;