Bug 1243608: P3. Make SeekTarget::mTime a TimeUnit object. r=cpearce

Also make it a private member and provide a GetTime() accessor instead.
Jean-Yves Avenard 2016-01-28 21:24:30 +11:00
parent 83fff9be79
commit b7b6a77158
11 changed files with 72 additions and 52 deletions
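
For readers skimming the diff, here is a minimal sketch of the call-site pattern this patch converts callers to. Only the SeekTarget and media::TimeUnit operations that appear in the hunks below (the TimeUnit-taking constructor, GetTime(), SetTime(), FromMicroseconds() and ToMicroseconds()) come from the patch; the helper function and its arguments are hypothetical.

// Hypothetical helper, not part of the patch: illustrates the accessor-based API.
static SeekTarget MakeAdjustedTarget(int64_t aTimeUsecs, int64_t aStartTimeUsecs)
{
  // Before this patch, callers read and wrote the public int64_t member
  // directly, e.g. "target.mTime += aStartTimeUsecs;".
  SeekTarget target(media::TimeUnit::FromMicroseconds(aTimeUsecs),
                    SeekTarget::Accurate);
  // After this patch, reads go through GetTime() and convert explicitly...
  int64_t usecs = target.GetTime().ToMicroseconds();
  (void)usecs; // read only to show the conversion
  // ...and writes go through SetTime(), keeping the arithmetic in TimeUnit.
  target.SetTime(target.GetTime() +
                 media::TimeUnit::FromMicroseconds(aStartTimeUsecs));
  return target;
}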

View File

@@ -648,7 +648,7 @@ MediaDecoderStateMachine::OnAudioDecoded(MediaData* aAudioSample)
  // We must be after the discontinuity; we're receiving samples
  // at or after the seek target.
  if (mCurrentSeek.mTarget.mType == SeekTarget::PrevSyncPoint &&
- mCurrentSeek.mTarget.mTime > mCurrentTimeBeforeSeek &&
+ mCurrentSeek.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek &&
  audio->mTime < mCurrentTimeBeforeSeek) {
  // We are doing a fastSeek, but we ended up *before* the previous
  // playback position. This is surprising UX, so switch to an accurate
@@ -968,7 +968,7 @@ MediaDecoderStateMachine::OnVideoDecoded(MediaData* aVideoSample)
  // We must be after the discontinuity; we're receiving samples
  // at or after the seek target.
  if (mCurrentSeek.mTarget.mType == SeekTarget::PrevSyncPoint &&
- mCurrentSeek.mTarget.mTime > mCurrentTimeBeforeSeek &&
+ mCurrentSeek.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek &&
  video->mTime < mCurrentTimeBeforeSeek) {
  // We are doing a fastSeek, but we ended up *before* the previous
  // playback position. This is surprising UX, so switch to an accurate
@@ -1524,7 +1524,7 @@ MediaDecoderStateMachine::Seek(SeekTarget aTarget)
  mPendingSeek.RejectIfExists(__func__);
  mPendingSeek.mTarget = aTarget;
- DECODER_LOG("Changed state to SEEKING (to %lld)", mPendingSeek.mTarget.mTime);
+ DECODER_LOG("Changed state to SEEKING (to %lld)", mPendingSeek.mTarget.GetTime().ToMicroseconds());
  SetState(DECODER_STATE_SEEKING);
  ScheduleStateMachine();
@@ -1618,12 +1618,12 @@ MediaDecoderStateMachine::InitiateSeek()
  // Bound the seek time to be inside the media range.
  int64_t end = Duration().ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
- int64_t seekTime = mCurrentSeek.mTarget.mTime;
+ int64_t seekTime = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
  "Can only seek in range [0,duration]");
- mCurrentSeek.mTarget.mTime = seekTime;
+ mCurrentSeek.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));
  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();
@@ -1633,7 +1633,7 @@ MediaDecoderStateMachine::InitiateSeek()
  // dispatching SeekingStarted, playback doesn't advance and mess with
  // mCurrentPosition that we've setting to seekTime here.
  StopPlayback();
- UpdatePlaybackPositionInternal(mCurrentSeek.mTarget.mTime);
+ UpdatePlaybackPositionInternal(mCurrentSeek.mTarget.GetTime().ToMicroseconds());
  mOnSeekingStart.Notify(mCurrentSeek.mTarget.mEventVisibility);
@@ -1643,7 +1643,7 @@ MediaDecoderStateMachine::InitiateSeek()
  // Do the seek.
  RefPtr<MediaDecoderStateMachine> self = this;
  SeekTarget seekTarget = mCurrentSeek.mTarget;
- seekTarget.mTime += StartTime();
+ seekTarget.SetTime(seekTarget.GetTime() + media::TimeUnit::FromMicroseconds(StartTime()));
  mSeekRequest.Begin(InvokeAsync(DecodeTaskQueue(), mReader.get(), __func__,
  &MediaDecoderReader::Seek, seekTarget,
  Duration().ToMicroseconds())
@@ -2082,7 +2082,7 @@ MediaDecoderStateMachine::SeekCompleted()
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(mState == DECODER_STATE_SEEKING);
- int64_t seekTime = mCurrentSeek.mTarget.mTime;
+ int64_t seekTime = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
  int64_t newCurrentTime = seekTime;
  // Setup timestamp state.
@@ -2514,7 +2514,7 @@ MediaDecoderStateMachine::DropVideoUpToSeekTarget(MediaData* aSample)
  DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
  video->mTime, video->GetEndTime());
  MOZ_ASSERT(mCurrentSeek.Exists());
- const int64_t target = mCurrentSeek.mTarget.mTime;
+ const int64_t target = mCurrentSeek.mTarget.GetTime().ToMicroseconds();
  // If the frame end time is less than the seek target, we won't want
  // to display this frame after the seek, so discard it.
@@ -2556,13 +2556,13 @@ MediaDecoderStateMachine::DropAudioUpToSeekTarget(MediaData* aSample)
  return NS_ERROR_FAILURE;
  }
- if (audio->mTime + sampleDuration.value() <= mCurrentSeek.mTarget.mTime) {
+ if (audio->mTime + sampleDuration.value() <= mCurrentSeek.mTarget.GetTime().ToMicroseconds()) {
  // Our seek target lies after the frames in this AudioData. Don't
  // push it onto the audio queue, and keep decoding forwards.
  return NS_OK;
  }
- if (audio->mTime > mCurrentSeek.mTarget.mTime) {
+ if (audio->mTime > mCurrentSeek.mTarget.GetTime().ToMicroseconds()) {
  // The seek target doesn't lie in the audio block just after the last
  // audio frames we've seen which were before the seek target. This
  // could have been the first audio data we've seen after seek, i.e. the
@@ -2578,13 +2578,13 @@ MediaDecoderStateMachine::DropAudioUpToSeekTarget(MediaData* aSample)
  // The seek target lies somewhere in this AudioData's frames, strip off
  // any frames which lie before the seek target, so we'll begin playback
  // exactly at the seek target.
- NS_ASSERTION(mCurrentSeek.mTarget.mTime >= audio->mTime,
+ NS_ASSERTION(mCurrentSeek.mTarget.GetTime().ToMicroseconds() >= audio->mTime,
  "Target must at or be after data start.");
- NS_ASSERTION(mCurrentSeek.mTarget.mTime < audio->mTime + sampleDuration.value(),
+ NS_ASSERTION(mCurrentSeek.mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(),
  "Data must end after target.");
  CheckedInt64 framesToPrune =
- UsecsToFrames(mCurrentSeek.mTarget.mTime - audio->mTime, mInfo.mAudio.mRate);
+ UsecsToFrames(mCurrentSeek.mTarget.GetTime().ToMicroseconds() - audio->mTime, mInfo.mAudio.mRate);
  if (!framesToPrune.isValid()) {
  return NS_ERROR_FAILURE;
  }
@@ -2605,7 +2605,7 @@ MediaDecoderStateMachine::DropAudioUpToSeekTarget(MediaData* aSample)
  return NS_ERROR_FAILURE;
  }
  RefPtr<AudioData> data(new AudioData(audio->mOffset,
- mCurrentSeek.mTarget.mTime,
+ mCurrentSeek.mTarget.GetTime().ToMicroseconds(),
  duration.value(),
  frames,
  Move(audioData),

View File

@@ -1407,7 +1407,7 @@ MediaFormatReader::Seek(SeekTarget aTarget, int64_t aUnused)
  {
  MOZ_ASSERT(OnTaskQueue());
- LOG("aTarget=(%lld)", aTarget.mTime);
+ LOG("aTarget=(%lld)", aTarget.GetTime().ToMicroseconds());
  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty());
  MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise());
@@ -1425,7 +1425,7 @@ MediaFormatReader::Seek(SeekTarget aTarget, int64_t aUnused)
  return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }
- mOriginalSeekTime = Some(media::TimeUnit::FromMicroseconds(aTarget.mTime));
+ mOriginalSeekTime = Some(aTarget.GetTime());
  mPendingSeekTime = mOriginalSeekTime;
  RefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);

View File

@@ -25,40 +25,60 @@ struct SeekTarget {
  Accurate
  };
  SeekTarget()
- : mTime(-1.0)
- , mType(SeekTarget::Invalid)
+ : mType(SeekTarget::Invalid)
  , mEventVisibility(MediaDecoderEventVisibility::Observable)
+ , mTime(media::TimeUnit::Invalid())
  {
  }
  SeekTarget(int64_t aTimeUsecs,
  Type aType,
  MediaDecoderEventVisibility aEventVisibility =
  MediaDecoderEventVisibility::Observable)
- : mTime(aTimeUsecs)
- , mType(aType)
+ : mType(aType)
  , mEventVisibility(aEventVisibility)
+ , mTime(media::TimeUnit::FromMicroseconds(aTimeUsecs))
  {
  }
+ SeekTarget(const media::TimeUnit& aTime,
+ Type aType,
+ MediaDecoderEventVisibility aEventVisibility =
+ MediaDecoderEventVisibility::Observable)
+ : mType(aType)
+ , mEventVisibility(aEventVisibility)
+ , mTime(aTime)
+ {
+ }
  SeekTarget(const SeekTarget& aOther)
- : mTime(aOther.mTime)
- , mType(aOther.mType)
+ : mType(aOther.mType)
  , mEventVisibility(aOther.mEventVisibility)
+ , mTime(aOther.mTime)
  {
  }
  bool IsValid() const {
  return mType != SeekTarget::Invalid;
  }
  void Reset() {
- mTime = -1;
+ mTime = media::TimeUnit::Invalid();
  mType = SeekTarget::Invalid;
  }
- // Seek target time in microseconds.
- int64_t mTime;
+ media::TimeUnit GetTime() const {
+ NS_ASSERTION(mTime.IsValid(), "Invalid SeekTarget");
+ return mTime;
+ }
+ void SetTime(const media::TimeUnit& aTime) {
+ NS_ASSERTION(aTime.IsValid(), "Invalid SeekTarget destination");
+ mTime = aTime;
+ }
  // Whether we should seek "Fast", or "Accurate".
  // "Fast" seeks to the seek point preceeding mTime, whereas
  // "Accurate" seeks as close as possible to mTime.
  Type mType;
  MediaDecoderEventVisibility mEventVisibility;
+ private:
+ // Seek target time.
+ media::TimeUnit mTime;
  };
  } // namespace mozilla
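
A usage note on the header above: because mTime is now a media::TimeUnit, clamping and comparison can stay in TimeUnit space instead of raw microseconds, as the WaveReader hunk at the end of this patch does with std::min. A small sketch follows, assuming only the TimeUnit operations already used elsewhere in this diff (FromMicroseconds(), ordering comparisons, GetTime()); the helper itself is hypothetical and not part of the patch.

#include <algorithm>  // std::min, std::max

// Hypothetical helper: clamp a seek target into [0, duration] in TimeUnit space.
static media::TimeUnit ClampSeekTime(const SeekTarget& aTarget,
                                     const media::TimeUnit& aDuration)
{
  media::TimeUnit t = aTarget.GetTime();
  t = std::min(t, aDuration);                              // not past the end
  t = std::max(t, media::TimeUnit::FromMicroseconds(0));   // not before the start
  return t;
}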

View File

@@ -328,7 +328,7 @@ AndroidMediaReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  // stream to the preceeding keyframe first, get the stream time, and then
  // seek the audio stream to match the video stream's time. Otherwise, the
  // audio and video streams won't be in sync after the seek.
- mVideoSeekTimeUs = aTarget.mTime;
+ mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
  RefPtr<AndroidMediaReader> self = this;
  mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
@@ -337,12 +337,12 @@ AndroidMediaReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
  }, [self, aTarget] () {
  self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = aTarget.mTime;
- self->mSeekPromise.Resolve(aTarget.mTime, __func__);
+ self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
+ self->mSeekPromise.Resolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }));
  } else {
- mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.mTime;
- mSeekPromise.Resolve(aTarget.mTime, __func__);
+ mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
+ mSeekPromise.Resolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }
  return p;

View File

@@ -331,11 +331,11 @@ DirectShowReader::DecodeVideoFrame(bool &aKeyframeSkip,
  RefPtr<MediaDecoderReader::SeekPromise>
  DirectShowReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  {
- nsresult res = SeekInternal(aTarget.mTime);
+ nsresult res = SeekInternal(aTarget.GetTime().ToMicroseconds());
  if (NS_FAILED(res)) {
  return SeekPromise::CreateAndReject(res, __func__);
  } else {
- return SeekPromise::CreateAndResolve(aTarget.mTime, __func__);
+ return SeekPromise::CreateAndResolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }
  }

View File

@@ -1410,11 +1410,11 @@ nsresult OggReader::SeekInUnbuffered(int64_t aTarget,
  RefPtr<MediaDecoderReader::SeekPromise>
  OggReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  {
- nsresult res = SeekInternal(aTarget.mTime, aEndTime);
+ nsresult res = SeekInternal(aTarget.GetTime().ToMicroseconds(), aEndTime);
  if (NS_FAILED(res)) {
  return SeekPromise::CreateAndReject(res, __func__);
  } else {
- return SeekPromise::CreateAndResolve(aTarget.mTime, __func__);
+ return SeekPromise::CreateAndResolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }
  }

View File

@@ -346,11 +346,11 @@ status_t AudioOffloadPlayer::DoSeek()
  CHECK(mAudioSink.get());
  AUDIO_OFFLOAD_LOG(LogLevel::Debug,
- "DoSeek ( %lld )", mSeekTarget.mTime);
+ "DoSeek ( %lld )", mSeekTarget.GetTime().ToMicroseconds());
  mReachedEOS = false;
  mPositionTimeMediaUs = -1;
- mStartPosUs = mSeekTarget.mTime;
+ mStartPosUs = mSeekTarget.GetTime().ToMicroseconds();
  if (!mSeekPromise.IsEmpty()) {
  nsCOMPtr<nsIRunnable> nsEvent =
@@ -388,7 +388,7 @@ int64_t AudioOffloadPlayer::GetMediaTimeUs()
  int64_t playPosition = 0;
  if (mSeekTarget.IsValid()) {
- return mSeekTarget.mTime;
+ return mSeekTarget.GetTime().ToMicroseconds();
  }
  if (!mStarted) {
  return mPositionTimeMediaUs;
@@ -506,7 +506,7 @@ size_t AudioOffloadPlayer::FillBuffer(void* aData, size_t aSize)
  android::Mutex::Autolock autoLock(mLock);
  if (mSeekTarget.IsValid()) {
- seekTimeUs = mSeekTarget.mTime;
+ seekTimeUs = mSeekTarget.GetTime().ToMicroseconds();
  options.setSeekTo(seekTimeUs);
  refreshSeekTime = true;
@@ -559,7 +559,7 @@ size_t AudioOffloadPlayer::FillBuffer(void* aData, size_t aSize)
  }
  if (mSeekTarget.IsValid() &&
- seekTimeUs == mSeekTarget.mTime) {
+ seekTimeUs == mSeekTarget.GetTime().ToMicroseconds()) {
  MOZ_ASSERT(mSeekTarget.IsValid());
  mSeekTarget.Reset();
  if (!mSeekPromise.IsEmpty()) {

View File

@@ -547,12 +547,12 @@ MediaOmxReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
  }, [self, aTarget] () {
  self->mSeekRequest.Complete();
- self->mAudioSeekTimeUs = aTarget.mTime;
- self->mSeekPromise.Resolve(aTarget.mTime, __func__);
+ self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
+ self->mSeekPromise.Resolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }));
  } else {
- mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.mTime;
- mSeekPromise.Resolve(aTarget.mTime, __func__);
+ mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
+ mSeekPromise.Resolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }
  return p;

View File

@@ -40,7 +40,7 @@ RtspOmxReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  // Rtsp stream server through network and also clear the buffer data in
  // RtspMediaResource.
  if (mRtspResource) {
- mRtspResource->SeekTime(aTarget.mTime);
+ mRtspResource->SeekTime(aTarget.GetTime().ToMicroseconds());
  mRtspResource->EnablePlayoutDelay();
  }

View File

@@ -211,9 +211,9 @@ RawReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  MOZ_ASSERT(OnTaskQueue());
  uint32_t frame = mCurrentFrame;
- if (aTarget.mTime >= UINT_MAX)
+ if (aTarget.GetTime().ToMicroseconds() >= UINT_MAX)
  return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
- mCurrentFrame = aTarget.mTime * mFrameRate / USECS_PER_S;
+ mCurrentFrame = aTarget.GetTime().ToMicroseconds() * mFrameRate / USECS_PER_S;
  CheckedUint32 offset = CheckedUint32(mCurrentFrame) * mFrameSize;
  offset += sizeof(RawVideoHeader);
@@ -233,12 +233,12 @@ RawReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  }, [self, aTarget] () {
  MOZ_ASSERT(self->OnTaskQueue());
  return self->mVideoQueue.Peek() &&
- self->mVideoQueue.Peek()->GetEndTime() >= aTarget.mTime;
+ self->mVideoQueue.Peek()->GetEndTime() >= aTarget.GetTime().ToMicroseconds();
  })->Then(OwnerThread(), __func__, [self, p, aTarget] () {
  while (self->mVideoQueue.GetSize() >= 2) {
  RefPtr<VideoData> releaseMe = self->mVideoQueue.PopFront();
  }
- p->Resolve(aTarget.mTime, __func__);
+ p->Resolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }, [self, p, frame] {
  self->mCurrentFrame = frame;
  self->mVideoQueue.Reset();

View File

@@ -275,15 +275,15 @@ RefPtr<MediaDecoderReader::SeekPromise>
  WaveReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  {
  MOZ_ASSERT(OnTaskQueue());
- LOG(LogLevel::Debug, ("%p About to seek to %lld", mDecoder, aTarget.mTime));
+ LOG(LogLevel::Debug, ("%p About to seek to %lld", mDecoder, aTarget.GetTime().ToMicroseconds()));
  if (NS_FAILED(ResetDecode())) {
  return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }
  double d = BytesToTime(GetDataLength());
  NS_ASSERTION(d < INT64_MAX / USECS_PER_S, "Duration overflow");
- int64_t duration = static_cast<int64_t>(d * USECS_PER_S);
- double seekTime = std::min(aTarget.mTime, duration) / static_cast<double>(USECS_PER_S);
+ media::TimeUnit duration = media::TimeUnit::FromSeconds(d);
+ double seekTime = std::min(aTarget.GetTime(), duration).ToSeconds();
  int64_t position = RoundDownToFrame(static_cast<int64_t>(TimeToBytes(seekTime)));
  NS_ASSERTION(INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
  position += mWavePCMOffset;
@@ -291,7 +291,7 @@ WaveReader::Seek(SeekTarget aTarget, int64_t aEndTime)
  if (NS_FAILED(res)) {
  return SeekPromise::CreateAndReject(res, __func__);
  } else {
- return SeekPromise::CreateAndResolve(aTarget.mTime, __func__);
+ return SeekPromise::CreateAndResolve(aTarget.GetTime().ToMicroseconds(), __func__);
  }
  }