Bug 601535 - content/media should use CheckedInt.h. r=doublec

Ching Wei Tseng 2012-02-22 13:28:06 +01:00
parent 55a6bf6af8
commit b369406826
7 changed files with 114 additions and 292 deletions

View File

@@ -41,163 +41,16 @@
#include "mozilla/StdInt.h"
// Adds two 32bit unsigned numbers, returns true if the addition succeeded,
// or false if the addition would result in an overflow.
bool AddOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult) {
PRUint64 rl = static_cast<PRUint64>(a) + static_cast<PRUint64>(b);
if (rl > PR_UINT32_MAX) {
return false;
}
aResult = static_cast<PRUint32>(rl);
return true;
}
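The CheckedInt.h replacement for a helper like this collapses to a single checked operation. A rough sketch, assuming the mozilla::CheckedUint32 alias this patch pulls in elsewhere; the helper name is hypothetical:
bool CheckedAdd32Example(PRUint32 a, PRUint32 b, PRUint32& aResult) {
  // Overflow state travels inside the CheckedUint32 value itself.
  mozilla::CheckedUint32 r = mozilla::CheckedUint32(a) + b;
  if (!r.valid()) {
    return false; // the addition would overflow 32 bits
  }
  aResult = r.value();
  return true;
}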
bool MulOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult)
{
// 32 bit integer multiplication with overflow checking. Returns true
// if the multiplication was successful, or false if the operation resulted
// in an integer overflow.
PRUint64 a64 = a;
PRUint64 b64 = b;
PRUint64 r64 = a64 * b64;
if (r64 > PR_UINT32_MAX)
return false;
aResult = static_cast<PRUint32>(r64);
return true;
}
// Adds two 64bit numbers, returns true if the addition succeeded, or false
// if the addition would result in an overflow.
bool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult) {
if (b < 1) {
if (INT64_MIN - b <= a) {
aResult = a + b;
return true;
}
} else if (INT64_MAX - b >= a) {
aResult = a + b;
return true;
}
return false;
}
// 64 bit integer multiplication with overflow checking. Returns true
// if the multiplication was successful, or false if the operation resulted
// in an integer overflow.
bool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult) {
// We break a multiplication a * b into sign_a * sign_b * abs(a) * abs(b)
//
// This is equivalent to:
//
// (sign_a * sign_b) * ((a_hi * 2^32) + a_lo) * ((b_hi * 2^32) + b_lo)
//
// Which is equivalent to:
//
// (sign_a * sign_b) *
// ((a_hi * b_hi << 64) +
// (a_hi * b_lo << 32) + (a_lo * b_hi << 32) +
// a_lo * b_lo)
//
// So to check if a*b overflows, we must check each sub part of the above
// sum.
//
// Note: -1 * INT64_MIN == INT64_MIN ; we can't negate INT64_MIN!
// Note: Shift of negative numbers is undefined.
//
// Figure out the sign after multiplication. Then we can just work with
// unsigned numbers.
PRInt64 sign = (!(a < 0) == !(b < 0)) ? 1 : -1;
PRInt64 abs_a = (a < 0) ? -a : a;
PRInt64 abs_b = (b < 0) ? -b : b;
if (abs_a < 0) {
NS_ASSERTION(a == INT64_MIN, "How else can this happen?");
if (b == 0 || b == 1) {
aResult = a * b;
return true;
} else {
return false;
}
}
if (abs_b < 0) {
NS_ASSERTION(b == INT64_MIN, "How else can this happen?");
if (a == 0 || a == 1) {
aResult = a * b;
return true;
} else {
return false;
}
}
NS_ASSERTION(abs_a >= 0 && abs_b >= 0, "abs values must be non-negative");
PRInt64 a_hi = abs_a >> 32;
PRInt64 a_lo = abs_a & 0xFFFFFFFF;
PRInt64 b_hi = abs_b >> 32;
PRInt64 b_lo = abs_b & 0xFFFFFFFF;
NS_ASSERTION((a_hi<<32) + a_lo == abs_a, "Partition must be correct");
NS_ASSERTION((b_hi<<32) + b_lo == abs_b, "Partition must be correct");
// In the sub-equation (a_hi * b_hi << 64), if a_hi or b_hi
// are non-zero, this will overflow as it's shifted by 64.
// Abort if this overflows.
if (a_hi != 0 && b_hi != 0) {
return false;
}
// We can now assume that either a_hi or b_hi is 0.
NS_ASSERTION(a_hi == 0 || b_hi == 0, "One of these must be 0");
// Next we calculate:
// (a_hi * b_lo << 32) + (a_lo * b_hi << 32)
// We can factor this as:
// (a_hi * b_lo + a_lo * b_hi) << 32
PRInt64 q = a_hi * b_lo + a_lo * b_hi;
if (q > PR_INT32_MAX) {
// q will overflow when we shift by 32; abort.
return false;
}
q <<= 32;
// Both a_lo and b_lo are less than INT32_MAX, so can't overflow.
PRUint64 lo = a_lo * b_lo;
if (lo > INT64_MAX) {
return false;
}
// Add the final result. We must check for overflow during addition.
if (!AddOverflow(q, static_cast<PRInt64>(lo), aResult)) {
return false;
}
aResult *= sign;
NS_ASSERTION(a * b == aResult, "We didn't overflow, but result is wrong!");
return true;
}
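All of the limb-splitting and sign handling above is exactly the bookkeeping CheckedInt.h hides. A minimal sketch of the equivalent checked 64-bit multiply, assuming the mozilla::CheckedInt64 alias used throughout this patch; the helper name is hypothetical:
bool CheckedMul64Example(PRInt64 a, PRInt64 b, PRInt64& aResult) {
  // CheckedInt64 tracks overflow across the multiplication internally.
  mozilla::CheckedInt64 r = mozilla::CheckedInt64(a) * b;
  if (!r.valid()) {
    return false; // the multiplication overflowed PRInt64
  }
  aResult = r.value();
  return true;
}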
// Converts from number of audio frames to microseconds, given the specified
// audio rate.
bool FramesToUsecs(PRInt64 aFrames, PRUint32 aRate, PRInt64& aOutUsecs)
{
PRInt64 x;
if (!MulOverflow(aFrames, USECS_PER_S, x))
return false;
aOutUsecs = x / aRate;
return true;
CheckedInt64 FramesToUsecs(PRInt64 aFrames, PRUint32 aRate) {
return (CheckedInt64(aFrames) * USECS_PER_S) / aRate;
}
// Converts from microseconds to number of audio frames, given the specified
// audio rate.
bool UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutFrames)
{
PRInt64 x;
if (!MulOverflow(aUsecs, aRate, x))
return false;
aOutFrames = x / USECS_PER_S;
return true;
CheckedInt64 UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate) {
return (CheckedInt64(aUsecs) * aRate) / USECS_PER_S;
}
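Because the conversion helpers now return a CheckedInt64 instead of writing through an out-parameter, callers test valid() once and then read value(), as the hunks below do. A sketch of that call pattern; the function and variable names are hypothetical:
nsresult SeekTargetFramesExample(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutFrames) {
  CheckedInt64 frames = UsecsToFrames(aUsecs, aRate);
  if (!frames.valid()) {
    return NS_ERROR_FAILURE; // the usecs-to-frames conversion overflowed
  }
  aOutFrames = frames.value();
  return NS_OK;
}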
static PRInt32 ConditionDimension(float aValue)

View File

@@ -44,6 +44,13 @@
#include "nsRect.h"
#include "nsIThreadManager.h"
#include "CheckedInt.h"
using mozilla::CheckedInt64;
using mozilla::CheckedUint64;
using mozilla::CheckedInt32;
using mozilla::CheckedUint32;
// This file contains stuff we'd rather put elsewhere, but which is
// dependent on other changes which we don't want to wait for. We plan to
// remove this file in the near future.
@@ -97,35 +104,17 @@ private:
} // namespace mozilla
// Adds two 32bit unsigned numbers, returns true if the addition succeeded,
// or false if the addition would result in an overflow.
bool AddOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult);
// 32 bit integer multiplication with overflow checking. Returns true
// if the multiplication was successful, or false if the operation resulted
// in an integer overflow.
bool MulOverflow32(PRUint32 a, PRUint32 b, PRUint32& aResult);
// Adds two 64bit numbers, returns true if the addition succeeded, or false
// if the addition would result in an overflow.
bool AddOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// 64 bit integer multiplication with overflow checking. Returns true
// if the multiplication was successful, or false if the operation resulted
// in an integer overflow.
bool MulOverflow(PRInt64 a, PRInt64 b, PRInt64& aResult);
// Converts from number of audio frames (aFrames) to microseconds, given
// the specified audio rate (aRate). Stores result in aOutUsecs. Returns true
// if the operation succeeded, or false if there was an integer overflow
// while calculating the conversion.
bool FramesToUsecs(PRInt64 aFrames, PRUint32 aRate, PRInt64& aOutUsecs);
CheckedInt64 FramesToUsecs(PRInt64 aFrames, PRUint32 aRate);
// Converts from microseconds (aUsecs) to number of audio frames, given the
// specified audio rate (aRate). Stores the result in aOutFrames. Returns
// true if the operation succeeded, or false if there was an integer
// overflow while calculating the conversion.
bool UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate, PRInt64& aOutFrames);
CheckedInt64 UsecsToFrames(PRInt64 aUsecs, PRUint32 aRate);
// Number of microseconds per second. 1e6.
static const PRInt64 USECS_PER_S = 1000000;
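Validity propagates through CheckedInt arithmetic, so a whole chain of operations can be evaluated and checked once at the end; this is why the call sites below fold conversions, additions and divisions into single expressions. A rough sketch of that pattern, mirroring the AudioLoop change; the function and parameter names are hypothetical:
nsresult AudioEndTimeExample(PRInt64 aFrames, PRUint32 aRate, PRInt64 aStartUsecs, PRInt64& aOutEndUsecs) {
  // One validity check covers the frames-to-usecs conversion and the addition after it.
  CheckedInt64 endUsecs = FramesToUsecs(aFrames, aRate) + aStartUsecs;
  if (!endUsecs.valid()) {
    return NS_ERROR_FAILURE;
  }
  aOutEndUsecs = endUsecs.value();
  return NS_OK;
}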

View File

@ -142,12 +142,10 @@ VideoData* VideoData::Create(nsVideoInfo& aInfo,
// Ensure the picture size specified in the headers can be extracted out of
// the frame we've been supplied without indexing out of bounds.
PRUint32 xLimit;
PRUint32 yLimit;
if (!AddOverflow32(aPicture.x, aPicture.width, xLimit) ||
xLimit > aBuffer.mPlanes[0].mStride ||
!AddOverflow32(aPicture.y, aPicture.height, yLimit) ||
yLimit > aBuffer.mPlanes[0].mHeight)
CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
if (!xLimit.valid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
!yLimit.valid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
{
// The specified picture dimensions can't be contained inside the video
// frame; we'll stomp memory if we try to copy it. Fail.
@@ -312,10 +310,6 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
if (HasAudio()) {
// Decode audio forward to the seek target.
PRInt64 targetFrame = 0;
if (!UsecsToFrames(aTarget, mInfo.mAudioRate, targetFrame)) {
return NS_ERROR_FAILURE;
}
bool eof = false;
while (HasAudio() && !eof) {
while (!eof && mAudioQueue.GetSize() == 0) {
@@ -330,18 +324,19 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
const AudioData* audio = mAudioQueue.PeekFront();
if (!audio)
break;
PRInt64 startFrame = 0;
if (!UsecsToFrames(audio->mTime, mInfo.mAudioRate, startFrame)) {
CheckedInt64 startFrame = UsecsToFrames(audio->mTime, mInfo.mAudioRate);
CheckedInt64 targetFrame = UsecsToFrames(aTarget, mInfo.mAudioRate);
if (!startFrame.valid() || !targetFrame.valid()) {
return NS_ERROR_FAILURE;
}
if (startFrame + audio->mFrames <= targetFrame) {
if (startFrame.value() + audio->mFrames <= targetFrame.value()) {
// Our seek target lies after the frames in this AudioData. Pop it
// off the queue, and keep decoding forwards.
delete mAudioQueue.PopFront();
audio = nsnull;
continue;
}
if (startFrame > targetFrame) {
if (startFrame.value() > targetFrame.value()) {
// The seek target doesn't lie in the audio block just after the last
// audio frames we've seen which were before the seek target. This
// could have been the first audio data we've seen after seek, i.e. the
@@ -356,10 +351,12 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
// The seek target lies somewhere in this AudioData's frames, strip off
// any frames which lie before the seek target, so we'll begin playback
// exactly at the seek target.
NS_ASSERTION(targetFrame >= startFrame, "Target must be at or after data start.");
NS_ASSERTION(targetFrame < startFrame + audio->mFrames, "Data must end after target.");
NS_ASSERTION(targetFrame.value() >= startFrame.value(),
"Target must at or be after data start.");
NS_ASSERTION(targetFrame.value() < startFrame.value() + audio->mFrames,
"Data must end after target.");
PRInt64 framesToPrune = targetFrame - startFrame;
PRInt64 framesToPrune = targetFrame.value() - startFrame.value();
if (framesToPrune > audio->mFrames) {
// We've messed up somehow. Don't try to trim frames; the |frames|
// variable below will overflow.
@@ -372,13 +369,13 @@ nsresult nsBuiltinDecoderReader::DecodeToTarget(PRInt64 aTarget)
memcpy(audioData.get(),
audio->mAudioData.get() + (framesToPrune * channels),
frames * channels * sizeof(AudioDataValue));
PRInt64 duration;
if (!FramesToUsecs(frames, mInfo.mAudioRate, duration)) {
CheckedInt64 duration = FramesToUsecs(frames, mInfo.mAudioRate);
if (!duration.valid()) {
return NS_ERROR_FAILURE;
}
nsAutoPtr<AudioData> data(new AudioData(audio->mOffset,
aTarget,
duration,
duration.value(),
frames,
audioData.forget(),
channels));

View File

@@ -806,53 +806,39 @@ void nsBuiltinDecoderStateMachine::AudioLoop()
// Calculate the number of frames that have been pushed onto the audio
// hardware.
PRInt64 playedFrames = 0;
if (!UsecsToFrames(audioStartTime, rate, playedFrames)) {
NS_WARNING("Int overflow converting playedFrames");
break;
}
if (!AddOverflow(playedFrames, audioDuration, playedFrames)) {
NS_WARNING("Int overflow adding playedFrames");
break;
}
CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
audioDuration;
// Calculate the timestamp of the next chunk of audio in numbers of
// samples.
PRInt64 sampleTime = 0;
if (!UsecsToFrames(s->mTime, rate, sampleTime)) {
NS_WARNING("Int overflow converting sampleTime");
break;
}
PRInt64 missingFrames = 0;
if (!AddOverflow(sampleTime, -playedFrames, missingFrames)) {
NS_WARNING("Int overflow adding missingFrames");
CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
CheckedInt64 missingFrames = sampleTime - playedFrames;
if (!missingFrames.valid() || !sampleTime.valid()) {
NS_WARNING("Int overflow adding in AudioLoop()");
break;
}
PRInt64 framesWritten = 0;
if (missingFrames > 0) {
if (missingFrames.value() > 0) {
// The next audio chunk begins some time after the end of the last chunk
// we pushed to the audio hardware. We must push silence into the audio
// hardware so that the next audio chunk begins playback at the correct
// time.
missingFrames = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX), missingFrames);
framesWritten = PlaySilence(static_cast<PRUint32>(missingFrames),
channels, playedFrames);
missingFrames = NS_MIN(static_cast<PRInt64>(PR_UINT32_MAX),
missingFrames.value());
framesWritten = PlaySilence(static_cast<PRUint32>(missingFrames.value()),
channels, playedFrames.value());
} else {
framesWritten = PlayFromAudioQueue(sampleTime, channels);
framesWritten = PlayFromAudioQueue(sampleTime.value(), channels);
}
audioDuration += framesWritten;
{
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
PRInt64 playedUsecs;
if (!FramesToUsecs(audioDuration, rate, playedUsecs)) {
NS_WARNING("Int overflow calculating playedUsecs");
break;
}
if (!AddOverflow(audioStartTime, playedUsecs, mAudioEndTime)) {
CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime;
if (!playedUsecs.valid()) {
NS_WARNING("Int overflow calculating audio end time");
break;
}
mAudioEndTime = playedUsecs.value();
}
}
if (mReader->mAudioQueue.AtEndOfStream() &&

View File

@@ -318,33 +318,27 @@ PRInt64 nsTheoraState::Time(th_info* aInfo, PRInt64 aGranulepos)
if (aGranulepos < 0 || aInfo->fps_numerator == 0) {
return -1;
}
PRInt64 t = 0;
// Implementation of th_granule_frame inlined here to operate
// on the th_info structure instead of the theora_state.
int shift = aInfo->keyframe_granule_shift;
ogg_int64_t iframe = aGranulepos >> shift;
ogg_int64_t pframe = aGranulepos - (iframe << shift);
PRInt64 frameno = iframe + pframe - TH_VERSION_CHECK(aInfo, 3, 2, 1);
if (!AddOverflow(frameno, 1, t))
CheckedInt64 t = ((CheckedInt64(frameno) + 1) * USECS_PER_S) * aInfo->fps_denominator;
if (!t.valid())
return -1;
if (!MulOverflow(t, USECS_PER_S, t))
return -1;
if (!MulOverflow(t, aInfo->fps_denominator, t))
return -1;
return t / aInfo->fps_numerator;
t /= aInfo->fps_numerator;
return t.valid() ? t.value() : -1;
}
PRInt64 nsTheoraState::StartTime(PRInt64 granulepos) {
if (granulepos < 0 || !mActive || mInfo.fps_numerator == 0) {
return -1;
}
PRInt64 t = 0;
PRInt64 frameno = th_granule_frame(mCtx, granulepos);
if (!MulOverflow(frameno, USECS_PER_S, t))
CheckedInt64 t = (CheckedInt64(th_granule_frame(mCtx, granulepos)) * USECS_PER_S) * mInfo.fps_denominator;
if (!t.valid())
return -1;
if (!MulOverflow(t, mInfo.fps_denominator, t))
return -1;
return t / mInfo.fps_numerator;
return t.value() / mInfo.fps_numerator;
}
PRInt64
@@ -361,9 +355,10 @@ nsTheoraState::MaxKeyframeOffset()
PRInt64 keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;
// Length of frame in usecs.
PRInt64 d = 0; // d will be 0 if multiplication overflows.
MulOverflow(USECS_PER_S, mInfo.fps_denominator, d);
frameDuration = d / mInfo.fps_numerator;
CheckedInt64 d = CheckedInt64(mInfo.fps_denominator) * USECS_PER_S;
if (!d.valid())
d = 0;
frameDuration = d.value() / mInfo.fps_numerator;
// Total time in usecs keyframe can be offset from any given frame.
return frameDuration * keyframeDiff;
@@ -601,9 +596,10 @@ PRInt64 nsVorbisState::Time(vorbis_info* aInfo, PRInt64 aGranulepos)
if (aGranulepos == -1 || aInfo->rate == 0) {
return -1;
}
PRInt64 t = 0;
MulOverflow(USECS_PER_S, aGranulepos, t);
return t / aInfo->rate;
CheckedInt64 t = CheckedInt64(aGranulepos) * USECS_PER_S;
if (!t.valid())
t = 0;
return t.value() / aInfo->rate;
}
bool
@@ -872,7 +868,6 @@ bool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
PRUint32 serialno = LEUint32(aPacket->packet + INDEX_SERIALNO_OFFSET);
PRInt64 numKeyPoints = LEInt64(aPacket->packet + INDEX_NUM_KEYPOINTS_OFFSET);
PRInt64 n = 0;
PRInt64 endTime = 0, startTime = 0;
const unsigned char* p = aPacket->packet;
@@ -884,34 +879,32 @@ bool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
}
// Extract the start time.
n = LEInt64(p + INDEX_FIRST_NUMER_OFFSET);
PRInt64 t;
if (!MulOverflow(n, USECS_PER_S, t)) {
CheckedInt64 t = CheckedInt64(LEInt64(p + INDEX_FIRST_NUMER_OFFSET)) * USECS_PER_S;
if (!t.valid()) {
return (mActive = false);
} else {
startTime = t / timeDenom;
startTime = t.value() / timeDenom;
}
// Extract the end time.
n = LEInt64(p + INDEX_LAST_NUMER_OFFSET);
if (!MulOverflow(n, USECS_PER_S, t)) {
t = CheckedInt64(LEInt64(p + INDEX_LAST_NUMER_OFFSET)) * USECS_PER_S;
if (!t.valid()) {
return (mActive = false);
} else {
endTime = t / timeDenom;
endTime = t.value() / timeDenom;
}
// Check the numKeyPoints value read, ensure we're not going to run out of
// memory while trying to decode the index packet.
PRInt64 minPacketSize;
if (!MulOverflow(numKeyPoints, MIN_KEY_POINT_SIZE, minPacketSize) ||
!AddOverflow(INDEX_KEYPOINT_OFFSET, minPacketSize, minPacketSize))
CheckedInt64 minPacketSize = (CheckedInt64(numKeyPoints) * MIN_KEY_POINT_SIZE) + INDEX_KEYPOINT_OFFSET;
if (!minPacketSize.valid())
{
return (mActive = false);
}
PRInt64 sizeofIndex = aPacket->bytes - INDEX_KEYPOINT_OFFSET;
PRInt64 maxNumKeyPoints = sizeofIndex / MIN_KEY_POINT_SIZE;
if (aPacket->bytes < minPacketSize ||
if (aPacket->bytes < minPacketSize.value() ||
numKeyPoints > maxNumKeyPoints ||
numKeyPoints < 0)
{
@@ -933,32 +926,34 @@ bool nsSkeletonState::DecodeIndex(ogg_packet* aPacket)
p = aPacket->packet + INDEX_KEYPOINT_OFFSET;
const unsigned char* limit = aPacket->packet + aPacket->bytes;
PRInt64 numKeyPointsRead = 0;
PRInt64 offset = 0;
PRInt64 time = 0;
CheckedInt64 offset = 0;
CheckedInt64 time = 0;
while (p < limit &&
numKeyPointsRead < numKeyPoints)
{
PRInt64 delta = 0;
p = ReadVariableLengthInt(p, limit, delta);
offset += delta;
if (p == limit ||
!AddOverflow(offset, delta, offset) ||
offset > mLength ||
offset < 0)
!offset.valid() ||
offset.value() > mLength ||
offset.value() < 0)
{
return (mActive = false);
}
p = ReadVariableLengthInt(p, limit, delta);
if (!AddOverflow(time, delta, time) ||
time > endTime ||
time < startTime)
time += delta;
if (!time.valid() ||
time.value() > endTime ||
time.value() < startTime)
{
return (mActive = false);
}
PRInt64 timeUsecs = 0;
if (!MulOverflow(time, USECS_PER_S, timeUsecs))
CheckedInt64 timeUsecs = time * USECS_PER_S;
if (!timeUsecs.valid())
return mActive = false;
timeUsecs /= timeDenom;
keyPoints->Add(offset, timeUsecs);
keyPoints->Add(offset.value(), timeUsecs.value());
numKeyPointsRead++;
}
@@ -1064,7 +1059,9 @@ nsresult nsSkeletonState::GetDuration(const nsTArray<PRUint32>& aTracks,
}
}
NS_ASSERTION(endTime > startTime, "Duration must be positive");
return AddOverflow(endTime, -startTime, aDuration) ? NS_OK : NS_ERROR_FAILURE;
CheckedInt64 duration = CheckedInt64(endTime) - startTime;
aDuration = duration.valid() ? duration.value() : 0;
return duration.valid() ? NS_OK : NS_ERROR_FAILURE;
}
bool nsSkeletonState::DecodeHeader(ogg_packet* aPacket)

View File

@@ -85,10 +85,9 @@ nsresult nsRawReader::ReadMetadata(nsVideoInfo* aInfo)
mMetadata.minorVersion == 1))
return NS_ERROR_FAILURE;
PRUint32 dummy;
if (!MulOverflow32(mMetadata.frameWidth, mMetadata.frameHeight, dummy))
return NS_ERROR_FAILURE;
CheckedUint32 dummy = CheckedUint32(static_cast<PRUint32>(mMetadata.frameWidth)) *
static_cast<PRUint32>(mMetadata.frameHeight);
NS_ENSURE_TRUE(dummy.valid(), NS_ERROR_FAILURE);
if (mMetadata.aspectDenominator == 0 ||
mMetadata.framerateDenominator == 0)
@@ -267,13 +266,11 @@ nsresult nsRawReader::Seek(PRInt64 aTime, PRInt64 aStartTime, PRInt64 aEndTime,
return NS_ERROR_FAILURE;
mCurrentFrame = aTime * mFrameRate / USECS_PER_S;
PRUint32 offset;
if (!MulOverflow32(mCurrentFrame, mFrameSize, offset))
return NS_ERROR_FAILURE;
CheckedUint32 offset = CheckedUint32(mCurrentFrame) * mFrameSize;
offset += sizeof(nsRawVideoHeader);
NS_ENSURE_TRUE(offset.valid(), NS_ERROR_FAILURE);
nsresult rv = resource->Seek(nsISeekableStream::NS_SEEK_SET, offset);
nsresult rv = resource->Seek(nsISeekableStream::NS_SEEK_SET, offset.value());
NS_ENSURE_SUCCESS(rv, rv);
mVideoQueue.Erase();

View File

@@ -441,26 +441,23 @@ bool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
// the previous audio chunk, we need to increment the packet count so that
// the vorbis decode doesn't use data from before the gap to help decode
// from after the gap.
PRInt64 tstamp_frames = 0;
if (!UsecsToFrames(tstamp_usecs, rate, tstamp_frames)) {
NS_WARNING("Int overflow converting WebM timestamp to frames");
CheckedInt64 tstamp_frames = UsecsToFrames(tstamp_usecs, rate);
CheckedInt64 decoded_frames = UsecsToFrames(mAudioStartUsec, rate);
if (!tstamp_frames.valid() || !decoded_frames.valid()) {
NS_WARNING("Int overflow converting WebM times to frames");
return false;
}
PRInt64 decoded_frames = 0;
if (!UsecsToFrames(mAudioStartUsec, rate, decoded_frames)) {
NS_WARNING("Int overflow converting WebM start time to frames");
return false;
}
if (!AddOverflow(decoded_frames, mAudioFrames, decoded_frames)) {
decoded_frames += mAudioFrames;
if (!decoded_frames.valid()) {
NS_WARNING("Int overflow adding decoded_frames");
return false;
}
if (tstamp_frames > decoded_frames) {
if (tstamp_frames.value() > decoded_frames.value()) {
#ifdef DEBUG
PRInt64 usecs = 0;
CheckedInt64 usecs = FramesToUsecs(tstamp_frames.value() - decoded_frames.value(), rate);
LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld frames, in audio stream\n",
FramesToUsecs(tstamp_frames - decoded_frames, rate, usecs) ? usecs: -1,
tstamp_frames - decoded_frames));
usecs.valid() ? usecs.value(): -1,
tstamp_frames.value() - decoded_frames.value()));
#endif
mPacketCount++;
mAudioStartUsec = tstamp_usecs;
@@ -498,22 +495,28 @@ bool nsWebMReader::DecodeAudioPacket(nestegg_packet* aPacket, PRInt64 aOffset)
}
}
PRInt64 duration = 0;
if (!FramesToUsecs(frames, rate, duration)) {
CheckedInt64 duration = FramesToUsecs(frames, rate);
if (!duration.valid()) {
NS_WARNING("Int overflow converting WebM audio duration");
return false;
}
PRInt64 total_duration = 0;
if (!FramesToUsecs(total_frames, rate, total_duration)) {
CheckedInt64 total_duration = FramesToUsecs(total_frames, rate);
if (!total_duration.valid()) {
NS_WARNING("Int overflow converting WebM audio total_duration");
return false;
}
PRInt64 time = tstamp_usecs + total_duration;
CheckedInt64 time = total_duration + tstamp_usecs;
if (!time.valid()) {
NS_WARNING("Int overflow adding total_duration and tstamp_usecs");
nestegg_free_packet(aPacket);
return PR_FALSE;
};
total_frames += frames;
mAudioQueue.Push(new AudioData(aOffset,
time,
duration,
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels));