Mirror of https://github.com/libretro/scummvm.git
COMMON: Cleanup QuickTime variable and struct naming
This commit is contained in:
parent 2e06681698
commit 547fd1bdca
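For quick reference, the renames carried out by this commit (distilled from the diff below) are:

// Structs:  MOVatom -> Atom,  MOVStreamContext -> Track,
//           MOVstts -> TimeToSampleEntry,  MOVstsc -> SampleToChunkEntry
// Storage:  MOVStreamContext *_streams[20] + _numStreams -> Common::Array<Track *> _tracks
// Fields:   chunk_count/chunk_offsets -> chunkCount/chunkOffsets,
//           stts_count/stts_data -> timeToSampleCount/timeToSample,
//           sample_to_chunk_sz/sample_to_chunk -> sampleToChunkCount/sampleToChunk,
//           sample_size/sample_count/sample_sizes -> sampleSize/sampleCount/sampleSizes,
//           keyframe_count -> keyframeCount, time_scale -> timeScale, codec_type -> codecType,
//           nb_frames -> frameCount, start_time -> startTime, extradata -> extraData
// Members:  _parentStream -> _parentTrack, _audioStreamIndex -> _audioTrackIndex,
//           _videoStreamIndex -> _videoTrackIndex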
@@ -68,16 +68,16 @@ bool QuickTimeAudioDecoder::loadAudioStream(Common::SeekableReadStream *stream,
void QuickTimeAudioDecoder::init() {
Common::QuickTimeParser::init();

_audioStreamIndex = -1;
_audioTrackIndex = -1;

// Find an audio stream
for (uint32 i = 0; i < _numStreams; i++)
if (_streams[i]->codec_type == CODEC_TYPE_AUDIO && _audioStreamIndex < 0)
_audioStreamIndex = i;
for (uint32 i = 0; i < _tracks.size(); i++)
if (_tracks[i]->codecType == CODEC_TYPE_AUDIO && _audioTrackIndex < 0)
_audioTrackIndex = i;

// Initialize audio, if present
if (_audioStreamIndex >= 0) {
AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
if (_audioTrackIndex >= 0) {
AudioSampleDesc *entry = (AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];

if (entry->isAudioCodecSupported()) {
_audStream = makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
@@ -85,16 +85,16 @@ void QuickTimeAudioDecoder::init() {

// Make sure the bits per sample transfers to the sample size
if (entry->getCodecTag() == MKTAG('r', 'a', 'w', ' ') || entry->getCodecTag() == MKTAG('t', 'w', 'o', 's'))
_streams[_audioStreamIndex]->sample_size = (entry->_bitsPerSample / 8) * entry->_channels;
_tracks[_audioTrackIndex]->sampleSize = (entry->_bitsPerSample / 8) * entry->_channels;
}
}
}

Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVStreamContext *st, uint32 format) {
if (st->codec_type == CODEC_TYPE_AUDIO) {
Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(Track *track, uint32 format) {
if (track->codecType == CODEC_TYPE_AUDIO) {
debug(0, "Audio Codec FourCC: \'%s\'", tag2str(format));

AudioSampleDesc *entry = new AudioSampleDesc(st, format);
AudioSampleDesc *entry = new AudioSampleDesc(track, format);

uint16 stsdVersion = _fd->readUint16BE();
_fd->readUint16BE(); // revision level
@@ -133,8 +133,8 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
entry->_bytesPerFrame = 34 * entry->_channels;
}

if (entry->_sampleRate == 0 && st->time_scale > 1)
entry->_sampleRate = st->time_scale;
if (entry->_sampleRate == 0 && track->timeScale > 1)
entry->_sampleRate = track->timeScale;

return entry;
}
@@ -143,15 +143,15 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
}

bool QuickTimeAudioDecoder::isOldDemuxing() const {
assert(_audioStreamIndex >= 0);
return _streams[_audioStreamIndex]->stts_count == 1 && _streams[_audioStreamIndex]->stts_data[0].duration == 1;
assert(_audioTrackIndex >= 0);
return _tracks[_audioTrackIndex]->timeToSampleCount == 1 && _tracks[_audioTrackIndex]->timeToSample[0].duration == 1;
}

void QuickTimeAudioDecoder::queueNextAudioChunk() {
AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
AudioSampleDesc *entry = (AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
Common::MemoryWriteStreamDynamic *wStream = new Common::MemoryWriteStreamDynamic();

_fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
_fd->seek(_tracks[_audioTrackIndex]->chunkOffsets[_curAudioChunk]);

// First, we have to get the sample count
uint32 sampleCount = entry->getAudioChunkSampleCount(_curAudioChunk);
@@ -172,7 +172,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
size = (samples / entry->_samplesPerFrame) * entry->_bytesPerFrame;
} else {
samples = MIN<uint32>(1024, sampleCount);
size = samples * _streams[_audioStreamIndex]->sample_size;
size = samples * _tracks[_audioTrackIndex]->sampleSize;
}

// Now, we read in the data for this data and output it
@@ -191,7 +191,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
startSample += entry->getAudioChunkSampleCount(i);

for (uint32 i = 0; i < sampleCount; i++) {
uint32 size = (_streams[_audioStreamIndex]->sample_size != 0) ? _streams[_audioStreamIndex]->sample_size : _streams[_audioStreamIndex]->sample_sizes[i + startSample];
uint32 size = (_tracks[_audioTrackIndex]->sampleSize != 0) ? _tracks[_audioTrackIndex]->sampleSize : _tracks[_audioTrackIndex]->sampleSizes[i + startSample];

// Now, we read in the data for this data and output it
byte *data = (byte *)malloc(size);
@@ -214,31 +214,31 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {

// Re-create the audio stream
delete _audStream;
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
_audStream = Audio::makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);

// First, we need to track down what audio sample we need
Audio::Timestamp curAudioTime = where.convertToFramerate(_streams[_audioStreamIndex]->time_scale);
Audio::Timestamp curAudioTime = where.convertToFramerate(_tracks[_audioTrackIndex]->timeScale);
uint32 sample = curAudioTime.totalNumberOfFrames();
uint32 seekSample = sample;

if (!isOldDemuxing()) {
// We shouldn't have audio samples that are a different duration
// That would be quite bad!
if (_streams[_audioStreamIndex]->stts_count != 1) {
if (_tracks[_audioTrackIndex]->timeToSampleCount != 1) {
warning("Failed seeking");
return;
}

// Note that duration is in terms of *one* channel
// This eases calculation a bit
seekSample /= _streams[_audioStreamIndex]->stts_data[0].duration;
seekSample /= _tracks[_audioTrackIndex]->timeToSample[0].duration;
}

// Now to track down what chunk it's in
uint32 totalSamples = 0;
_curAudioChunk = 0;
for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
for (uint32 i = 0; i < _tracks[_audioTrackIndex]->chunkCount; i++, _curAudioChunk++) {
uint32 chunkSampleCount = entry->getAudioChunkSampleCount(i);

if (seekSample < totalSamples + chunkSampleCount)
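As a side note (not part of the commit), the chunk walk in setAudioStreamPos() above can be read as a small standalone routine; the sketch below uses plain standard types instead of ScummVM's, and invented example data.

#include <cstdint>

// Finds which chunk holds the target sample, mirroring the loop in setAudioStreamPos():
// accumulate each chunk's sample count until it passes the seek target.
static uint32_t chunkForSample(uint32_t seekSample, const uint32_t *samplesPerChunk, uint32_t chunkCount) {
	uint32_t totalSamples = 0;
	for (uint32_t chunk = 0; chunk < chunkCount; chunk++) {
		if (seekSample < totalSamples + samplesPerChunk[chunk])
			return chunk; // the target sample lives in this chunk
		totalSamples += samplesPerChunk[chunk];
	}
	return chunkCount ? chunkCount - 1 : 0; // clamp past-the-end requests to the last chunk
}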
@@ -260,7 +260,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
}
}

QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
_channels = 0;
_sampleRate = 0;
_samplesPerFrame = 0;
@@ -280,7 +280,7 @@ bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {

if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
Common::String audioType;
switch (_parentStream->objectTypeMP4) {
switch (_parentTrack->objectTypeMP4) {
case 0x40: // AAC
#ifdef USE_FAAD
return true;
@@ -302,9 +302,9 @@ bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
uint32 QuickTimeAudioDecoder::AudioSampleDesc::getAudioChunkSampleCount(uint chunk) const {
uint32 sampleCount = 0;

for (uint32 j = 0; j < _parentStream->sample_to_chunk_sz; j++)
if (chunk >= _parentStream->sample_to_chunk[j].first)
sampleCount = _parentStream->sample_to_chunk[j].count;
for (uint32 j = 0; j < _parentTrack->sampleToChunkCount; j++)
if (chunk >= _parentTrack->sampleToChunk[j].first)
sampleCount = _parentTrack->sampleToChunk[j].count;

return sampleCount;
}
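For readers unfamiliar with the QuickTime sample-to-chunk ("stsc") table that getAudioChunkSampleCount() walks above, here is an illustrative sketch with made-up values; the entry layout matches the renamed SampleToChunkEntry struct, everything else is example scaffolding.

#include <cstdint>

struct SampleToChunkEntry { uint32_t first, count, id; };

// The last entry whose 'first' is <= chunk wins, exactly as in getAudioChunkSampleCount().
static uint32_t samplesInChunk(uint32_t chunk, const SampleToChunkEntry *table, uint32_t entries) {
	uint32_t sampleCount = 0;
	for (uint32_t j = 0; j < entries; j++)
		if (chunk >= table[j].first)
			sampleCount = table[j].count;
	return sampleCount;
}

// Example: { {0, 4, 1}, {10, 5, 1} } means chunks 0-9 hold 4 samples each, chunks 10+ hold 5.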
@@ -333,13 +333,13 @@ AudioStream *QuickTimeAudioDecoder::AudioSampleDesc::createAudioStream(Common::S
} else if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
// The 7th Guest iOS uses an MPEG-4 codec
#ifdef USE_FAAD
if (_parentStream->objectTypeMP4 == 0x40)
return makeAACStream(stream, DisposeAfterUse::YES, _parentStream->extradata);
if (_parentTrack->objectTypeMP4 == 0x40)
return makeAACStream(stream, DisposeAfterUse::YES, _parentTrack->extraData);
#endif
#ifdef AUDIO_QDM2_H
} else if (_codecTag == MKTAG('Q', 'D', 'M', '2')) {
// Myst ME uses this codec for many videos
return makeQDM2Stream(stream, _parentStream->extradata);
return makeQDM2Stream(stream, _parentTrack->extraData);
#endif
}
@@ -357,11 +357,11 @@ public:
~QuickTimeAudioStream() {}

bool openFromFile(const Common::String &filename) {
return QuickTimeAudioDecoder::loadAudioFile(filename) && _audioStreamIndex >= 0 && _audStream;
return QuickTimeAudioDecoder::loadAudioFile(filename) && _audioTrackIndex >= 0 && _audStream;
}

bool openFromStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
return QuickTimeAudioDecoder::loadAudioStream(stream, disposeFileHandle) && _audioStreamIndex >= 0 && _audStream;
return QuickTimeAudioDecoder::loadAudioStream(stream, disposeFileHandle) && _audioTrackIndex >= 0 && _audStream;
}

// AudioStream API
@@ -380,7 +380,7 @@ public:

bool isStereo() const { return _audStream->isStereo(); }
int getRate() const { return _audStream->getRate(); }
bool endOfData() const { return _curAudioChunk >= _streams[_audioStreamIndex]->chunk_count && _audStream->endOfData(); }
bool endOfData() const { return _curAudioChunk >= _tracks[_audioTrackIndex]->chunkCount && _audStream->endOfData(); }

// SeekableAudioStream API
bool seek(const Timestamp &where) {
@@ -392,7 +392,7 @@ public:
}

Timestamp getLength() const {
return Timestamp(0, _streams[_audioStreamIndex]->duration, _streams[_audioStreamIndex]->time_scale);
return Timestamp(0, _tracks[_audioTrackIndex]->duration, _tracks[_audioTrackIndex]->timeScale);
}
};
@@ -67,7 +67,7 @@ public:
protected:
class AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
AudioSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag);

bool isAudioCodecSupported() const;
uint32 getAudioChunkSampleCount(uint chunk) const;
@@ -82,14 +82,14 @@ protected:
};

// Common::QuickTimeParser API
virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(Track *track, uint32 format);

void init();
void setAudioStreamPos(const Timestamp &where);
bool isOldDemuxing() const;
void queueNextAudioChunk();

int _audioStreamIndex;
int _audioTrackIndex;
uint _curAudioChunk;
QueuingAudioStream *_audStream;
};
@ -48,7 +48,6 @@ namespace Common {
|
||||
|
||||
QuickTimeParser::QuickTimeParser() {
|
||||
_beginOffset = 0;
|
||||
_numStreams = 0;
|
||||
_fd = 0;
|
||||
_scaleFactorX = 1;
|
||||
_scaleFactorY = 1;
|
||||
@ -68,10 +67,9 @@ bool QuickTimeParser::parseFile(const Common::String &filename) {
|
||||
return false;
|
||||
|
||||
_foundMOOV = false;
|
||||
_numStreams = 0;
|
||||
_disposeFileHandle = DisposeAfterUse::YES;
|
||||
|
||||
MOVatom atom = { 0, 0, 0xffffffff };
|
||||
Atom atom = { 0, 0, 0xffffffff };
|
||||
|
||||
if (_resFork->hasResFork()) {
|
||||
// Search for a 'moov' resource
|
||||
@ -104,10 +102,9 @@ bool QuickTimeParser::parseFile(const Common::String &filename) {
|
||||
bool QuickTimeParser::parseStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle) {
|
||||
_fd = stream;
|
||||
_foundMOOV = false;
|
||||
_numStreams = 0;
|
||||
_disposeFileHandle = disposeFileHandle;
|
||||
|
||||
MOVatom atom = { 0, 0, 0xffffffff };
|
||||
Atom atom = { 0, 0, 0xffffffff };
|
||||
|
||||
if (readDefault(atom) < 0 || !_foundMOOV) {
|
||||
close();
|
||||
@ -119,21 +116,19 @@ bool QuickTimeParser::parseStream(Common::SeekableReadStream *stream, DisposeAft
|
||||
}
|
||||
|
||||
void QuickTimeParser::init() {
|
||||
// Remove unknown/unhandled streams
|
||||
for (uint32 i = 0; i < _numStreams;) {
|
||||
if (_streams[i]->codec_type == CODEC_TYPE_MOV_OTHER) {
|
||||
delete _streams[i];
|
||||
for (uint32 j = i + 1; j < _numStreams; j++)
|
||||
_streams[j - 1] = _streams[j];
|
||||
_numStreams--;
|
||||
} else
|
||||
i++;
|
||||
// Remove unknown/unhandled tracks
|
||||
for (uint32 i = 0; i < _tracks.size(); i++) {
|
||||
if (_tracks[i]->codecType == CODEC_TYPE_MOV_OTHER) {
|
||||
delete _tracks[i];
|
||||
_tracks.remove_at(i);
|
||||
i--;
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust time scale
|
||||
for (uint32 i = 0; i < _numStreams; i++)
|
||||
if (!_streams[i]->time_scale)
|
||||
_streams[i]->time_scale = _timeScale;
|
||||
for (uint32 i = 0; i < _tracks.size(); i++)
|
||||
if (!_tracks[i]->timeScale)
|
||||
_tracks[i]->timeScale = _timeScale;
|
||||
}
|
||||
|
||||
void QuickTimeParser::initParseTable() {
|
||||
@ -170,9 +165,9 @@ void QuickTimeParser::initParseTable() {
|
||||
_parseTable = p;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readDefault(MOVatom atom) {
|
||||
int QuickTimeParser::readDefault(Atom atom) {
|
||||
uint32 total_size = 0;
|
||||
MOVatom a;
|
||||
Atom a;
|
||||
int err = 0;
|
||||
|
||||
a.offset = atom.offset;
|
||||
@ -240,14 +235,14 @@ int QuickTimeParser::readDefault(MOVatom atom) {
|
||||
return err;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readLeaf(MOVatom atom) {
|
||||
int QuickTimeParser::readLeaf(Atom atom) {
|
||||
if (atom.size > 1)
|
||||
_fd->seek(atom.size, SEEK_SET);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readMOOV(MOVatom atom) {
|
||||
int QuickTimeParser::readMOOV(Atom atom) {
|
||||
if (readDefault(atom) < 0)
|
||||
return -1;
|
||||
|
||||
@ -256,7 +251,7 @@ int QuickTimeParser::readMOOV(MOVatom atom) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readCMOV(MOVatom atom) {
|
||||
int QuickTimeParser::readCMOV(Atom atom) {
|
||||
#ifdef USE_ZLIB
|
||||
// Read in the dcom atom
|
||||
_fd->readUint32BE();
|
||||
@ -294,7 +289,7 @@ int QuickTimeParser::readCMOV(MOVatom atom) {
|
||||
_fd = new Common::MemoryReadStream(uncompressedData, uncompressedSize, DisposeAfterUse::YES);
|
||||
|
||||
// Read the contents of the uncompressed data
|
||||
MOVatom a = { MKTAG('m', 'o', 'o', 'v'), 0, uncompressedSize };
|
||||
Atom a = { MKTAG('m', 'o', 'o', 'v'), 0, uncompressedSize };
|
||||
int err = readDefault(a);
|
||||
|
||||
// Assign the file handle back to the original handle
|
||||
@ -309,7 +304,7 @@ int QuickTimeParser::readCMOV(MOVatom atom) {
|
||||
#endif
|
||||
}
|
||||
|
||||
int QuickTimeParser::readMVHD(MOVatom atom) {
|
||||
int QuickTimeParser::readMVHD(Atom atom) {
|
||||
byte version = _fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
@ -358,21 +353,21 @@ int QuickTimeParser::readMVHD(MOVatom atom) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readTRAK(MOVatom atom) {
|
||||
MOVStreamContext *sc = new MOVStreamContext();
|
||||
int QuickTimeParser::readTRAK(Atom atom) {
|
||||
Track *track = new Track();
|
||||
|
||||
if (!sc)
|
||||
if (!track)
|
||||
return -1;
|
||||
|
||||
sc->codec_type = CODEC_TYPE_MOV_OTHER;
|
||||
sc->start_time = 0; // XXX: check
|
||||
_streams[_numStreams++] = sc;
|
||||
track->codecType = CODEC_TYPE_MOV_OTHER;
|
||||
track->startTime = 0; // XXX: check
|
||||
_tracks.push_back(track);
|
||||
|
||||
return readDefault(atom);
|
||||
}
|
||||
|
||||
int QuickTimeParser::readTKHD(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readTKHD(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
byte version = _fd->readByte();
|
||||
|
||||
_fd->readByte(); _fd->readByte();
|
||||
@ -392,9 +387,9 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
|
||||
_fd->readUint32BE(); // modification time
|
||||
}
|
||||
|
||||
/* st->id = */_fd->readUint32BE(); // track id (NOT 0 !)
|
||||
/* track->id = */_fd->readUint32BE(); // track id (NOT 0 !)
|
||||
_fd->readUint32BE(); // reserved
|
||||
//st->start_time = 0; // check
|
||||
//track->startTime = 0; // check
|
||||
(version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // highlevel (considering edits) duration in movie timebase
|
||||
_fd->readUint32BE(); // reserved
|
||||
_fd->readUint32BE(); // reserved
|
||||
@ -411,11 +406,11 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
|
||||
uint32 yMod = _fd->readUint32BE();
|
||||
_fd->skip(16);
|
||||
|
||||
st->scaleFactorX = Common::Rational(0x10000, xMod);
|
||||
st->scaleFactorY = Common::Rational(0x10000, yMod);
|
||||
track->scaleFactorX = Common::Rational(0x10000, xMod);
|
||||
track->scaleFactorY = Common::Rational(0x10000, yMod);
|
||||
|
||||
st->scaleFactorX.debugPrint(1, "readTKHD(): scaleFactorX =");
|
||||
st->scaleFactorY.debugPrint(1, "readTKHD(): scaleFactorY =");
|
||||
track->scaleFactorX.debugPrint(1, "readTKHD(): scaleFactorX =");
|
||||
track->scaleFactorY.debugPrint(1, "readTKHD(): scaleFactorY =");
|
||||
|
||||
// these are fixed-point, 16:16
|
||||
// uint32 tkWidth = _fd->readUint32BE() >> 16; // track width
|
||||
@ -425,33 +420,33 @@ int QuickTimeParser::readTKHD(MOVatom atom) {
|
||||
}
|
||||
|
||||
// edit list atom
|
||||
int QuickTimeParser::readELST(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readELST(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->editCount = _fd->readUint32BE();
|
||||
st->editList = new EditListEntry[st->editCount];
|
||||
track->editCount = _fd->readUint32BE();
|
||||
track->editList = new EditListEntry[track->editCount];
|
||||
|
||||
debug(2, "Track %d edit list count: %d", _numStreams - 1, st->editCount);
|
||||
debug(2, "Track %d edit list count: %d", _tracks.size() - 1, track->editCount);
|
||||
|
||||
for (uint32 i = 0; i < st->editCount; i++){
|
||||
st->editList[i].trackDuration = _fd->readUint32BE();
|
||||
st->editList[i].mediaTime = _fd->readSint32BE();
|
||||
st->editList[i].mediaRate = Common::Rational(_fd->readUint32BE(), 0x10000);
|
||||
debugN(3, "\tDuration = %d, Media Time = %d, ", st->editList[i].trackDuration, st->editList[i].mediaTime);
|
||||
st->editList[i].mediaRate.debugPrint(3, "Media Rate =");
|
||||
for (uint32 i = 0; i < track->editCount; i++){
|
||||
track->editList[i].trackDuration = _fd->readUint32BE();
|
||||
track->editList[i].mediaTime = _fd->readSint32BE();
|
||||
track->editList[i].mediaRate = Common::Rational(_fd->readUint32BE(), 0x10000);
|
||||
debugN(3, "\tDuration = %d, Media Time = %d, ", track->editList[i].trackDuration, track->editList[i].mediaTime);
|
||||
track->editList[i].mediaRate.debugPrint(3, "Media Rate =");
|
||||
}
|
||||
|
||||
if (st->editCount != 1)
|
||||
if (track->editCount != 1)
|
||||
warning("Multiple edit list entries. Things may go awry");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readHDLR(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readHDLR(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
@ -469,9 +464,9 @@ int QuickTimeParser::readHDLR(MOVatom atom) {
|
||||
debug(0, "MPEG-4 detected");
|
||||
|
||||
if (type == MKTAG('v', 'i', 'd', 'e'))
|
||||
st->codec_type = CODEC_TYPE_VIDEO;
|
||||
track->codecType = CODEC_TYPE_VIDEO;
|
||||
else if (type == MKTAG('s', 'o', 'u', 'n'))
|
||||
st->codec_type = CODEC_TYPE_AUDIO;
|
||||
track->codecType = CODEC_TYPE_AUDIO;
|
||||
|
||||
_fd->readUint32BE(); // component manufacture
|
||||
_fd->readUint32BE(); // component flags
|
||||
@ -489,8 +484,8 @@ int QuickTimeParser::readHDLR(MOVatom atom) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readMDHD(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readMDHD(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
byte version = _fd->readByte();
|
||||
|
||||
if (version > 1)
|
||||
@ -507,8 +502,8 @@ int QuickTimeParser::readMDHD(MOVatom atom) {
|
||||
_fd->readUint32BE(); // modification time
|
||||
}
|
||||
|
||||
st->time_scale = _fd->readUint32BE();
|
||||
st->duration = (version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // duration
|
||||
track->timeScale = _fd->readUint32BE();
|
||||
track->duration = (version == 1) ? (_fd->readUint32BE(), _fd->readUint32BE()) : _fd->readUint32BE(); // duration
|
||||
|
||||
_fd->readUint16BE(); // language
|
||||
_fd->readUint16BE(); // quality
|
||||
@ -516,17 +511,17 @@ int QuickTimeParser::readMDHD(MOVatom atom) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTSD(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTSD(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
uint32 entryCount = _fd->readUint32BE();
|
||||
st->sampleDescs.resize(entryCount);
|
||||
track->sampleDescs.resize(entryCount);
|
||||
|
||||
for (uint32 i = 0; i < entryCount; i++) { // Parsing Sample description table
|
||||
MOVatom a = { 0, 0, 0 };
|
||||
Atom a = { 0, 0, 0 };
|
||||
uint32 start_pos = _fd->pos();
|
||||
int size = _fd->readUint32BE(); // size
|
||||
uint32 format = _fd->readUint32BE(); // data format
|
||||
@ -535,11 +530,11 @@ int QuickTimeParser::readSTSD(MOVatom atom) {
|
||||
_fd->readUint16BE(); // reserved
|
||||
_fd->readUint16BE(); // index
|
||||
|
||||
st->sampleDescs[i] = readSampleDesc(st, format);
|
||||
track->sampleDescs[i] = readSampleDesc(track, format);
|
||||
|
||||
debug(0, "size=%d 4CC= %s codec_type=%d", size, tag2str(format), st->codec_type);
|
||||
debug(0, "size=%d 4CC= %s codec_type=%d", size, tag2str(format), track->codecType);
|
||||
|
||||
if (!st->sampleDescs[i]) {
|
||||
if (!track->sampleDescs[i]) {
|
||||
// other codec type, just skip (rtp, mp4s, tmcd ...)
|
||||
_fd->seek(size - (_fd->pos() - start_pos), SEEK_CUR);
|
||||
}
|
||||
@ -555,139 +550,139 @@ int QuickTimeParser::readSTSD(MOVatom atom) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTSC(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTSC(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->sample_to_chunk_sz = _fd->readUint32BE();
|
||||
track->sampleToChunkCount = _fd->readUint32BE();
|
||||
|
||||
debug(0, "track[%i].stsc.entries = %i", _numStreams - 1, st->sample_to_chunk_sz);
|
||||
debug(0, "track[%i].stsc.entries = %i", _tracks.size() - 1, track->sampleToChunkCount);
|
||||
|
||||
st->sample_to_chunk = new MOVstsc[st->sample_to_chunk_sz];
|
||||
track->sampleToChunk = new SampleToChunkEntry[track->sampleToChunkCount];
|
||||
|
||||
if (!st->sample_to_chunk)
|
||||
if (!track->sampleToChunk)
|
||||
return -1;
|
||||
|
||||
for (uint32 i = 0; i < st->sample_to_chunk_sz; i++) {
|
||||
st->sample_to_chunk[i].first = _fd->readUint32BE() - 1;
|
||||
st->sample_to_chunk[i].count = _fd->readUint32BE();
|
||||
st->sample_to_chunk[i].id = _fd->readUint32BE();
|
||||
//warning("Sample to Chunk[%d]: First = %d, Count = %d", i, st->sample_to_chunk[i].first, st->sample_to_chunk[i].count);
|
||||
for (uint32 i = 0; i < track->sampleToChunkCount; i++) {
|
||||
track->sampleToChunk[i].first = _fd->readUint32BE() - 1;
|
||||
track->sampleToChunk[i].count = _fd->readUint32BE();
|
||||
track->sampleToChunk[i].id = _fd->readUint32BE();
|
||||
//warning("Sample to Chunk[%d]: First = %d, Count = %d", i, track->sampleToChunk[i].first, track->sampleToChunk[i].count);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTSS(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTSS(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->keyframe_count = _fd->readUint32BE();
|
||||
track->keyframeCount = _fd->readUint32BE();
|
||||
|
||||
debug(0, "keyframe_count = %d", st->keyframe_count);
|
||||
debug(0, "keyframeCount = %d", track->keyframeCount);
|
||||
|
||||
st->keyframes = new uint32[st->keyframe_count];
|
||||
track->keyframes = new uint32[track->keyframeCount];
|
||||
|
||||
if (!st->keyframes)
|
||||
if (!track->keyframes)
|
||||
return -1;
|
||||
|
||||
for (uint32 i = 0; i < st->keyframe_count; i++) {
|
||||
st->keyframes[i] = _fd->readUint32BE() - 1; // Adjust here, the frames are based on 1
|
||||
debug(6, "keyframes[%d] = %d", i, st->keyframes[i]);
|
||||
for (uint32 i = 0; i < track->keyframeCount; i++) {
|
||||
track->keyframes[i] = _fd->readUint32BE() - 1; // Adjust here, the frames are based on 1
|
||||
debug(6, "keyframes[%d] = %d", i, track->keyframes[i]);
|
||||
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTSZ(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTSZ(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->sample_size = _fd->readUint32BE();
|
||||
st->sample_count = _fd->readUint32BE();
|
||||
track->sampleSize = _fd->readUint32BE();
|
||||
track->sampleCount = _fd->readUint32BE();
|
||||
|
||||
debug(5, "sample_size = %d sample_count = %d", st->sample_size, st->sample_count);
|
||||
debug(5, "sampleSize = %d sampleCount = %d", track->sampleSize, track->sampleCount);
|
||||
|
||||
if (st->sample_size)
|
||||
if (track->sampleSize)
|
||||
return 0; // there isn't any table following
|
||||
|
||||
st->sample_sizes = new uint32[st->sample_count];
|
||||
track->sampleSizes = new uint32[track->sampleCount];
|
||||
|
||||
if (!st->sample_sizes)
|
||||
if (!track->sampleSizes)
|
||||
return -1;
|
||||
|
||||
for(uint32 i = 0; i < st->sample_count; i++) {
|
||||
st->sample_sizes[i] = _fd->readUint32BE();
|
||||
debug(6, "sample_sizes[%d] = %d", i, st->sample_sizes[i]);
|
||||
for(uint32 i = 0; i < track->sampleCount; i++) {
|
||||
track->sampleSizes[i] = _fd->readUint32BE();
|
||||
debug(6, "sampleSizes[%d] = %d", i, track->sampleSizes[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTTS(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTTS(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
uint32 totalSampleCount = 0;
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->stts_count = _fd->readUint32BE();
|
||||
st->stts_data = new MOVstts[st->stts_count];
|
||||
track->timeToSampleCount = _fd->readUint32BE();
|
||||
track->timeToSample = new TimeToSampleEntry[track->timeToSampleCount];
|
||||
|
||||
debug(0, "track[%d].stts.entries = %d", _numStreams - 1, st->stts_count);
|
||||
debug(0, "track[%d].stts.entries = %d", _tracks.size() - 1, track->timeToSampleCount);
|
||||
|
||||
for (int32 i = 0; i < st->stts_count; i++) {
|
||||
st->stts_data[i].count = _fd->readUint32BE();
|
||||
st->stts_data[i].duration = _fd->readUint32BE();
|
||||
for (int32 i = 0; i < track->timeToSampleCount; i++) {
|
||||
track->timeToSample[i].count = _fd->readUint32BE();
|
||||
track->timeToSample[i].duration = _fd->readUint32BE();
|
||||
|
||||
debug(1, "\tCount = %d, Duration = %d", st->stts_data[i].count, st->stts_data[i].duration);
|
||||
debug(1, "\tCount = %d, Duration = %d", track->timeToSample[i].count, track->timeToSample[i].duration);
|
||||
|
||||
totalSampleCount += st->stts_data[i].count;
|
||||
totalSampleCount += track->timeToSample[i].count;
|
||||
}
|
||||
|
||||
st->nb_frames = totalSampleCount;
|
||||
track->frameCount = totalSampleCount;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readSTCO(MOVatom atom) {
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
int QuickTimeParser::readSTCO(Atom atom) {
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readByte(); // version
|
||||
_fd->readByte(); _fd->readByte(); _fd->readByte(); // flags
|
||||
|
||||
st->chunk_count = _fd->readUint32BE();
|
||||
st->chunk_offsets = new uint32[st->chunk_count];
|
||||
track->chunkCount = _fd->readUint32BE();
|
||||
track->chunkOffsets = new uint32[track->chunkCount];
|
||||
|
||||
if (!st->chunk_offsets)
|
||||
if (!track->chunkOffsets)
|
||||
return -1;
|
||||
|
||||
for (uint32 i = 0; i < st->chunk_count; i++) {
|
||||
for (uint32 i = 0; i < track->chunkCount; i++) {
|
||||
// WORKAROUND/HACK: The offsets in Riven videos (ones inside the Mohawk archives themselves)
|
||||
// have offsets relative to the archive and not the video. This is quite nasty. We subtract
|
||||
// the initial offset of the stream to get the correct value inside of the stream.
|
||||
st->chunk_offsets[i] = _fd->readUint32BE() - _beginOffset;
|
||||
track->chunkOffsets[i] = _fd->readUint32BE() - _beginOffset;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int QuickTimeParser::readWAVE(MOVatom atom) {
|
||||
if (_numStreams < 1)
|
||||
int QuickTimeParser::readWAVE(Atom atom) {
|
||||
if (_tracks.empty())
|
||||
return 0;
|
||||
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
Track *track = _tracks.back();
|
||||
|
||||
if (atom.size > (1 << 30))
|
||||
return -1;
|
||||
|
||||
if (st->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
|
||||
st->extradata = _fd->readStream(atom.size - 8);
|
||||
if (track->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extra data for QDM2
|
||||
track->extraData = _fd->readStream(atom.size - 8);
|
||||
else if (atom.size > 8)
|
||||
return readDefault(atom);
|
||||
else
|
||||
@ -723,11 +718,11 @@ static void readMP4Desc(Common::SeekableReadStream *stream, byte &tag, int &leng
|
||||
length = readMP4DescLength(stream);
|
||||
}
|
||||
|
||||
int QuickTimeParser::readESDS(MOVatom atom) {
|
||||
if (_numStreams < 1)
|
||||
int QuickTimeParser::readESDS(Atom atom) {
|
||||
if (_tracks.empty())
|
||||
return 0;
|
||||
|
||||
MOVStreamContext *st = _streams[_numStreams - 1];
|
||||
Track *track = _tracks.back();
|
||||
|
||||
_fd->readUint32BE(); // version + flags
|
||||
|
||||
@ -744,7 +739,7 @@ int QuickTimeParser::readESDS(MOVatom atom) {
|
||||
if (tag != kMP4DecConfigDescTag)
|
||||
return 0;
|
||||
|
||||
st->objectTypeMP4 = _fd->readByte();
|
||||
track->objectTypeMP4 = _fd->readByte();
|
||||
_fd->readByte(); // stream type
|
||||
_fd->readUint16BE(); _fd->readByte(); // buffer size
|
||||
_fd->readUint32BE(); // max bitrate
|
||||
@ -755,17 +750,17 @@ int QuickTimeParser::readESDS(MOVatom atom) {
|
||||
if (tag != kMP4DecSpecificDescTag)
|
||||
return 0;
|
||||
|
||||
st->extradata = _fd->readStream(length);
|
||||
track->extraData = _fd->readStream(length);
|
||||
|
||||
debug(0, "MPEG-4 object type = %02x", st->objectTypeMP4);
|
||||
debug(0, "MPEG-4 object type = %02x", track->objectTypeMP4);
|
||||
return 0;
|
||||
}
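Background, not part of the commit: the descriptor lengths consumed around the objectTypeMP4 and extraData reads above use MPEG-4's "expandable" size encoding. The helper below is only an assumed sketch of that encoding (the parser's real readMP4DescLength() works on a SeekableReadStream); up to four bytes each contribute seven length bits, and the high bit marks continuation.

#include <cstdint>

// Decode an MPEG-4 expandable descriptor length from a byte buffer (illustrative only).
static uint32_t readExpandableLength(const uint8_t *&p) {
	uint32_t length = 0;
	for (int i = 0; i < 4; i++) {
		uint8_t b = *p++;
		length = (length << 7) | (b & 0x7F); // low 7 bits carry length data
		if (!(b & 0x80))                     // high bit clear: this was the last byte
			break;
	}
	return length;
}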
|
||||
|
||||
void QuickTimeParser::close() {
|
||||
for (uint32 i = 0; i < _numStreams; i++)
|
||||
delete _streams[i];
|
||||
for (uint32 i = 0; i < _tracks.size(); i++)
|
||||
delete _tracks[i];
|
||||
|
||||
_numStreams = 0;
|
||||
_tracks.clear();
|
||||
|
||||
if (_disposeFileHandle == DisposeAfterUse::YES)
|
||||
delete _fd;
|
||||
@ -773,44 +768,44 @@ void QuickTimeParser::close() {
|
||||
_fd = 0;
|
||||
}
|
||||
|
||||
QuickTimeParser::SampleDesc::SampleDesc(MOVStreamContext *parentStream, uint32 codecTag) {
|
||||
_parentStream = parentStream;
|
||||
QuickTimeParser::SampleDesc::SampleDesc(Track *parentTrack, uint32 codecTag) {
|
||||
_parentTrack = parentTrack;
|
||||
_codecTag = codecTag;
|
||||
}
|
||||
|
||||
QuickTimeParser::MOVStreamContext::MOVStreamContext() {
|
||||
chunk_count = 0;
|
||||
chunk_offsets = 0;
|
||||
stts_count = 0;
|
||||
stts_data = 0;
|
||||
sample_to_chunk_sz = 0;
|
||||
sample_to_chunk = 0;
|
||||
sample_size = 0;
|
||||
sample_count = 0;
|
||||
sample_sizes = 0;
|
||||
keyframe_count = 0;
|
||||
QuickTimeParser::Track::Track() {
|
||||
chunkCount = 0;
|
||||
chunkOffsets = 0;
|
||||
timeToSampleCount = 0;
|
||||
timeToSample = 0;
|
||||
sampleToChunkCount = 0;
|
||||
sampleToChunk = 0;
|
||||
sampleSize = 0;
|
||||
sampleCount = 0;
|
||||
sampleSizes = 0;
|
||||
keyframeCount = 0;
|
||||
keyframes = 0;
|
||||
time_scale = 0;
|
||||
timeScale = 0;
|
||||
width = 0;
|
||||
height = 0;
|
||||
codec_type = CODEC_TYPE_MOV_OTHER;
|
||||
codecType = CODEC_TYPE_MOV_OTHER;
|
||||
editCount = 0;
|
||||
editList = 0;
|
||||
extradata = 0;
|
||||
nb_frames = 0;
|
||||
extraData = 0;
|
||||
frameCount = 0;
|
||||
duration = 0;
|
||||
start_time = 0;
|
||||
startTime = 0;
|
||||
objectTypeMP4 = 0;
|
||||
}
|
||||
|
||||
QuickTimeParser::MOVStreamContext::~MOVStreamContext() {
|
||||
delete[] chunk_offsets;
|
||||
delete[] stts_data;
|
||||
delete[] sample_to_chunk;
|
||||
delete[] sample_sizes;
|
||||
QuickTimeParser::Track::~Track() {
|
||||
delete[] chunkOffsets;
|
||||
delete[] timeToSample;
|
||||
delete[] sampleToChunk;
|
||||
delete[] sampleSizes;
|
||||
delete[] keyframes;
|
||||
delete[] editList;
|
||||
delete extradata;
|
||||
delete extraData;
|
||||
|
||||
for (uint32 i = 0; i < sampleDescs.size(); i++)
|
||||
delete sampleDescs[i];
|
||||
|
@@ -88,23 +88,23 @@ protected:

DisposeAfterUse::Flag _disposeFileHandle;

struct MOVatom {
struct Atom {
uint32 type;
uint32 offset;
uint32 size;
};

struct ParseTable {
int (QuickTimeParser::*func)(MOVatom atom);
int (QuickTimeParser::*func)(Atom atom);
uint32 type;
};

struct MOVstts {
struct TimeToSampleEntry {
int count;
int duration;
};

struct MOVstsc {
struct SampleToChunkEntry {
uint32 first;
uint32 count;
uint32 id;
@@ -116,17 +116,17 @@ protected:
Common::Rational mediaRate;
};

struct MOVStreamContext;
struct Track;

class SampleDesc {
public:
SampleDesc(MOVStreamContext *parentStream, uint32 codecTag);
SampleDesc(Track *parentTrack, uint32 codecTag);
virtual ~SampleDesc() {}

uint32 getCodecTag() const { return _codecTag; }

protected:
MOVStreamContext *_parentStream;
Track *_parentTrack;
uint32 _codecTag;
};
@@ -136,77 +136,76 @@ protected:
CODEC_TYPE_AUDIO
};

struct MOVStreamContext {
MOVStreamContext();
~MOVStreamContext();
struct Track {
Track();
~Track();

uint32 chunk_count;
uint32 *chunk_offsets;
int stts_count;
MOVstts *stts_data;
uint32 sample_to_chunk_sz;
MOVstsc *sample_to_chunk;
uint32 sample_size;
uint32 sample_count;
uint32 *sample_sizes;
uint32 keyframe_count;
uint32 chunkCount;
uint32 *chunkOffsets;
int timeToSampleCount;
TimeToSampleEntry *timeToSample;
uint32 sampleToChunkCount;
SampleToChunkEntry *sampleToChunk;
uint32 sampleSize;
uint32 sampleCount;
uint32 *sampleSizes;
uint32 keyframeCount;
uint32 *keyframes;
int32 time_scale;
int32 timeScale;

uint16 width;
uint16 height;
CodecType codec_type;
CodecType codecType;

Common::Array<SampleDesc *> sampleDescs;

uint32 editCount;
EditListEntry *editList;

Common::SeekableReadStream *extradata;
Common::SeekableReadStream *extraData;

uint32 nb_frames;
uint32 frameCount;
uint32 duration;
uint32 start_time;
uint32 startTime;
Common::Rational scaleFactorX;
Common::Rational scaleFactorY;

byte objectTypeMP4;
};

virtual SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format) = 0;
virtual SampleDesc *readSampleDesc(Track *track, uint32 format) = 0;

const ParseTable *_parseTable;
bool _foundMOOV;
uint32 _timeScale;
uint32 _duration;
uint32 _numStreams;
Common::Rational _scaleFactorX;
Common::Rational _scaleFactorY;
MOVStreamContext *_streams[20];
Common::Array<Track *> _tracks;
uint32 _beginOffset;
Common::MacResManager *_resFork;

void initParseTable();
void init();
int readDefault(MOVatom atom);
int readLeaf(MOVatom atom);
int readELST(MOVatom atom);
int readHDLR(MOVatom atom);
int readMDHD(MOVatom atom);
int readMOOV(MOVatom atom);
int readMVHD(MOVatom atom);
int readTKHD(MOVatom atom);
int readTRAK(MOVatom atom);
int readSTCO(MOVatom atom);
int readSTSC(MOVatom atom);
int readSTSD(MOVatom atom);
int readSTSS(MOVatom atom);
int readSTSZ(MOVatom atom);
int readSTTS(MOVatom atom);
int readCMOV(MOVatom atom);
int readWAVE(MOVatom atom);
int readESDS(MOVatom atom);
int readDefault(Atom atom);
int readLeaf(Atom atom);
int readELST(Atom atom);
int readHDLR(Atom atom);
int readMDHD(Atom atom);
int readMOOV(Atom atom);
int readMVHD(Atom atom);
int readTKHD(Atom atom);
int readTRAK(Atom atom);
int readSTCO(Atom atom);
int readSTSC(Atom atom);
int readSTSD(Atom atom);
int readSTSS(Atom atom);
int readSTSZ(Atom atom);
int readSTTS(Atom atom);
int readCMOV(Atom atom);
int readWAVE(Atom atom);
int readESDS(Atom atom);
};

} // End of namespace Common
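To make the renamed Track tables concrete, here is an illustrative sketch (not code from this commit) of how chunkOffsets, sampleToChunk and sampleSizes combine to locate one sample in the file, in the same spirit as getNextFramePacket() further down; the types and container choices here are placeholders.

#include <cstdint>
#include <vector>

struct SampleToChunkEntry { uint32_t first, count, id; };

// File offset of sample 'n' when every sample has its own size (the sampleSize == 0 case).
static uint32_t sampleOffset(uint32_t n,
                             const std::vector<uint32_t> &chunkOffsets,
                             const std::vector<SampleToChunkEntry> &sampleToChunk,
                             const std::vector<uint32_t> &sampleSizes) {
	uint32_t firstSampleInChunk = 0;
	for (uint32_t chunk = 0; chunk < chunkOffsets.size(); chunk++) {
		// Samples per chunk: the last sampleToChunk entry whose 'first' <= chunk applies.
		uint32_t perChunk = 0;
		for (size_t j = 0; j < sampleToChunk.size(); j++)
			if (chunk >= sampleToChunk[j].first)
				perChunk = sampleToChunk[j].count;
		if (n < firstSampleInChunk + perChunk) {
			uint32_t offset = chunkOffsets[chunk];
			for (uint32_t i = firstSampleInChunk; i < n; i++)
				offset += sampleSizes[i]; // skip earlier samples in the same chunk
			return offset;
		}
		firstSampleInChunk += perChunk;
	}
	return 0; // sample index past the end of the track
}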
@ -69,50 +69,50 @@ QuickTimeDecoder::~QuickTimeDecoder() {
|
||||
}
|
||||
|
||||
uint16 QuickTimeDecoder::getWidth() const {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 0;
|
||||
|
||||
return (Common::Rational(_streams[_videoStreamIndex]->width) / getScaleFactorX()).toInt();
|
||||
return (Common::Rational(_tracks[_videoTrackIndex]->width) / getScaleFactorX()).toInt();
|
||||
}
|
||||
|
||||
uint16 QuickTimeDecoder::getHeight() const {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 0;
|
||||
|
||||
return (Common::Rational(_streams[_videoStreamIndex]->height) / getScaleFactorY()).toInt();
|
||||
return (Common::Rational(_tracks[_videoTrackIndex]->height) / getScaleFactorY()).toInt();
|
||||
}
|
||||
|
||||
uint32 QuickTimeDecoder::getFrameCount() const {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 0;
|
||||
|
||||
return _streams[_videoStreamIndex]->nb_frames;
|
||||
return _tracks[_videoTrackIndex]->frameCount;
|
||||
}
|
||||
|
||||
Common::Rational QuickTimeDecoder::getScaleFactorX() const {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 1;
|
||||
|
||||
return (_scaleFactorX * _streams[_videoStreamIndex]->scaleFactorX);
|
||||
return (_scaleFactorX * _tracks[_videoTrackIndex]->scaleFactorX);
|
||||
}
|
||||
|
||||
Common::Rational QuickTimeDecoder::getScaleFactorY() const {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 1;
|
||||
|
||||
return (_scaleFactorY * _streams[_videoStreamIndex]->scaleFactorY);
|
||||
return (_scaleFactorY * _tracks[_videoTrackIndex]->scaleFactorY);
|
||||
}
|
||||
|
||||
uint32 QuickTimeDecoder::getFrameDuration() {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return 0;
|
||||
|
||||
uint32 curFrameIndex = 0;
|
||||
for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count; i++) {
|
||||
curFrameIndex += _streams[_videoStreamIndex]->stts_data[i].count;
|
||||
for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount; i++) {
|
||||
curFrameIndex += _tracks[_videoTrackIndex]->timeToSample[i].count;
|
||||
if ((uint32)_curFrame < curFrameIndex) {
|
||||
// Ok, now we have what duration this frame has.
|
||||
return _streams[_videoStreamIndex]->stts_data[i].duration;
|
||||
return _tracks[_videoTrackIndex]->timeToSample[i].duration;
|
||||
}
|
||||
}
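A worked illustration of the lookup above (not part of the commit): the time-to-sample table is run-length encoded, so a frame's duration is the duration of the run its index falls into.

#include <cstdint>

struct TimeToSampleEntry { int count, duration; };

// Mirrors getFrameDuration(): walk the runs until the cumulative count passes the frame index.
static int frameDuration(uint32_t frame, const TimeToSampleEntry *tts, int entries) {
	uint32_t curFrameIndex = 0;
	for (int i = 0; i < entries; i++) {
		curFrameIndex += tts[i].count;
		if (frame < curFrameIndex)
			return tts[i].duration; // duration is in the track's timeScale units
	}
	return 0; // frame index beyond the table
}

// Example (made-up): { {10, 100}, {5, 200} } -> frames 0-9 last 100 ticks, frames 10-14 last 200.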
|
||||
|
||||
@ -131,17 +131,17 @@ Graphics::PixelFormat QuickTimeDecoder::getPixelFormat() const {
|
||||
}
|
||||
|
||||
uint32 QuickTimeDecoder::findKeyFrame(uint32 frame) const {
|
||||
for (int i = _streams[_videoStreamIndex]->keyframe_count - 1; i >= 0; i--)
|
||||
if (_streams[_videoStreamIndex]->keyframes[i] <= frame)
|
||||
return _streams[_videoStreamIndex]->keyframes[i];
|
||||
for (int i = _tracks[_videoTrackIndex]->keyframeCount - 1; i >= 0; i--)
|
||||
if (_tracks[_videoTrackIndex]->keyframes[i] <= frame)
|
||||
return _tracks[_videoTrackIndex]->keyframes[i];
|
||||
|
||||
// If none found, we'll assume the requested frame is a key frame
|
||||
return frame;
|
||||
}
|
||||
|
||||
void QuickTimeDecoder::seekToFrame(uint32 frame) {
|
||||
assert(_videoStreamIndex >= 0);
|
||||
assert(frame < _streams[_videoStreamIndex]->nb_frames);
|
||||
assert(_videoTrackIndex >= 0);
|
||||
assert(frame < _tracks[_videoTrackIndex]->frameCount);
|
||||
|
||||
// Stop all audio (for now)
|
||||
stopAudio();
|
||||
@ -155,20 +155,20 @@ void QuickTimeDecoder::seekToFrame(uint32 frame) {
|
||||
_nextFrameStartTime = 0;
|
||||
uint32 curFrame = 0;
|
||||
|
||||
for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
|
||||
for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
|
||||
for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && curFrame < frame; i++) {
|
||||
for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count && curFrame < frame; j++) {
|
||||
curFrame++;
|
||||
_nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
|
||||
_nextFrameStartTime += _tracks[_videoTrackIndex]->timeToSample[i].duration;
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust the video starting point
|
||||
const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
|
||||
const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _tracks[_videoTrackIndex]->timeScale);
|
||||
_startTime = g_system->getMillis() - curVideoTime.msecs();
|
||||
resetPauseStartTime();
|
||||
|
||||
// Adjust the audio starting point
|
||||
if (_audioStreamIndex >= 0) {
|
||||
if (_audioTrackIndex >= 0) {
|
||||
_audioStartOffset = curVideoTime;
|
||||
|
||||
// Seek to the new audio location
|
||||
@ -181,17 +181,17 @@ void QuickTimeDecoder::seekToFrame(uint32 frame) {
|
||||
|
||||
void QuickTimeDecoder::seekToTime(Audio::Timestamp time) {
|
||||
// Use makeQuickTimeStream() instead
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
error("Audio-only seeking not supported");
|
||||
|
||||
// Try to find the last frame that should have been decoded
|
||||
uint32 frame = 0;
|
||||
Audio::Timestamp totalDuration(0, _streams[_videoStreamIndex]->time_scale);
|
||||
Audio::Timestamp totalDuration(0, _tracks[_videoTrackIndex]->timeScale);
|
||||
bool done = false;
|
||||
|
||||
for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && !done; i++) {
|
||||
for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count; j++) {
|
||||
totalDuration = totalDuration.addFrames(_streams[_videoStreamIndex]->stts_data[i].duration);
|
||||
for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && !done; i++) {
|
||||
for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count; j++) {
|
||||
totalDuration = totalDuration.addFrames(_tracks[_videoTrackIndex]->timeToSample[i].duration);
|
||||
if (totalDuration > time) {
|
||||
done = true;
|
||||
break;
|
||||
@ -221,14 +221,14 @@ void QuickTimeDecoder::pauseVideoIntern(bool pause) {
|
||||
}
|
||||
|
||||
Codec *QuickTimeDecoder::findDefaultVideoCodec() const {
|
||||
if (_videoStreamIndex < 0 || _streams[_videoStreamIndex]->sampleDescs.empty())
|
||||
if (_videoTrackIndex < 0 || _tracks[_videoTrackIndex]->sampleDescs.empty())
|
||||
return 0;
|
||||
|
||||
return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->_videoCodec;
|
||||
return ((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[0])->_videoCodec;
|
||||
}
|
||||
|
||||
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
|
||||
if (_videoStreamIndex < 0 || _curFrame >= (int32)getFrameCount() - 1)
|
||||
if (_videoTrackIndex < 0 || _curFrame >= (int32)getFrameCount() - 1)
|
||||
return 0;
|
||||
|
||||
if (_startTime == 0)
|
||||
@ -244,11 +244,11 @@ const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
|
||||
uint32 descId;
|
||||
Common::SeekableReadStream *frameData = getNextFramePacket(descId);
|
||||
|
||||
if (!frameData || !descId || descId > _streams[_videoStreamIndex]->sampleDescs.size())
|
||||
if (!frameData || !descId || descId > _tracks[_videoTrackIndex]->sampleDescs.size())
|
||||
return 0;
|
||||
|
||||
// Find which video description entry we want
|
||||
VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[descId - 1];
|
||||
VideoSampleDesc *entry = (VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[descId - 1];
|
||||
|
||||
if (!entry->_videoCodec)
|
||||
return 0;
|
||||
@ -305,7 +305,7 @@ uint32 QuickTimeDecoder::getTimeToNextFrame() const {
|
||||
return 0;
|
||||
|
||||
// Convert from the QuickTime rate base to 1000
|
||||
uint32 nextFrameStartTime = _nextFrameStartTime * 1000 / _streams[_videoStreamIndex]->time_scale;
|
||||
uint32 nextFrameStartTime = _nextFrameStartTime * 1000 / _tracks[_videoTrackIndex]->timeScale;
|
||||
uint32 elapsedTime = getElapsedTime();
|
||||
|
||||
if (nextFrameStartTime <= elapsedTime)
|
||||
@ -333,13 +333,13 @@ bool QuickTimeDecoder::loadStream(Common::SeekableReadStream *stream) {
|
||||
void QuickTimeDecoder::init() {
|
||||
Audio::QuickTimeAudioDecoder::init();
|
||||
|
||||
_videoStreamIndex = -1;
|
||||
_videoTrackIndex = -1;
|
||||
_startTime = 0;
|
||||
|
||||
// Find video streams
|
||||
for (uint32 i = 0; i < _numStreams; i++)
|
||||
if (_streams[i]->codec_type == CODEC_TYPE_VIDEO && _videoStreamIndex < 0)
|
||||
_videoStreamIndex = i;
|
||||
for (uint32 i = 0; i < _tracks.size(); i++)
|
||||
if (_tracks[i]->codecType == CODEC_TYPE_VIDEO && _videoTrackIndex < 0)
|
||||
_videoTrackIndex = i;
|
||||
|
||||
// Start the audio codec if we've got one that we can handle
|
||||
if (_audStream) {
|
||||
@ -348,9 +348,9 @@ void QuickTimeDecoder::init() {
|
||||
}
|
||||
|
||||
// Initialize video, if present
|
||||
if (_videoStreamIndex >= 0) {
|
||||
for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++)
|
||||
((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i])->initCodec();
|
||||
if (_videoTrackIndex >= 0) {
|
||||
for (uint32 i = 0; i < _tracks[_videoTrackIndex]->sampleDescs.size(); i++)
|
||||
((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[i])->initCodec();
|
||||
|
||||
if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
|
||||
// We have to initialize the scaled surface
|
||||
@ -360,11 +360,11 @@ void QuickTimeDecoder::init() {
|
||||
}
|
||||
}
|
||||
|
||||
Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamContext *st, uint32 format) {
|
||||
if (st->codec_type == CODEC_TYPE_VIDEO) {
|
||||
Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(Track *track, uint32 format) {
|
||||
if (track->codecType == CODEC_TYPE_VIDEO) {
|
||||
debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
|
||||
|
||||
VideoSampleDesc *entry = new VideoSampleDesc(st, format);
|
||||
VideoSampleDesc *entry = new VideoSampleDesc(track, format);
|
||||
|
||||
_fd->readUint16BE(); // version
|
||||
_fd->readUint16BE(); // revision level
|
||||
@ -378,21 +378,21 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
|
||||
// The width is most likely invalid for entries after the first one
|
||||
// so only set the overall width if it is not zero here.
|
||||
if (width)
|
||||
st->width = width;
|
||||
track->width = width;
|
||||
|
||||
if (height)
|
||||
st->height = height;
|
||||
track->height = height;
|
||||
|
||||
_fd->readUint32BE(); // horiz resolution
|
||||
_fd->readUint32BE(); // vert resolution
|
||||
_fd->readUint32BE(); // data size, always 0
|
||||
_fd->readUint16BE(); // frames per samples
|
||||
|
||||
byte codec_name[32];
|
||||
_fd->read(codec_name, 32); // codec name, pascal string (FIXME: true for mp4?)
|
||||
if (codec_name[0] <= 31) {
|
||||
memcpy(entry->_codecName, &codec_name[1], codec_name[0]);
|
||||
entry->_codecName[codec_name[0]] = 0;
|
||||
byte codecName[32];
|
||||
_fd->read(codecName, 32); // codec name, pascal string (FIXME: true for mp4?)
|
||||
if (codecName[0] <= 31) {
|
||||
memcpy(entry->_codecName, &codecName[1], codecName[0]);
|
||||
entry->_codecName[codecName[0]] = 0;
|
||||
}
|
||||
|
||||
entry->_bitsPerSample = _fd->readUint16BE(); // depth
|
||||
@ -455,7 +455,7 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
|
||||
}
|
||||
|
||||
// Pass it on up
|
||||
return Audio::QuickTimeAudioDecoder::readSampleDesc(st, format);
|
||||
return Audio::QuickTimeAudioDecoder::readSampleDesc(track, format);
|
||||
}
|
||||
|
||||
void QuickTimeDecoder::close() {
|
||||
@ -472,7 +472,7 @@ void QuickTimeDecoder::close() {
|
||||
}
|
||||
|
||||
Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId) {
|
||||
if (_videoStreamIndex < 0)
|
||||
if (_videoTrackIndex < 0)
|
||||
return NULL;
|
||||
|
||||
// First, we have to track down which chunk holds the sample and which sample in the chunk contains the frame we are looking for.
|
||||
@ -480,22 +480,22 @@ Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId)
|
||||
int32 sampleInChunk = 0;
|
||||
int32 actualChunk = -1;
|
||||
|
||||
for (uint32 i = 0; i < _streams[_videoStreamIndex]->chunk_count; i++) {
|
||||
for (uint32 i = 0; i < _tracks[_videoTrackIndex]->chunkCount; i++) {
|
||||
int32 sampleToChunkIndex = -1;
|
||||
|
||||
for (uint32 j = 0; j < _streams[_videoStreamIndex]->sample_to_chunk_sz; j++)
|
||||
if (i >= _streams[_videoStreamIndex]->sample_to_chunk[j].first)
|
||||
for (uint32 j = 0; j < _tracks[_videoTrackIndex]->sampleToChunkCount; j++)
|
||||
if (i >= _tracks[_videoTrackIndex]->sampleToChunk[j].first)
|
||||
sampleToChunkIndex = j;
|
||||
|
||||
if (sampleToChunkIndex < 0)
|
||||
error("This chunk (%d) is imaginary", sampleToChunkIndex);
|
||||
|
||||
totalSampleCount += _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
|
||||
totalSampleCount += _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].count;
|
||||
|
||||
if (totalSampleCount > getCurFrame()) {
|
||||
actualChunk = i;
|
||||
descId = _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].id;
|
||||
sampleInChunk = _streams[_videoStreamIndex]->sample_to_chunk[sampleToChunkIndex].count - totalSampleCount + getCurFrame();
|
||||
descId = _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].id;
|
||||
sampleInChunk = _tracks[_videoTrackIndex]->sampleToChunk[sampleToChunkIndex].count - totalSampleCount + getCurFrame();
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -506,23 +506,23 @@ Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId)
|
||||
}
|
||||
|
||||
// Next seek to that frame
|
||||
_fd->seek(_streams[_videoStreamIndex]->chunk_offsets[actualChunk]);
|
||||
_fd->seek(_tracks[_videoTrackIndex]->chunkOffsets[actualChunk]);
|
||||
|
||||
// Then, if the chunk holds more than one frame, seek to where the frame we want is located
|
||||
for (int32 i = getCurFrame() - sampleInChunk; i < getCurFrame(); i++) {
|
||||
if (_streams[_videoStreamIndex]->sample_size != 0)
|
||||
_fd->skip(_streams[_videoStreamIndex]->sample_size);
|
||||
if (_tracks[_videoTrackIndex]->sampleSize != 0)
|
||||
_fd->skip(_tracks[_videoTrackIndex]->sampleSize);
|
||||
else
|
||||
_fd->skip(_streams[_videoStreamIndex]->sample_sizes[i]);
|
||||
_fd->skip(_tracks[_videoTrackIndex]->sampleSizes[i]);
|
||||
}
|
||||
|
||||
// Finally, read in the raw data for the frame
|
||||
//printf ("Frame Data[%d]: Offset = %d, Size = %d\n", getCurFrame(), _fd->pos(), _streams[_videoStreamIndex]->sample_sizes[getCurFrame()]);
|
||||
//printf ("Frame Data[%d]: Offset = %d, Size = %d\n", getCurFrame(), _fd->pos(), _tracks[_videoTrackIndex]->sampleSizes[getCurFrame()]);
|
||||
|
||||
if (_streams[_videoStreamIndex]->sample_size != 0)
|
||||
return _fd->readStream(_streams[_videoStreamIndex]->sample_size);
|
||||
if (_tracks[_videoTrackIndex]->sampleSize != 0)
|
||||
return _fd->readStream(_tracks[_videoTrackIndex]->sampleSize);
|
||||
|
||||
return _fd->readStream(_streams[_videoStreamIndex]->sample_sizes[getCurFrame()]);
|
||||
return _fd->readStream(_tracks[_videoTrackIndex]->sampleSizes[getCurFrame()]);
|
||||
}
|
||||
|
||||
void QuickTimeDecoder::updateAudioBuffer() {
|
||||
@ -531,21 +531,21 @@ void QuickTimeDecoder::updateAudioBuffer() {
|
||||
|
||||
uint32 numberOfChunksNeeded = 0;
|
||||
|
||||
if (_videoStreamIndex < 0 || _curFrame == (int32)_streams[_videoStreamIndex]->nb_frames - 1) {
|
||||
if (_videoTrackIndex < 0 || _curFrame == (int32)_tracks[_videoTrackIndex]->frameCount - 1) {
|
||||
// If we have no video, there's nothing to base our buffer against
|
||||
// However, one must ask why a QuickTimeDecoder is being used instead of the nice makeQuickTimeStream() function
|
||||
|
||||
// If we're on the last frame, make sure all audio remaining is buffered
|
||||
numberOfChunksNeeded = _streams[_audioStreamIndex]->chunk_count;
|
||||
numberOfChunksNeeded = _tracks[_audioTrackIndex]->chunkCount;
|
||||
} else {
|
||||
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
|
||||
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];
|
||||
|
||||
// Calculate the amount of chunks we need in memory until the next frame
|
||||
uint32 timeToNextFrame = getTimeToNextFrame();
|
||||
uint32 timeFilled = 0;
|
||||
uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
|
||||
|
||||
for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
|
||||
for (; timeFilled < timeToNextFrame && curAudioChunk < _tracks[_audioTrackIndex]->chunkCount; numberOfChunksNeeded++, curAudioChunk++) {
|
||||
uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk);
|
||||
assert(sampleCount);
|
||||
|
||||
@ -557,11 +557,11 @@ void QuickTimeDecoder::updateAudioBuffer() {
|
||||
}
|
||||
|
||||
// Keep three streams in buffer so that if/when the first two end, it goes right into the next
|
||||
while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _streams[_audioStreamIndex]->chunk_count)
|
||||
while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _tracks[_audioTrackIndex]->chunkCount)
|
||||
queueNextAudioChunk();
|
||||
}
|
||||
|
||||
QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
|
||||
QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentTrack, codecTag) {
|
||||
memset(_codecName, 0, 32);
|
||||
_colorTableId = 0;
|
||||
_palette = 0;
|
||||
@ -582,15 +582,15 @@ void QuickTimeDecoder::VideoSampleDesc::initCodec() {
|
||||
break;
|
||||
case MKTAG('r','p','z','a'):
|
||||
// Apple Video ("Road Pizza"): Used by some Myst videos.
|
||||
_videoCodec = new RPZADecoder(_parentStream->width, _parentStream->height);
|
||||
_videoCodec = new RPZADecoder(_parentTrack->width, _parentTrack->height);
|
||||
break;
|
||||
case MKTAG('r','l','e',' '):
|
||||
// QuickTime RLE: Used by some Myst ME videos.
|
||||
_videoCodec = new QTRLEDecoder(_parentStream->width, _parentStream->height, _bitsPerSample & 0x1f);
|
||||
_videoCodec = new QTRLEDecoder(_parentTrack->width, _parentTrack->height, _bitsPerSample & 0x1f);
|
||||
break;
|
||||
case MKTAG('s','m','c',' '):
|
||||
// Apple SMC: Used by some Myst videos.
|
||||
_videoCodec = new SMCDecoder(_parentStream->width, _parentStream->height);
|
||||
_videoCodec = new SMCDecoder(_parentTrack->width, _parentTrack->height);
|
||||
break;
|
||||
case MKTAG('S','V','Q','1'):
|
||||
// Sorenson Video 1: Used by some Myst ME videos.
|
||||
@ -606,7 +606,7 @@ void QuickTimeDecoder::VideoSampleDesc::initCodec() {
|
||||
break;
|
||||
case MKTAG('Q','k','B','k'):
|
||||
// CDToons: Used by most of the Broderbund games.
|
||||
_videoCodec = new CDToonsDecoder(_parentStream->width, _parentStream->height);
|
||||
_videoCodec = new CDToonsDecoder(_parentTrack->width, _parentTrack->height);
|
||||
break;
|
||||
default:
|
||||
warning("Unsupported codec \'%s\'", tag2str(_codecTag));
|
||||
|
@ -116,7 +116,7 @@ public:
|
||||
protected:
|
||||
class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
|
||||
public:
|
||||
VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
|
||||
VideoSampleDesc(Common::QuickTimeParser::Track *parentTrack, uint32 codecTag);
|
||||
~VideoSampleDesc();
|
||||
|
||||
void initCodec();
|
||||
@ -129,7 +129,7 @@ protected:
|
||||
Codec *_videoCodec;
|
||||
};
|
||||
|
||||
Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
|
||||
Common::QuickTimeParser::SampleDesc *readSampleDesc(Track *track, uint32 format);
|
||||
|
||||
private:
|
||||
Common::SeekableReadStream *getNextFramePacket(uint32 &descId);
|
||||
@ -146,7 +146,7 @@ private:
|
||||
Codec *createCodec(uint32 codecTag, byte bitsPerPixel);
|
||||
Codec *findDefaultVideoCodec() const;
|
||||
uint32 _nextFrameStartTime;
|
||||
int8 _videoStreamIndex;
|
||||
int _videoTrackIndex;
|
||||
uint32 findKeyFrame(uint32 frame) const;
|
||||
|
||||
bool _dirtyPalette;
|
||||