COMMON: Begin objectifying QuickTimeParser::SampleDesc further

This is preparation for multiple video and audio tracks
Matthew Hoops 2011-06-02 18:40:49 -04:00
parent 717248e162
commit 2e06681698
6 changed files with 230 additions and 216 deletions
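In outline, the change turns SampleDesc into a small class that knows its owning stream and its codec tag, and moves the decoders' per-codec helpers onto AudioSampleDesc and VideoSampleDesc, so that later each track can carry and query its own sample descriptions. A condensed sketch of the resulting layout, pieced together from the hunks below (member lists abbreviated, not the verbatim headers):

// Base type: Common::QuickTimeParser::SampleDesc
class SampleDesc {
public:
    SampleDesc(MOVStreamContext *parentStream, uint32 codecTag);
    virtual ~SampleDesc() {}

    uint32 getCodecTag() const { return _codecTag; }

protected:
    MOVStreamContext *_parentStream; // the track this description belongs to
    uint32 _codecTag;
};

// Audio::QuickTimeAudioDecoder::AudioSampleDesc absorbs the codec check,
// chunk sample counting and audio stream creation that used to live on the decoder.
class AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
    AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);

    bool isAudioCodecSupported() const;
    uint32 getAudioChunkSampleCount(uint chunk) const;
    AudioStream *createAudioStream(Common::SeekableReadStream *stream) const;

    // still public for now ("TODO: Make private in the long run")
    uint16 _bitsPerSample, _channels;
    uint32 _sampleRate, _samplesPerFrame, _bytesPerFrame;
};

// Video::QuickTimeDecoder::VideoSampleDesc likewise takes over codec creation:
// initCodec() replaces QuickTimeDecoder::createCodec().
class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
    VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
    ~VideoSampleDesc();

    void initCodec();

    uint16 _bitsPerSample, _colorTableId;
    char _codecName[32];
    byte *_palette;
    Codec *_videoCodec;
};

Callers then work through the description object itself, e.g. entry->isAudioCodecSupported() and entry->createAudioStream(...) on the audio side and a per-entry initCodec() loop on the video side, as the diffs below show.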

@@ -79,13 +79,13 @@ void QuickTimeAudioDecoder::init() {
if (_audioStreamIndex >= 0) {
AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
if (checkAudioCodecSupport(entry->codecTag, _streams[_audioStreamIndex]->objectTypeMP4)) {
_audStream = makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
if (entry->isAudioCodecSupported()) {
_audStream = makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
_curAudioChunk = 0;
// Make sure the bits per sample transfers to the sample size
if (entry->codecTag == MKTAG('r', 'a', 'w', ' ') || entry->codecTag == MKTAG('t', 'w', 'o', 's'))
_streams[_audioStreamIndex]->sample_size = (entry->bitsPerSample / 8) * entry->channels;
if (entry->getCodecTag() == MKTAG('r', 'a', 'w', ' ') || entry->getCodecTag() == MKTAG('t', 'w', 'o', 's'))
_streams[_audioStreamIndex]->sample_size = (entry->_bitsPerSample / 8) * entry->_channels;
}
}
}
@@ -94,32 +94,31 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
if (st->codec_type == CODEC_TYPE_AUDIO) {
debug(0, "Audio Codec FourCC: \'%s\'", tag2str(format));
AudioSampleDesc *entry = new AudioSampleDesc();
entry->codecTag = format;
AudioSampleDesc *entry = new AudioSampleDesc(st, format);
uint16 stsdVersion = _fd->readUint16BE();
_fd->readUint16BE(); // revision level
_fd->readUint32BE(); // vendor
entry->channels = _fd->readUint16BE(); // channel count
entry->bitsPerSample = _fd->readUint16BE(); // sample size
entry->_channels = _fd->readUint16BE(); // channel count
entry->_bitsPerSample = _fd->readUint16BE(); // sample size
_fd->readUint16BE(); // compression id = 0
_fd->readUint16BE(); // packet size = 0
entry->sampleRate = (_fd->readUint32BE() >> 16);
entry->_sampleRate = (_fd->readUint32BE() >> 16);
debug(0, "stsd version =%d", stsdVersion);
if (stsdVersion == 0) {
// Not used, except in special cases. See below.
entry->samplesPerFrame = entry->bytesPerFrame = 0;
entry->_samplesPerFrame = entry->_bytesPerFrame = 0;
} else if (stsdVersion == 1) {
// Read QT version 1 fields. In version 0 these don't exist.
entry->samplesPerFrame = _fd->readUint32BE();
debug(0, "stsd samples_per_frame =%d",entry->samplesPerFrame);
entry->_samplesPerFrame = _fd->readUint32BE();
debug(0, "stsd samples_per_frame =%d",entry->_samplesPerFrame);
_fd->readUint32BE(); // bytes per packet
entry->bytesPerFrame = _fd->readUint32BE();
debug(0, "stsd bytes_per_frame =%d", entry->bytesPerFrame);
entry->_bytesPerFrame = _fd->readUint32BE();
debug(0, "stsd bytes_per_frame =%d", entry->_bytesPerFrame);
_fd->readUint32BE(); // bytes per sample
} else {
warning("Unsupported QuickTime STSD audio version %d", stsdVersion);
@@ -130,12 +129,12 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
// Version 0 videos (such as the Riven ones) don't have this set,
// but we need it later on. Add it in here.
if (format == MKTAG('i', 'm', 'a', '4')) {
entry->samplesPerFrame = 64;
entry->bytesPerFrame = 34 * entry->channels;
entry->_samplesPerFrame = 64;
entry->_bytesPerFrame = 34 * entry->_channels;
}
if (entry->sampleRate == 0 && st->time_scale > 1)
entry->sampleRate = st->time_scale;
if (entry->_sampleRate == 0 && st->time_scale > 1)
entry->_sampleRate = st->time_scale;
return entry;
}
@@ -143,91 +142,6 @@ Common::QuickTimeParser::SampleDesc *QuickTimeAudioDecoder::readSampleDesc(MOVSt
return 0;
}
bool QuickTimeAudioDecoder::checkAudioCodecSupport(uint32 tag, byte objectTypeMP4) {
// Check if the codec is a supported codec
if (tag == MKTAG('t', 'w', 'o', 's') || tag == MKTAG('r', 'a', 'w', ' ') || tag == MKTAG('i', 'm', 'a', '4'))
return true;
#ifdef AUDIO_QDM2_H
if (tag == MKTAG('Q', 'D', 'M', '2'))
return true;
#endif
if (tag == MKTAG('m', 'p', '4', 'a')) {
Common::String audioType;
switch (objectTypeMP4) {
case 0x40: // AAC
#ifdef USE_FAAD
return true;
#else
audioType = "AAC";
break;
#endif
default:
audioType = "Unknown";
break;
}
warning("No MPEG-4 audio (%s) support", audioType.c_str());
} else
warning("Audio Codec Not Supported: \'%s\'", tag2str(tag));
return false;
}
AudioStream *QuickTimeAudioDecoder::createAudioStream(Common::SeekableReadStream *stream) {
if (!stream || _audioStreamIndex < 0)
return NULL;
AudioSampleDesc *entry = (AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
if (entry->codecTag == MKTAG('t', 'w', 'o', 's') || entry->codecTag == MKTAG('r', 'a', 'w', ' ')) {
// Fortunately, most of the audio used in Myst videos is raw...
uint16 flags = 0;
if (entry->codecTag == MKTAG('r', 'a', 'w', ' '))
flags |= FLAG_UNSIGNED;
if (entry->channels == 2)
flags |= FLAG_STEREO;
if (entry->bitsPerSample == 16)
flags |= FLAG_16BITS;
uint32 dataSize = stream->size();
byte *data = (byte *)malloc(dataSize);
stream->read(data, dataSize);
delete stream;
return makeRawStream(data, dataSize, entry->sampleRate, flags);
} else if (entry->codecTag == MKTAG('i', 'm', 'a', '4')) {
// Riven uses this codec (as do some Myst ME videos)
return makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), kADPCMApple, entry->sampleRate, entry->channels, 34);
} else if (entry->codecTag == MKTAG('m', 'p', '4', 'a')) {
// The 7th Guest iOS uses an MPEG-4 codec
#ifdef USE_FAAD
if (_streams[_audioStreamIndex]->objectTypeMP4 == 0x40)
return makeAACStream(stream, DisposeAfterUse::YES, _streams[_audioStreamIndex]->extradata);
#endif
#ifdef AUDIO_QDM2_H
} else if (entry->codecTag == MKTAG('Q', 'D', 'M', '2')) {
// Myst ME uses this codec for many videos
return makeQDM2Stream(stream, _streams[_audioStreamIndex]->extradata);
#endif
}
error("Unsupported audio codec");
return NULL;
}
uint32 QuickTimeAudioDecoder::getAudioChunkSampleCount(uint chunk) {
if (_audioStreamIndex < 0)
return 0;
uint32 sampleCount = 0;
for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
if (chunk >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
sampleCount = _streams[_audioStreamIndex]->sample_to_chunk[j].count;
return sampleCount;
}
bool QuickTimeAudioDecoder::isOldDemuxing() const {
assert(_audioStreamIndex >= 0);
return _streams[_audioStreamIndex]->stts_count == 1 && _streams[_audioStreamIndex]->stts_data[0].duration == 1;
@@ -240,7 +154,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
_fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
// First, we have to get the sample count
uint32 sampleCount = getAudioChunkSampleCount(_curAudioChunk);
uint32 sampleCount = entry->getAudioChunkSampleCount(_curAudioChunk);
assert(sampleCount);
if (isOldDemuxing()) {
@@ -250,12 +164,12 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
while (sampleCount > 0) {
uint32 samples = 0, size = 0;
if (entry->samplesPerFrame >= 160) {
samples = entry->samplesPerFrame;
size = entry->bytesPerFrame;
} else if (entry->samplesPerFrame > 1) {
samples = MIN<uint32>((1024 / entry->samplesPerFrame) * entry->samplesPerFrame, sampleCount);
size = (samples / entry->samplesPerFrame) * entry->bytesPerFrame;
if (entry->_samplesPerFrame >= 160) {
samples = entry->_samplesPerFrame;
size = entry->_bytesPerFrame;
} else if (entry->_samplesPerFrame > 1) {
samples = MIN<uint32>((1024 / entry->_samplesPerFrame) * entry->_samplesPerFrame, sampleCount);
size = (samples / entry->_samplesPerFrame) * entry->_bytesPerFrame;
} else {
samples = MIN<uint32>(1024, sampleCount);
size = samples * _streams[_audioStreamIndex]->sample_size;
@@ -274,7 +188,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
// Find our starting sample
uint32 startSample = 0;
for (uint32 i = 0; i < _curAudioChunk; i++)
startSample += getAudioChunkSampleCount(i);
startSample += entry->getAudioChunkSampleCount(i);
for (uint32 i = 0; i < sampleCount; i++) {
uint32 size = (_streams[_audioStreamIndex]->sample_size != 0) ? _streams[_audioStreamIndex]->sample_size : _streams[_audioStreamIndex]->sample_sizes[i + startSample];
@@ -288,7 +202,7 @@ void QuickTimeAudioDecoder::queueNextAudioChunk() {
}
// Now queue the buffer
_audStream->queueAudioStream(createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
_audStream->queueAudioStream(entry->createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
delete wStream;
_curAudioChunk++;
@@ -301,7 +215,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
// Re-create the audio stream
delete _audStream;
Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_streams[_audioStreamIndex]->sampleDescs[0];
_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
_audStream = Audio::makeQueuingAudioStream(entry->_sampleRate, entry->_channels == 2);
// First, we need to track down what audio sample we need
Audio::Timestamp curAudioTime = where.convertToFramerate(_streams[_audioStreamIndex]->time_scale);
@@ -325,7 +239,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
uint32 totalSamples = 0;
_curAudioChunk = 0;
for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
uint32 chunkSampleCount = getAudioChunkSampleCount(i);
uint32 chunkSampleCount = entry->getAudioChunkSampleCount(i);
if (seekSample < totalSamples + chunkSampleCount)
break;
@@ -338,7 +252,7 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
if (sample != totalSamples) {
// HACK: Skip a certain amount of samples from the stream
// (There's got to be a better way to do this!)
int skipSamples = (sample - totalSamples) * entry->channels;
int skipSamples = (sample - totalSamples) * entry->_channels;
int16 *tempBuffer = new int16[skipSamples];
_audStream->readBuffer(tempBuffer, skipSamples);
@@ -346,11 +260,92 @@ void QuickTimeAudioDecoder::setAudioStreamPos(const Timestamp &where) {
}
}
QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc() : Common::QuickTimeParser::SampleDesc() {
channels = 0;
sampleRate = 0;
samplesPerFrame = 0;
bytesPerFrame = 0;
QuickTimeAudioDecoder::AudioSampleDesc::AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
_channels = 0;
_sampleRate = 0;
_samplesPerFrame = 0;
_bytesPerFrame = 0;
_bitsPerSample = 0;
}
bool QuickTimeAudioDecoder::AudioSampleDesc::isAudioCodecSupported() const {
// Check if the codec is a supported codec
if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ') || _codecTag == MKTAG('i', 'm', 'a', '4'))
return true;
#ifdef AUDIO_QDM2_H
if (_codecTag == MKTAG('Q', 'D', 'M', '2'))
return true;
#endif
if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
Common::String audioType;
switch (_parentStream->objectTypeMP4) {
case 0x40: // AAC
#ifdef USE_FAAD
return true;
#else
audioType = "AAC";
break;
#endif
default:
audioType = "Unknown";
break;
}
warning("No MPEG-4 audio (%s) support", audioType.c_str());
} else
warning("Audio Codec Not Supported: \'%s\'", tag2str(_codecTag));
return false;
}
uint32 QuickTimeAudioDecoder::AudioSampleDesc::getAudioChunkSampleCount(uint chunk) const {
uint32 sampleCount = 0;
for (uint32 j = 0; j < _parentStream->sample_to_chunk_sz; j++)
if (chunk >= _parentStream->sample_to_chunk[j].first)
sampleCount = _parentStream->sample_to_chunk[j].count;
return sampleCount;
}
AudioStream *QuickTimeAudioDecoder::AudioSampleDesc::createAudioStream(Common::SeekableReadStream *stream) const {
if (!stream)
return 0;
if (_codecTag == MKTAG('t', 'w', 'o', 's') || _codecTag == MKTAG('r', 'a', 'w', ' ')) {
// Fortunately, most of the audio used in Myst videos is raw...
uint16 flags = 0;
if (_codecTag == MKTAG('r', 'a', 'w', ' '))
flags |= FLAG_UNSIGNED;
if (_channels == 2)
flags |= FLAG_STEREO;
if (_bitsPerSample == 16)
flags |= FLAG_16BITS;
uint32 dataSize = stream->size();
byte *data = (byte *)malloc(dataSize);
stream->read(data, dataSize);
delete stream;
return makeRawStream(data, dataSize, _sampleRate, flags);
} else if (_codecTag == MKTAG('i', 'm', 'a', '4')) {
// Riven uses this codec (as do some Myst ME videos)
return makeADPCMStream(stream, DisposeAfterUse::YES, stream->size(), kADPCMApple, _sampleRate, _channels, 34);
} else if (_codecTag == MKTAG('m', 'p', '4', 'a')) {
// The 7th Guest iOS uses an MPEG-4 codec
#ifdef USE_FAAD
if (_parentStream->objectTypeMP4 == 0x40)
return makeAACStream(stream, DisposeAfterUse::YES, _parentStream->extradata);
#endif
#ifdef AUDIO_QDM2_H
} else if (_codecTag == MKTAG('Q', 'D', 'M', '2')) {
// Myst ME uses this codec for many videos
return makeQDM2Stream(stream, _parentStream->extradata);
#endif
}
error("Unsupported audio codec");
return NULL;
}
/**

@@ -65,30 +65,33 @@ public:
bool loadAudioStream(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeFileHandle);
protected:
struct AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
AudioSampleDesc();
class AudioSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
AudioSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
uint16 channels;
uint32 sampleRate;
uint32 samplesPerFrame;
uint32 bytesPerFrame;
bool isAudioCodecSupported() const;
uint32 getAudioChunkSampleCount(uint chunk) const;
AudioStream *createAudioStream(Common::SeekableReadStream *stream) const;
// TODO: Make private in the long run
uint16 _bitsPerSample;
uint16 _channels;
uint32 _sampleRate;
uint32 _samplesPerFrame;
uint32 _bytesPerFrame;
};
// Common::QuickTimeParser API
virtual Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);
AudioStream *createAudioStream(Common::SeekableReadStream *stream);
bool checkAudioCodecSupport(uint32 tag, byte objectTypeMP4);
void init();
void queueNextAudioChunk();
uint32 getAudioChunkSampleCount(uint chunk);
int8 _audioStreamIndex;
uint _curAudioChunk;
QueuingAudioStream *_audStream;
void setAudioStreamPos(const Timestamp &where);
bool isOldDemuxing() const;
void queueNextAudioChunk();
int _audioStreamIndex;
uint _curAudioChunk;
QueuingAudioStream *_audStream;
};
} // End of namespace Audio

@@ -686,7 +686,7 @@ int QuickTimeParser::readWAVE(MOVatom atom) {
if (atom.size > (1 << 30))
return -1;
if (st->sampleDescs[0]->codecTag == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
if (st->sampleDescs[0]->getCodecTag() == MKTAG('Q', 'D', 'M', '2')) // Read extradata for QDM2
st->extradata = _fd->readStream(atom.size - 8);
else if (atom.size > 8)
return readDefault(atom);
@@ -773,9 +773,9 @@ void QuickTimeParser::close() {
_fd = 0;
}
QuickTimeParser::SampleDesc::SampleDesc() {
codecTag = 0;
bitsPerSample = 0;
QuickTimeParser::SampleDesc::SampleDesc(MOVStreamContext *parentStream, uint32 codecTag) {
_parentStream = parentStream;
_codecTag = codecTag;
}
QuickTimeParser::MOVStreamContext::MOVStreamContext() {

@@ -116,12 +116,18 @@ protected:
Common::Rational mediaRate;
};
struct SampleDesc {
SampleDesc();
struct MOVStreamContext;
class SampleDesc {
public:
SampleDesc(MOVStreamContext *parentStream, uint32 codecTag);
virtual ~SampleDesc() {}
uint32 codecTag;
uint16 bitsPerSample;
uint32 getCodecTag() const { return _codecTag; }
protected:
MOVStreamContext *_parentStream;
uint32 _codecTag;
};
enum CodecType {

@@ -203,38 +203,6 @@ void QuickTimeDecoder::seekToTime(Audio::Timestamp time) {
seekToFrame(frame);
}
Codec *QuickTimeDecoder::createCodec(uint32 codecTag, byte bitsPerPixel) {
if (codecTag == MKTAG('c','v','i','d')) {
// Cinepak: As used by most Myst and all Riven videos as well as some Myst ME videos. "The Chief" videos also use this.
return new CinepakDecoder(bitsPerPixel);
} else if (codecTag == MKTAG('r','p','z','a')) {
// Apple Video ("Road Pizza"): Used by some Myst videos.
return new RPZADecoder(getWidth(), getHeight());
} else if (codecTag == MKTAG('r','l','e',' ')) {
// QuickTime RLE: Used by some Myst ME videos.
return new QTRLEDecoder(getWidth(), getHeight(), bitsPerPixel);
} else if (codecTag == MKTAG('s','m','c',' ')) {
// Apple SMC: Used by some Myst videos.
return new SMCDecoder(getWidth(), getHeight());
} else if (codecTag == MKTAG('S','V','Q','1')) {
// Sorenson Video 1: Used by some Myst ME videos.
warning("Sorenson Video 1 not yet supported");
} else if (codecTag == MKTAG('S','V','Q','3')) {
// Sorenson Video 3: Used by some Myst ME videos.
warning("Sorenson Video 3 not yet supported");
} else if (codecTag == MKTAG('j','p','e','g')) {
// Motion JPEG: Used by some Myst ME 10th Anniversary videos.
return new JPEGDecoder();
} else if (codecTag == MKTAG('Q','k','B','k')) {
// CDToons: Used by most of the Broderbund games.
return new CDToonsDecoder(getWidth(), getHeight());
} else {
warning("Unsupported codec \'%s\'", tag2str(codecTag));
}
return NULL;
}
void QuickTimeDecoder::startAudio() {
if (_audStream) {
updateAudioBuffer();
@@ -256,7 +224,7 @@ Codec *QuickTimeDecoder::findDefaultVideoCodec() const {
if (_videoStreamIndex < 0 || _streams[_videoStreamIndex]->sampleDescs.empty())
return 0;
return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->videoCodec;
return ((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[0])->_videoCodec;
}
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
@@ -282,22 +250,22 @@ const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
// Find which video description entry we want
VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[descId - 1];
if (!entry->videoCodec)
if (!entry->_videoCodec)
return 0;
const Graphics::Surface *frame = entry->videoCodec->decodeImage(frameData);
const Graphics::Surface *frame = entry->_videoCodec->decodeImage(frameData);
delete frameData;
// Update the palette
if (entry->videoCodec->containsPalette()) {
if (entry->_videoCodec->containsPalette()) {
// The codec itself contains a palette
if (entry->videoCodec->hasDirtyPalette()) {
_palette = entry->videoCodec->getPalette();
if (entry->_videoCodec->hasDirtyPalette()) {
_palette = entry->_videoCodec->getPalette();
_dirtyPalette = true;
}
} else {
// Check if the video description has been updated
byte *palette = entry->palette;
byte *palette = entry->_palette;
if (palette != _palette) {
_palette = palette;
@@ -381,10 +349,8 @@ void QuickTimeDecoder::init() {
// Initialize video, if present
if (_videoStreamIndex >= 0) {
for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++) {
VideoSampleDesc *entry = (VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i];
entry->videoCodec = createCodec(entry->codecTag, entry->bitsPerSample & 0x1F);
}
for (uint32 i = 0; i < _streams[_videoStreamIndex]->sampleDescs.size(); i++)
((VideoSampleDesc *)_streams[_videoStreamIndex]->sampleDescs[i])->initCodec();
if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
// We have to initialize the scaled surface
@@ -398,8 +364,7 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
if (st->codec_type == CODEC_TYPE_VIDEO) {
debug(0, "Video Codec FourCC: \'%s\'", tag2str(format));
VideoSampleDesc *entry = new VideoSampleDesc();
entry->codecTag = format;
VideoSampleDesc *entry = new VideoSampleDesc(st, format);
_fd->readUint16BE(); // version
_fd->readUint16BE(); // revision level
@@ -426,24 +391,24 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
byte codec_name[32];
_fd->read(codec_name, 32); // codec name, pascal string (FIXME: true for mp4?)
if (codec_name[0] <= 31) {
memcpy(entry->codecName, &codec_name[1], codec_name[0]);
entry->codecName[codec_name[0]] = 0;
memcpy(entry->_codecName, &codec_name[1], codec_name[0]);
entry->_codecName[codec_name[0]] = 0;
}
entry->bitsPerSample = _fd->readUint16BE(); // depth
entry->colorTableId = _fd->readUint16BE(); // colortable id
entry->_bitsPerSample = _fd->readUint16BE(); // depth
entry->_colorTableId = _fd->readUint16BE(); // colortable id
// figure out the palette situation
byte colorDepth = entry->bitsPerSample & 0x1F;
bool colorGreyscale = (entry->bitsPerSample & 0x20) != 0;
byte colorDepth = entry->_bitsPerSample & 0x1F;
bool colorGreyscale = (entry->_bitsPerSample & 0x20) != 0;
debug(0, "color depth: %d", colorDepth);
// if the depth is 2, 4, or 8 bpp, file is palettized
if (colorDepth == 2 || colorDepth == 4 || colorDepth == 8) {
// Initialize the palette
entry->palette = new byte[256 * 3];
memset(entry->palette, 0, 256 * 3);
entry->_palette = new byte[256 * 3];
memset(entry->_palette, 0, 256 * 3);
if (colorGreyscale) {
debug(0, "Greyscale palette");
@@ -453,12 +418,12 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
int16 colorIndex = 255;
byte colorDec = 256 / (colorCount - 1);
for (byte j = 0; j < colorCount; j++) {
entry->palette[j * 3] = entry->palette[j * 3 + 1] = entry->palette[j * 3 + 2] = colorIndex;
entry->_palette[j * 3] = entry->_palette[j * 3 + 1] = entry->_palette[j * 3 + 2] = colorIndex;
colorIndex -= colorDec;
if (colorIndex < 0)
colorIndex = 0;
}
} else if (entry->colorTableId & 0x08) {
} else if (entry->_colorTableId & 0x08) {
// if flag bit 3 is set, use the default palette
//uint16 colorCount = 1 << colorDepth;
@@ -476,11 +441,11 @@ Common::QuickTimeParser::SampleDesc *QuickTimeDecoder::readSampleDesc(MOVStreamC
// up front
_fd->readByte();
_fd->readByte();
entry->palette[j * 3] = _fd->readByte();
entry->_palette[j * 3] = _fd->readByte();
_fd->readByte();
entry->palette[j * 3 + 1] = _fd->readByte();
entry->_palette[j * 3 + 1] = _fd->readByte();
_fd->readByte();
entry->palette[j * 3 + 2] = _fd->readByte();
entry->_palette[j * 3 + 2] = _fd->readByte();
_fd->readByte();
}
}
@@ -581,10 +546,10 @@ void QuickTimeDecoder::updateAudioBuffer() {
uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
uint32 sampleCount = getAudioChunkSampleCount(curAudioChunk);
uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk);
assert(sampleCount);
timeFilled += sampleCount * 1000 / entry->sampleRate;
timeFilled += sampleCount * 1000 / entry->_sampleRate;
}
// Add a couple extra to ensure we don't underrun
@@ -596,16 +561,56 @@ void QuickTimeDecoder::updateAudioBuffer() {
queueNextAudioChunk();
}
QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc() : Common::QuickTimeParser::SampleDesc() {
memset(codecName, 0, 32);
colorTableId = 0;
palette = 0;
videoCodec = 0;
QuickTimeDecoder::VideoSampleDesc::VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag) : Common::QuickTimeParser::SampleDesc(parentStream, codecTag) {
memset(_codecName, 0, 32);
_colorTableId = 0;
_palette = 0;
_videoCodec = 0;
_bitsPerSample = 0;
}
QuickTimeDecoder::VideoSampleDesc::~VideoSampleDesc() {
delete[] palette;
delete videoCodec;
delete[] _palette;
delete _videoCodec;
}
void QuickTimeDecoder::VideoSampleDesc::initCodec() {
switch (_codecTag) {
case MKTAG('c','v','i','d'):
// Cinepak: As used by most Myst and all Riven videos as well as some Myst ME videos. "The Chief" videos also use this.
_videoCodec = new CinepakDecoder(_bitsPerSample & 0x1f);
break;
case MKTAG('r','p','z','a'):
// Apple Video ("Road Pizza"): Used by some Myst videos.
_videoCodec = new RPZADecoder(_parentStream->width, _parentStream->height);
break;
case MKTAG('r','l','e',' '):
// QuickTime RLE: Used by some Myst ME videos.
_videoCodec = new QTRLEDecoder(_parentStream->width, _parentStream->height, _bitsPerSample & 0x1f);
break;
case MKTAG('s','m','c',' '):
// Apple SMC: Used by some Myst videos.
_videoCodec = new SMCDecoder(_parentStream->width, _parentStream->height);
break;
case MKTAG('S','V','Q','1'):
// Sorenson Video 1: Used by some Myst ME videos.
warning("Sorenson Video 1 not yet supported");
break;
case MKTAG('S','V','Q','3'):
// Sorenson Video 3: Used by some Myst ME videos.
warning("Sorenson Video 3 not yet supported");
break;
case MKTAG('j','p','e','g'):
// Motion JPEG: Used by some Myst ME 10th Anniversary videos.
_videoCodec = new JPEGDecoder();
break;
case MKTAG('Q','k','B','k'):
// CDToons: Used by most of the Broderbund games.
_videoCodec = new CDToonsDecoder(_parentStream->width, _parentStream->height);
break;
default:
warning("Unsupported codec \'%s\'", tag2str(_codecTag));
}
}
} // End of namespace Video

@@ -114,14 +114,19 @@ public:
uint32 getDuration() const { return _duration * 1000 / _timeScale; }
protected:
struct VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
VideoSampleDesc();
class VideoSampleDesc : public Common::QuickTimeParser::SampleDesc {
public:
VideoSampleDesc(Common::QuickTimeParser::MOVStreamContext *parentStream, uint32 codecTag);
~VideoSampleDesc();
char codecName[32];
uint16 colorTableId;
byte *palette;
Codec *videoCodec;
void initCodec();
// TODO: Make private in the long run
uint16 _bitsPerSample;
char _codecName[32];
uint16 _colorTableId;
byte *_palette;
Codec *_videoCodec;
};
Common::QuickTimeParser::SampleDesc *readSampleDesc(MOVStreamContext *st, uint32 format);