Merge pull request #668 from somaen/videoFixes

Update VideoDecoders to use the new VideoDecoder-interface (for testing)
This commit is contained in:
Einar Johan Trøan Sømåen 2013-01-02 01:04:14 -08:00
commit bc5ca044a5
29 changed files with 3975 additions and 2555 deletions

View File

@ -73,8 +73,8 @@ MODULE_OBJS := \
movie/codecs/smush_decoder.o \
movie/bink.o \
movie/mpeg.o \
movie/smush.o \
movie/movie.o \
movie/smush.o \
update/packfile.o \
update/mscab.o \
update/lang_filter.o \

View File

@ -20,12 +20,16 @@
*
*/
// License note: This might be covered by GPLv2 (As additions by somaen was produced from
// https://raw.github.com/clone2727/smushplay/master/smushvideo.cpp
#include "common/endian.h"
#include "common/events.h"
#include "common/file.h"
#include "common/rational.h"
#include "common/system.h"
#include "common/timer.h"
#include "common/memstream.h"
#include "audio/audiostream.h"
#include "audio/mixer.h"
@ -44,105 +48,215 @@ namespace Grim {
#define BUFFER_SIZE 16385
#define SMUSH_SPEED 66667
bool SmushDecoder::_demo = false;
static uint16 smushDestTable[5786];
SmushDecoder::SmushDecoder() {
// Set colour-format statically here for SMUSH (5650), to allow for differing
// PixelFormat in engine and renderer (and conversion from Surface there)
// Which means 16 bpp, 565, shift of 11, 5, 0, 0 for RGBA
_format = Graphics::PixelFormat(2, 5, 6, 5, 0, 11, 5, 0, 0);
_nbframes = 0;
_file = 0;
_width = 0;
_height = 0;
_channels = -1;
_freq = 22050;
_videoLooping = false;
_startPos = 0;
_x = 0;
_y = 0;
_blocky8 = new Blocky8();
_blocky16 = new Blocky16();
_stream = NULL;
_videoTrack = NULL;
_audioTrack = NULL;
}
SmushDecoder::~SmushDecoder() {
delete _blocky8;
delete _blocky16;
delete _videoTrack;
delete _audioTrack;
}
void SmushDecoder::init() {
_IACTpos = 0;
_curFrame = -1;
_videoPause = false;
if (!_demo) {
_surface.create(_width, _height, _format);
vimaInit(smushDestTable);
}
_videoTrack->init();
_audioTrack->init();
}
void SmushDecoder::close() {
_surface.free();
if (_stream) {
_stream->finish();
_stream = NULL;
g_system->getMixer()->stopHandle(_soundHandle);
}
VideoDecoder::close();
_audioTrack = NULL;
_videoTrack = NULL;
_videoLooping = false;
_videoPause = true;
_startPos = 0;
if (_file) {
delete _file;
_file = NULL;
}
}
void SmushDecoder::handleWave(const byte *src, uint32 size) {
int16 *dst = (int16 *) malloc(size * _channels * sizeof(int16));
decompressVima(src, dst, size * _channels * 2, smushDestTable);
int flags = Audio::FLAG_16BITS;
if (_channels == 2)
flags |= Audio::FLAG_STEREO;
if (!_stream) {
_stream = Audio::makeQueuingAudioStream(_freq, (_channels == 2));
g_system->getMixer()->playStream(Audio::Mixer::kMusicSoundType, &_soundHandle, _stream);
bool SmushDecoder::readHeader() {
if (!_file) {
return false;
}
if (g_system->getMixer()->isReady()) {
_stream->queueBuffer((byte *)dst, size * _channels * 2, DisposeAfterUse::YES, flags);
uint32 mainTag = _file->readUint32BE();
uint32 pos = _file->pos();
uint32 expectedTag = 0;
uint32 size = _file->readUint32BE(); // file-size
// Verify that we have the correct combination of headers.
if (mainTag == MKTAG('A', 'N', 'I', 'M')) { // Demo
expectedTag = MKTAG('A', 'H', 'D', 'R');
} else if (mainTag == MKTAG('S', 'A', 'N', 'M')) { // Retail
expectedTag = MKTAG('S', 'H', 'D', 'R');
} else {
free(dst);
error("Invalid SMUSH-header");
}
uint32 tag = _file->readUint32BE();
size = _file->readUint32BE();
pos = _file->pos();
uint32 version = 0;
assert(tag == expectedTag);
if (tag == MKTAG('A', 'H', 'D', 'R')) { // Demo
version = _file->readUint16LE();
uint16 nbFrames = _file->readUint16LE();
_file->readUint16BE(); // unknown
int width = -1;
int height = -1;
_videoLooping = false;
_startPos = 0;
_videoTrack = new SmushVideoTrack(width, height, SMUSH_SPEED, nbFrames, false);
_videoTrack->_x = -1;
_videoTrack->_y = -1;
addTrack(_videoTrack);
_file->read(_videoTrack->getPal(), 0x300);
int audioRate = 11025;
if (version == 2) {
_file->readUint32LE(); // framerate
_file->readUint32LE();
audioRate = _file->readUint32LE();
}
_file->readUint32BE();
_file->readUint32BE();
_audioTrack = new SmushAudioTrack(false, audioRate, 2);
return true;
} else if (tag == MKTAG('S', 'H', 'D', 'R')) { // Retail
_file->readUint16LE();
uint16 nbFrames = _file->readUint32LE();
_file->readUint16LE();
int width = _file->readUint16LE();
int height = _file->readUint16LE();
_file->readUint16LE();
int frameRate = _file->readUint32LE();
int16 flags = _file->readUint16LE();
// Output information for checking out the flags
if (Debug::isChannelEnabled(Debug::Movie | Debug::Info)) {
warning("SMUSH Flags:");
for (int i = 0; i < 16; i++) {
warning(" %d", (flags & (1 << i)) != 0);
}
}
_file->seek(pos + size + (size & 1), SEEK_SET);
_videoLooping = true;
// If the video is NOT looping, setLooping will set the speed to the proper value
_videoTrack = new SmushVideoTrack(width, height, frameRate, nbFrames, true);
addTrack(_videoTrack);
return handleFramesHeader();
}
return false;
}
// Parses the FLHD (frames header) chunk of a retail SMUSH file. The chunk
// contains one entry per track: a "Bl16" entry for the video codec (skipped
// here) and a "Wave" entry carrying the audio frequency and channel count.
// Creates and registers the audio track on success.
// Returns false if the FLHD tag is missing, true once the audio track is set up.
bool SmushDecoder::handleFramesHeader() {
	uint32 tag;
	int32 size;
	int pos = 0;
	int freq = 0;
	int channels = 0;
	tag = _file->readUint32BE();
	if (tag != MKTAG('F', 'L', 'H', 'D')) {
		return false;
	}
	size = _file->readUint32BE();
	// Read the whole chunk into memory, then walk its sub-entries.
	byte *f_header = new byte[size];
	_file->read(f_header, size);
	do {
		if (READ_BE_UINT32(f_header + pos) == MKTAG('B', 'l', '1', '6')) {
			// Video codec entry: skip payload plus the 8-byte tag/size header.
			pos += READ_BE_UINT32(f_header + pos + 4) + 8;
		} else if (READ_BE_UINT32(f_header + pos) == MKTAG('W', 'a', 'v', 'e')) {
			// Audio entry: frequency and channel count are little-endian.
			freq = READ_LE_UINT32(f_header + pos + 8);
			channels = READ_LE_UINT32(f_header + pos + 12);
			pos += 20; // fixed-size entry
		} else {
			error("SmushDecoder::handleFramesHeader() unknown tag");
		}
	} while (pos < size);
	delete[] f_header;
	// Retail SMUSH audio is VIMA-compressed (first argument == true).
	_audioTrack = new SmushAudioTrack(true, freq, channels);
	addTrack(_audioTrack);
	return true;
}
// Loads a SMUSH movie from the given stream. The stream is stored in _file
// and deleted in close(), so the decoder takes ownership of it.
// Returns false if the header cannot be parsed.
bool SmushDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();
	_file = stream;
	// Load the video
	if (!readHeader()) {
		warning("Failure loading SMUSH-file");
		return false;
	}
	// Remember where the frame data begins so looping/seeking can rewind here.
	_startPos = _file->pos();
	init();
	return true;
}
// Advances the movie by parsing the next frame's chunks, then hands out the
// decoded surface.
const Graphics::Surface *SmushDecoder::decodeNextFrame() {
	handleFrame();
	// We might be interested in getting the last frame even after the video ends:
	if (endOfVideo()) {
		return _videoTrack->decodeNextFrame();
	}
	return VideoDecoder::decodeNextFrame();
}
// Enables or disables looping playback. Non-looping movies always run at
// the fixed SMUSH playback rate.
void SmushDecoder::setLooping(bool l) {
	_videoLooping = l;
	if (_videoLooping) {
		return;
	}
	_videoTrack->setMsPerFrame(SMUSH_SPEED);
}
void SmushDecoder::handleFrame() {
uint32 tag;
int32 size;
int pos = 0;
if (_videoLooping && _curFrame == _nbframes - 1) {
_file->seek(_startPos, SEEK_SET);
_curFrame = -1;
if (isPaused()) {
return;
}
if (_curFrame == -1)
_startTime = g_system->getMillis();
if (_videoPause)
return;
if (endOfVideo()) { // Looping is handled outside, by rewinding the video.
_videoPause = true;
if (_videoTrack->endOfTrack()) { // Looping is handled outside, by rewinding the video.
_audioTrack->stop(); // HACK: Avoids the movie playing past the last frame
// pauseVideo(true);
return;
}
tag = _file->readUint32BE();
if (tag == MKTAG('A','N','N','O')) {
size = _file->readUint32BE();
if (tag == MKTAG('A', 'N', 'N', 'O')) {
char *anno;
byte *data;
size = _file->readUint32BE();
data = new byte[size];
_file->read(data, size);
anno = (char *)data;
@ -166,31 +280,182 @@ void SmushDecoder::handleFrame() {
}
delete[] anno;
tag = _file->readUint32BE();
size = _file->readUint32BE();
}
assert(tag == MKTAG('F','R','M','E'));
size = _file->readUint32BE();
byte *frame = new byte[size];
_file->read(frame, size);
assert(tag == MKTAG('F', 'R', 'M', 'E'));
handleFRME(_file, size);
do {
if (READ_BE_UINT32(frame + pos) == MKTAG('B','l','1','6')) {
_blocky16->decode((byte *)_surface.pixels, frame + pos + 8);
pos += READ_BE_UINT32(frame + pos + 4) + 8;
} else if (READ_BE_UINT32(frame + pos) == MKTAG('W','a','v','e')) {
int decompressed_size = READ_BE_UINT32(frame + pos + 8);
if (decompressed_size < 0)
handleWave(frame + pos + 8 + 4 + 8, READ_BE_UINT32(frame + pos + 8 + 8));
else
handleWave(frame + pos + 8 + 4, decompressed_size);
pos += READ_BE_UINT32(frame + pos + 4) + 8;
} else {
_videoTrack->finishFrame();
}
void SmushDecoder::handleFRME(Common::SeekableReadStream *stream, uint32 size) {
int blockSize = size;
byte *block = new byte[size];
stream->read(block, size);
Common::MemoryReadStream *memStream = new Common::MemoryReadStream(block, size, DisposeAfterUse::YES);
while (size > 0) {
uint32 subType = memStream->readUint32BE();
uint32 subSize = memStream->readUint32BE();
uint32 subPos = memStream->pos();
switch (subType) {
// Retail only:
case MKTAG('B', 'l', '1', '6'):
_videoTrack->handleBlocky16(memStream, subSize);
break;
case MKTAG('W', 'a', 'v', 'e'):
_audioTrack->handleVIMA(memStream, blockSize);
break;
// Demo only:
case MKTAG('F', 'O', 'B', 'J'):
_videoTrack->handleFrameObject(memStream, subSize);
break;
case MKTAG('I', 'A', 'C', 'T'):
_audioTrack->handleIACT(memStream, subSize);
break;
case MKTAG('X', 'P', 'A', 'L'):
_videoTrack->handleDeltaPalette(memStream, subSize);
break;
default:
Debug::error(Debug::Movie, "SmushDecoder::handleFrame() unknown tag");
}
} while (pos < size);
delete[] frame;
size -= subSize + 8 + (subSize & 1);
memStream->seek(subPos + subSize + (subSize & 1), SEEK_SET);
}
delete memStream;
}
++_curFrame;
// Rewinds playback to the first frame.
// NOTE(review): implemented via seekToFrame(0); seekToFrame() is not visible
// in this excerpt (the header only shows seek(Audio::Timestamp)) — confirm it
// is declared on SmushDecoder.
bool SmushDecoder::rewind() {
	return seekToFrame(0);
}
// Seeks to the frame corresponding to the given timestamp by skipping whole
// FRME chunks in the file. Returns false if the requested frame lies past
// the end of the movie.
bool SmushDecoder::seek(const Audio::Timestamp &time) { // FIXME: This will be off by a second or two right now.
	int32 wantedFrame = (uint32)((time.msecs() / 1000.0f) * _videoTrack->getFrameRate().toDouble());
	if (wantedFrame != 0) {
		warning("Seek to time: %d, frame: %d", time.msecs(), wantedFrame);
		warning("Current frame: %d", _videoTrack->getCurFrame());
	}
	uint32 tag;
	int32 size;
	if (wantedFrame > _videoTrack->getFrameCount()) {
		return false;
	}
	// Seeking backwards means rescanning from the start of the frame data.
	if (wantedFrame < _videoTrack->getCurFrame()) {
		_file->seek(_startPos, SEEK_SET);
	}
	int curFrame = -1;
	// Skip complete FRME chunks (each possibly preceded by an ANNO
	// annotation chunk) until positioned just before the wanted frame.
	while (curFrame < wantedFrame - 1) {
		tag = _file->readUint32BE();
		if (tag == MKTAG('A', 'N', 'N', 'O')) {
			size = _file->readUint32BE();
			_file->seek(size, SEEK_CUR);
			tag = _file->readUint32BE();
		}
		assert(tag == MKTAG('F', 'R', 'M', 'E'));
		size = _file->readUint32BE();
		_file->seek(size, SEEK_CUR);
		curFrame++;
	}
	_videoTrack->setCurFrame(curFrame);
	VideoDecoder::seek(time);
	return true;
}
// Video track for SMUSH movies. Demo movies (is16Bit == false) are 8-bit
// paletted and decoded with Blocky8; retail movies are 16-bit and decoded
// with Blocky16.
SmushDecoder::SmushVideoTrack::SmushVideoTrack(int width, int height, int fps, int numFrames, bool is16Bit) {
	// Set color-format statically here for SMUSH (5650), to allow for differing
	// PixelFormat in engine and renderer (and conversion from Surface there)
	// Which means 16 bpp, 565, shift of 11, 5, 0, 0 for RGBA
	_format = Graphics::PixelFormat(2, 5, 6, 5, 0, 11, 5, 0, 0);
	if (!is16Bit) { // Demo
		// Blocky8 is initialized later, once the first FOBJ chunk supplies
		// the real frame dimensions (see handleFrameObject()).
		_blocky8 = new Blocky8();
		_blocky16 = 0;
	} else {
		_blocky8 = 0;
		_blocky16 = new Blocky16();
		_blocky16->init(width, height);
	}
	_width = width;
	_height = height;
	_nbframes = numFrames;
	_is16Bit = is16Bit;
	_x = 0;
	_y = 0;
	// NOTE(review): despite the parameter name, the value is passed on as
	// microseconds per frame (see setMsPerFrame()).
	setMsPerFrame(fps);
}
SmushDecoder::SmushVideoTrack::~SmushVideoTrack() {
	// Only one of the two codecs is ever allocated (see the constructor);
	// deleting the null pointer is a harmless no-op.
	delete _blocky8;
	delete _blocky16;
	_surface.free();
}
// Resets per-playback state; called from SmushDecoder::init() after the
// header has been parsed.
void SmushDecoder::SmushVideoTrack::init() {
	_curFrame = -1;
	if (_is16Bit) { // Retail only
		// Demo videos create the surface lazily in handleFrameObject(),
		// once the real frame dimensions are known.
		_surface.create(_width, _height, _format);
	}
}
// Finalizes the frame that was just parsed and advances the frame counter.
// Demo (8-bit) frames are palette-indexed and must be converted to RGB565
// before they can be handed out; retail 16-bit frames are already decoded.
void SmushDecoder::SmushVideoTrack::finishFrame() {
	if (_is16Bit) {
		++_curFrame;
		return;
	}
	convertDemoFrame();
	++_curFrame;
}
// Converts the 8-bit paletted demo frame held in _surface to RGB565 in
// place: the paletted pixels are first copied to a temporary surface, then
// each palette index is expanded through _pal into a 16-bit pixel.
void SmushDecoder::SmushVideoTrack::convertDemoFrame() {
	Graphics::Surface conversion;
	conversion.create(0, 0, _format); // Avoid issues with copyFrom, by creating an empty surface.
	conversion.copyFrom(_surface);
	uint16 *d = (uint16 *)_surface.pixels;
	for (int l = 0; l < _width * _height; l++) {
		int index = ((byte *)conversion.pixels)[l];
		// 8-bit palette components -> 5-6-5: keep the top 5/6/5 bits of R/G/B.
		d[l] = ((_pal[(index * 3) + 0] & 0xF8) << 8) | ((_pal[(index * 3) + 1] & 0xFC) << 3) | (_pal[(index * 3) + 2] >> 3);
	}
	conversion.free();
}
// Decodes one retail (16-bit) "Bl16" video chunk of the given size into the
// track's surface using the Blocky16 codec.
void SmushDecoder::SmushVideoTrack::handleBlocky16(Common::SeekableReadStream *stream, uint32 size) {
	assert(_is16Bit); // Bl16 chunks only occur in retail movies
	byte *ptr = new byte[size];
	stream->read(ptr, size);
	_blocky16->decode((byte *)_surface.pixels, ptr);
	delete[] ptr; // FIX: was `delete ptr` — array form required for new[] (UB otherwise)
}
// Decodes one demo (8-bit) "FOBJ" frame-object chunk: reads the 14-byte
// codec-47 header (position, dimensions, two unknown words), reallocates the
// surface and codec state if the frame size changed, then decodes the
// remaining payload with Blocky8.
void SmushDecoder::SmushVideoTrack::handleFrameObject(Common::SeekableReadStream *stream, uint32 size) {
	assert(!_is16Bit); // FOBJ chunks only occur in demo movies
	assert(size >= 14); // fixed header size
	byte codec = stream->readByte();
	assert(codec == 47);
	/* byte codecParam = */ stream->readByte();
	_x = stream->readSint16LE();
	_y = stream->readSint16LE();
	uint16 width = stream->readUint16LE();
	uint16 height = stream->readUint16LE();
	// (Re)allocate surface and codec state whenever the frame size changes.
	if (width != _width || height != _height) {
		_width = width;
		_height = height;
		_surface.create(_width, _height, _format);
		_blocky8->init(_width, _height);
	}
	stream->readUint16LE(); // unknown
	stream->readUint16LE(); // unknown
	size -= 14;
	byte *ptr = new byte[size];
	stream->read(ptr, size);
	_blocky8->decode((byte *)_surface.pixels, ptr);
	delete[] ptr; // FIX: was `delete ptr` — array form required for new[] (UB otherwise)
}
static byte delta_color(byte org_color, int16 delta_color) {
@ -198,20 +463,77 @@ static byte delta_color(byte org_color, int16 delta_color) {
return CLIP(t, 0, 255);
}
void SmushDecoder::handleDeltaPalette(byte *src, int32 size) {
void SmushDecoder::SmushVideoTrack::handleDeltaPalette(Common::SeekableReadStream *stream, int32 size) {
if (size == 0x300 * 3 + 4) {
for (int i = 0; i < 0x300; i++)
_deltaPal[i] = READ_LE_UINT16(src + (i * 2) + 4);
memcpy(_pal, src + 0x600 + 4, 0x300);
stream->seek(4, SEEK_CUR);
for (int i = 0; i < 0x300; i++) {
_deltaPal[i] = stream->readUint16LE();
}
stream->read(_pal, 0x300);
} else if (size == 6) {
for (int i = 0; i < 0x300; i++)
for (int i = 0; i < 0x300; i++) {
_pal[i] = delta_color(_pal[i], _deltaPal[i]);
}
} else {
error("SmushDecoder::handleDeltaPalette() Wrong size for DeltaPalette");
}
}
void SmushDecoder::handleIACT(const byte *src, int32 size) {
// Returns the current frame surface. The surface is updated in place by the
// handle*() methods as chunks are parsed; no decoding happens here.
Graphics::Surface *SmushDecoder::SmushVideoTrack::decodeNextFrame() {
	return &_surface;
}
// Sets the playback rate. Despite the parameter name, the value is treated
// as microseconds per frame: the resulting rate is 1000000/ms frames per
// second (e.g. SMUSH_SPEED 66667 yields roughly 15 fps).
void SmushDecoder::SmushVideoTrack::setMsPerFrame(int ms) {
	_frameRate = Common::Rational(1000000, ms);
}
// Audio track for SMUSH movies. Retail movies (isVima == true) carry
// VIMA-compressed "Wave" chunks; demo movies feed IACT chunks instead.
SmushDecoder::SmushAudioTrack::SmushAudioTrack(bool isVima, int freq, int channels) {
	_isVima = isVima;
	_channels = channels;
	_freq = freq;
	// Create the queuing stream up front; handleVIMA()/handleIACT() feed it.
	_queueStream = Audio::makeQueuingAudioStream(_freq, (_channels == 2));
}
SmushDecoder::SmushAudioTrack::~SmushAudioTrack() {
	// NOTE(review): _queueStream is not freed here — presumably its lifetime
	// is handed over to the mixer/VideoDecoder once playback starts; confirm
	// it is not leaked when a track is destroyed without ever having played.
}
// Resets decoding state for a (re)started playback: rewinds the IACT
// assembly-buffer position and, for retail movies, (re)builds the VIMA
// decompression table.
void SmushDecoder::SmushAudioTrack::init() {
	_IACTpos = 0;
	if (_isVima) {
		vimaInit(smushDestTable);
	}
}
// Decompresses one VIMA-compressed "Wave" chunk and queues the resulting
// 16-bit PCM on the track's stream. The chunk starts with the decompressed
// sample count (big-endian); a negative value signals an extended header in
// which the real count follows 8 bytes later.
void SmushDecoder::SmushAudioTrack::handleVIMA(Common::SeekableReadStream *stream, uint32 size) {
	int decompressedSize = stream->readUint32BE();
	if (decompressedSize < 0) {
		// Extended header: skip one word, then read the actual size.
		stream->readUint32BE();
		decompressedSize = stream->readUint32BE();
	}
	byte *src = new byte[size];
	stream->read(src, size);
	// Two bytes per sample per channel.
	int16 *dst = new int16[decompressedSize * _channels];
	decompressVima(src, dst, decompressedSize * _channels * 2, smushDestTable);
	int flags = Audio::FLAG_16BITS;
	if (_channels == 2) {
		flags |= Audio::FLAG_STEREO;
	}
	if (!_queueStream) {
		_queueStream = Audio::makeQueuingAudioStream(_freq, (_channels == 2));
	}
	// The queue stream takes ownership of dst (DisposeAfterUse::YES).
	_queueStream->queueBuffer((byte *)dst, decompressedSize * _channels * 2, DisposeAfterUse::YES, flags);
	delete[] src; // FIX: was `delete src` — array form required for new[] (UB otherwise)
}
void SmushDecoder::SmushAudioTrack::handleIACT(Common::SeekableReadStream *stream, int32 size) {
byte *src = new byte[size];
stream->read(src, size);
int32 bsize = size - 18;
const byte *d_src = src + 18;
@ -255,11 +577,10 @@ void SmushDecoder::handleIACT(const byte *src, int32 size) {
}
} while (--count);
if (!_stream) {
_stream = Audio::makeQueuingAudioStream(22050, true);
g_system->getMixer()->playStream(Audio::Mixer::kSFXSoundType, &_soundHandle, _stream);
if (!_queueStream) {
_queueStream = Audio::makeQueuingAudioStream(22050, true);
}
_stream->queueBuffer(output_data, 0x1000, DisposeAfterUse::YES, Audio::FLAG_STEREO | Audio::FLAG_16BITS);
_queueStream->queueBuffer(output_data, 0x1000, DisposeAfterUse::YES, Audio::FLAG_STEREO | Audio::FLAG_16BITS);
bsize -= len;
d_src += len;
@ -276,291 +597,12 @@ void SmushDecoder::handleIACT(const byte *src, int32 size) {
bsize--;
}
}
delete src;
}
void SmushDecoder::handleFrameDemo() {
uint32 tag;
int32 size;
int pos = 0;
if (_videoPause)
return;
if (endOfVideo()) {
_videoPause = true;
return;
}
if (_curFrame == -1)
_startTime = g_system->getMillis();
tag = _file->readUint32BE();
assert(tag == MKTAG('F','R','M','E'));
size = _file->readUint32BE();
byte *frame = new byte[size];
_file->read(frame, size);
do {
if (READ_BE_UINT32(frame + pos) == MKTAG('F','O','B','J')) {
_x = READ_LE_UINT16(frame + pos + 10);
_y = READ_LE_UINT16(frame + pos + 12);
int width = READ_LE_UINT16(frame + pos + 14);
int height = READ_LE_UINT16(frame + pos + 16);
if (width != _width || height != _height) {
_width = width;
_height = height;
_surface.create(_width, _height, _format);
_blocky8->init(_width, _height);
}
_blocky8->decode((byte *)_surface.pixels, frame + pos + 8 + 14);
pos += READ_BE_UINT32(frame + pos + 4) + 8;
} else if (READ_BE_UINT32(frame + pos) == MKTAG('I','A','C','T')) {
handleIACT(frame + pos + 8, READ_BE_UINT32(frame + pos + 4));
int offset = READ_BE_UINT32(frame + pos + 4) + 8;
if (offset & 1)
offset += 1;
pos += offset;
} else if (READ_BE_UINT32(frame + pos) == MKTAG('X','P','A','L')) {
handleDeltaPalette(frame + pos + 8, READ_BE_UINT32(frame + pos + 4));
pos += READ_BE_UINT32(frame + pos + 4) + 8;
} else {
error("SmushDecoder::handleFrame() unknown tag");
}
} while (pos < size);
delete[] frame;
Graphics::Surface conversion;
conversion.create(0, 0, _format); // Avoid issues with copyFrom, by creating an empty surface.
conversion.copyFrom(_surface);
uint16 *d = (uint16 *)_surface.pixels;
for (int l = 0; l < _width * _height; l++) {
int index = ((byte *)conversion.pixels)[l];
d[l] = ((_pal[(index * 3) + 0] & 0xF8) << 8) | ((_pal[(index * 3) + 1] & 0xFC) << 3) | (_pal[(index * 3) + 2] >> 3);
}
conversion.free();
_curFrame++;
}
void SmushDecoder::handleFramesHeader() {
uint32 tag;
int32 size;
int pos = 0;
tag = _file->readUint32BE();
assert(tag == MKTAG('F','L','H','D'));
size = _file->readUint32BE();
byte *f_header = new byte[size];
_file->read(f_header, size);
do {
if (READ_BE_UINT32(f_header + pos) == MKTAG('B','l','1','6')) {
pos += READ_BE_UINT32(f_header + pos + 4) + 8;
} else if (READ_BE_UINT32(f_header + pos) == MKTAG('W','a','v','e')) {
_freq = READ_LE_UINT32(f_header + pos + 8);
_channels = READ_LE_UINT32(f_header + pos + 12);
pos += 20;
} else {
error("SmushDecoder::handleFramesHeader() unknown tag");
}
} while (pos < size);
delete[] f_header;
}
bool SmushDecoder::setupAnimDemo() {
uint32 tag;
tag = _file->readUint32BE();
assert(tag == MKTAG('A','N','I','M'));
_file->readUint32BE();
tag = _file->readUint32BE();
assert(tag == MKTAG('A','H','D','R'));
_file->readUint32BE();
_file->readUint16BE(); // version
_nbframes = _file->readUint16LE();
_file->readUint16BE(); // unknown
for (int l = 0; l < 0x300; l++) {
_pal[l] = _file->readByte();
}
_file->readUint32BE();
_file->readUint32BE();
_file->readUint32BE();
_file->readUint32BE();
_file->readUint32BE();
_x = -1;
_y = -1;
_width = -1;
_height = -1;
_videoLooping = false;
_startPos = 0;
setMsPerFrame(SMUSH_SPEED);
// Always reports success without touching any audio state.
// NOTE(review): the file repositioning is done by SmushDecoder::seek();
// presumably the audio track has no independent position to adjust — confirm
// queued-but-unplayed audio does not need flushing on seek.
bool SmushDecoder::SmushAudioTrack::seek(const Audio::Timestamp &time) {
	return true;
}
bool SmushDecoder::setupAnim() {
uint32 tag;
int32 size;
int16 flags;
if (!_file)
return false;
tag = _file->readUint32BE();
assert(tag == MKTAG('S','A','N','M'));
size = _file->readUint32BE();
tag = _file->readUint32BE();
assert(tag == MKTAG('S','H','D','R'));
size = _file->readUint32BE();
byte *s_header = new byte[size];
_file->read(s_header, size);
_nbframes = READ_LE_UINT32(s_header + 2);
int width = READ_LE_UINT16(s_header + 8);
int height = READ_LE_UINT16(s_header + 10);
if (_width != width || _height != height) {
_blocky16->init(width, height);
}
_width = width;
_height = height;
// If the video is NOT looping, setLooping will set the speed to the proper value
setMsPerFrame(READ_LE_UINT32(s_header + 14));
flags = READ_LE_UINT16(s_header + 18);
// Output information for checking out the flags
if (Debug::isChannelEnabled(Debug::Movie | Debug::Info)) {
warning("SMUSH Flags:");
for (int i = 0; i < 16; i++)
warning(" %d", (flags & (1 << i)) != 0);
//printf("\n");
}
_videoLooping = true;
delete[] s_header;
return true;
}
bool SmushDecoder::loadStream(Common::SeekableReadStream *stream) {
close();
_file = stream;
// Load the video
if (_demo) {
if (!setupAnimDemo())
return false;
} else {
if (!setupAnim())
return false;
handleFramesHeader();
}
_startPos = _file->pos();
init();
if (!_demo)
_surface.create(_width, _height, _format);
return true;
}
const Graphics::Surface *SmushDecoder::decodeNextFrame() {
if (_demo)
handleFrameDemo();
else
handleFrame();
return &_surface;
}
void SmushDecoder::setLooping(bool l) {
_videoLooping = l;
if (!_videoLooping)
setMsPerFrame(SMUSH_SPEED);
}
void SmushDecoder::pauseVideoIntern(bool p) {
g_system->getMixer()->pauseHandle(_soundHandle, p);
}
uint32 SmushDecoder::getFrameCount() const {
return _nbframes;
}
void SmushDecoder::setMsPerFrame(int ms) {
_frameRate = Common::Rational(1000000, ms);
}
void SmushDecoder::seekToTime(const Audio::Timestamp &time) { // FIXME: This will be off by a second or two right now.
int32 wantedFrame = (uint32) ((time.msecs() / 1000.0f) * getFrameRate().toDouble());
warning("Seek to time: %d, frame: %d", time.msecs(), wantedFrame);
warning("Current frame: %d", _curFrame);
uint32 tag;
int32 size;
if (_stream) {
_stream->finish();
_stream = NULL;
}
if (wantedFrame > _nbframes)
return;
if (wantedFrame < _curFrame) {
_file->seek(_startPos, SEEK_SET);
}
_videoPause = true;
_startTime = g_system->getMillis() - time.msecs(); // This won't be correct, as we should round off to the frame-start.
while(_curFrame < wantedFrame) {
tag = _file->readUint32BE();
if (tag == MKTAG('A','N','N','O')) {
size = _file->readUint32BE();
_file->seek(size, SEEK_CUR);
tag = _file->readUint32BE();
}
assert(tag == MKTAG('F','R','M','E'));
size = _file->readUint32BE();
_file->seek(size, SEEK_CUR);
_curFrame++;
}
warning("Seek complete");
_videoPause = false;
}
uint32 SmushDecoder::getDuration() const {
return (uint32) (getFrameCount() / getFrameRate().toDouble());
}
uint32 SmushDecoder::getTimeToNextFrame() const {
if (endOfVideo()) { //handle looping
uint32 elapsedTime = getTime();
Common::Rational beginTime = (_curFrame + 1) * 1000;
beginTime /= getFrameRate();
uint32 nextFrameStartTime = beginTime.toInt();
// If the time that the next frame should be shown has past
// the frame should be shown ASAP.
if (nextFrameStartTime <= elapsedTime)
return 0;
return nextFrameStartTime - elapsedTime;
} else {
return FixedRateVideoDecoder::getTimeToNextFrame();
}
}
} // end of namespace Grim

View File

@ -23,16 +23,14 @@
#ifndef GRIM_SMUSH_DECODER_H
#define GRIM_SMUSH_DECODER_H
#include "common/rational.h"
#include "audio/mixer.h"
#include "audio/audiostream.h"
#include "video/video_decoder.h"
#include "graphics/surface.h"
namespace Audio {
class QueuingAudioStream;
class QueuingAudioStream;
}
namespace Grim {
@ -40,77 +38,100 @@ namespace Grim {
class Blocky8;
class Blocky16;
class SmushDecoder : public virtual Video::SeekableVideoDecoder, public virtual Video::FixedRateVideoDecoder {
private:
int32 _nbframes;
int _width, _height;
int _x, _y;
Blocky8 *_blocky8;
Blocky16 *_blocky16;
Common::SeekableReadStream *_file;
Common::Rational _frameRate;
Graphics::Surface _surface;
Graphics::PixelFormat _format;
byte _pal[0x300];
int16 _deltaPal[0x300];
byte _IACToutput[4096];
int32 _IACTpos;
Audio::SoundHandle _soundHandle;
Audio::QueuingAudioStream *_stream;
uint32 _startPos;
int _channels;
int _freq;
bool _videoPause;
bool _videoLooping;
bool _demo;
class SmushDecoder : public Video::VideoDecoder {
public:
SmushDecoder();
~SmushDecoder();
int getX() { return _x; }
int getY() { return _y; }
int getX() const { return _videoTrack->_x; }
int getY() const { return _videoTrack->_y; }
void setLooping(bool l);
void setDemo(bool demo) { _demo = demo; }
uint16 getWidth() const { return _width; }
uint16 getHeight() const { return _height; }
Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
bool isVideoLoaded() const { return _file != 0; }
bool isRewindable() const { return true; }
bool isSeekable() const { return true; }
bool rewind();
bool seek(const Audio::Timestamp &time);
bool loadStream(Common::SeekableReadStream *stream);
const Graphics::Surface *decodeNextFrame();
uint32 getFrameCount() const;
void close();
// Seekable
void seekToTime(const Audio::Timestamp &time);
uint32 getDuration() const;
uint32 getTimeToNextFrame() const;
private:
void pauseVideoIntern(bool p);
void parseNextFrame();
void init();
void handleDeltaPalette(byte *src, int32 size);
void handleFramesHeader();
protected:
bool readHeader();
void handleFrameDemo();
void handleFrame();
void handleBlocky16(byte *src);
void handleWave(const byte *src, uint32 size);
void handleIACT(const byte *src, int32 size);
bool setupAnim();
bool setupAnimDemo();
void setMsPerFrame(int ms);
protected:
// Fixed Rate:
Common::Rational getFrameRate() const { return _frameRate; }
bool handleFramesHeader();
void handleFRME(Common::SeekableReadStream *stream, uint32 size);
void init();
void close();
const Graphics::Surface *decodeNextFrame();
class SmushVideoTrack : public FixedRateVideoTrack {
public:
SmushVideoTrack(int width, int height, int fps, int numFrames, bool is16Bit);
~SmushVideoTrack();
uint16 getWidth() const { return _width; }
uint16 getHeight() const { return _height; }
Graphics::PixelFormat getPixelFormat() const { return _format; }
int getCurFrame() const { return _curFrame; }
void setCurFrame(int frame) { _curFrame = frame; }
int getFrameCount() const { return _nbframes; }
Common::Rational getFrameRate() const { return _frameRate; }
void setMsPerFrame(int ms);
void finishFrame();
bool isSeekable() const { return true; }
bool seek(const Audio::Timestamp &time) { return true; }
void handleBlocky16(Common::SeekableReadStream *stream, uint32 size);
void handleFrameObject(Common::SeekableReadStream *stream, uint32 size);
void handleDeltaPalette(Common::SeekableReadStream *stream, int32 size);
void init();
Graphics::Surface *decodeNextFrame();
byte *getPal() { return _pal; }
int _x, _y;
private:
void convertDemoFrame();
bool _is16Bit;
int32 _curFrame;
byte _pal[0x300];
int16 _deltaPal[0x300];
int _width, _height;
Graphics::Surface _surface;
Graphics::PixelFormat _format;
Common::Rational _frameRate;
Blocky8 *_blocky8;
Blocky16 *_blocky16;
int32 _nbframes;
};
class SmushAudioTrack : public AudioTrack {
public:
SmushAudioTrack(bool isVima, int freq = 22050, int channels = -1);
~SmushAudioTrack();
Audio::AudioStream *getAudioStream() const { return _queueStream; }
bool isSeekable() const { return true; }
bool seek(const Audio::Timestamp &time);
void handleVIMA(Common::SeekableReadStream *stream, uint32 size);
void handleIACT(Common::SeekableReadStream *stream, int32 size);
void init();
private:
bool _isVima;
byte _IACToutput[4096];
int32 _IACTpos;
int _channels;
int _freq;
Audio::QueuingAudioStream *_queueStream;
};
private:
SmushAudioTrack *_audioTrack;
SmushVideoTrack *_videoTrack;
Common::SeekableReadStream *_file;
uint32 _startPos;
bool _videoPause;
bool _videoLooping;
static bool _demo;
};
} // end of namespace Grim

View File

@ -86,8 +86,9 @@ bool MoviePlayer::prepareFrame() {
_videoFinished = true;
}
if (_videoPause)
if (_videoPause) {
return false;
}
if (_videoFinished) {
if (g_grim->getMode() == GrimEngine::SmushMode) {
@ -101,9 +102,10 @@ bool MoviePlayer::prepareFrame() {
return false;
handleFrame();
_internalSurface = _videoDecoder->decodeNextFrame();
_updateNeeded = true;
if (_frame != _videoDecoder->getCurFrame()) {
_updateNeeded = true;
}
_movieTime = _videoDecoder->getTime();
_frame = _videoDecoder->getCurFrame();
@ -147,7 +149,7 @@ void MoviePlayer::deinit() {
_videoFinished = true;
}
bool MoviePlayer::play(Common::String filename, bool looping, int x, int y) {
bool MoviePlayer::play(Common::String filename, bool looping, int x, int y, bool start) {
Common::StackLock lock(_frameMutex);
deinit();
_x = x;
@ -163,8 +165,12 @@ bool MoviePlayer::play(Common::String filename, bool looping, int x, int y) {
init();
_internalSurface = NULL;
// Get the first frame immediately
timerCallback(this);
if (start) {
_videoDecoder->start();
// Get the first frame immediately
timerCallback(this);
}
return true;
}
@ -203,7 +209,7 @@ void MoviePlayer::restoreState(SaveGame *state) {
int y = state->readLESint32();
if (!videoFinished && !_fname.empty()) {
play(_fname.c_str(), videoLooping, x, y);
play(_fname.c_str(), videoLooping, x, y, false);
}
_frame = frame;
_movieTime = movieTime;
@ -215,6 +221,11 @@ void MoviePlayer::restoreState(SaveGame *state) {
#define NEED_NULLPLAYER
#endif
// Temporary fix while reworking codecs:
#ifndef NEED_NULLPLAYER
#define NEED_NULLPLAYER
#endif
// Fallback for when USE_MPEG2 / USE_BINK isn't defined
#ifdef NEED_NULLPLAYER
@ -225,7 +236,7 @@ public:
_videoFinished = true; // Rigs all movies to be completed.
}
~NullPlayer() {}
bool play(Common::String filename, bool looping, int x, int y) {return true;}
bool play(Common::String filename, bool looping, int x, int y, bool start = true) { return true; }
bool loadFile(Common::String filename) { return true; }
void stop() {}
void pause(bool p) {}

View File

@ -66,7 +66,7 @@ public:
* @see init
* @see stop
*/
virtual bool play(Common::String filename, bool looping, int x, int y);
virtual bool play(Common::String filename, bool looping, int x, int y, bool start = true);
virtual void stop();
virtual void pause(bool p);
virtual bool isPlaying() { return !_videoFinished; }

View File

@ -20,28 +20,14 @@
*
*/
#include "common/endian.h"
#include "common/timer.h"
#include "common/file.h"
#include "common/events.h"
#include "common/system.h"
#include "common/textconsole.h"
#include "audio/audiostream.h"
#include "audio/mixer.h"
#include "audio/decoders/raw.h"
#include "graphics/surface.h"
#include "video/mpegps_decoder.h"
#include "engines/grim/movie/mpeg.h"
#include "engines/grim/debug.h"
#include "engines/grim/grim.h"
#ifdef USE_MPEG2
#define MWIDTH 640
#define MHEIGHT 400
#ifdef USE_MPEG2
namespace Grim {
@ -49,71 +35,23 @@ MoviePlayer *CreateMpegPlayer() {
return new MpegPlayer();
}
class MpegHandler : public Video::BaseAnimationState {
public:
MpegHandler(MpegPlayer *vid, OSystem *sys, int width, int height) : BaseAnimationState(sys, width, height) {
_mpeg = vid;
}
protected:
MpegPlayer *_mpeg;
virtual void drawYUV(int width, int height, byte *const *dat) {
plotYUV(MWIDTH, MHEIGHT, dat);
_mpeg->deliverFrameFromDecode(width, height, _overlay);
}
};
MpegPlayer::MpegPlayer() : MoviePlayer() {
g_movie = this;
_speed = 50;
_videoBase = new MpegHandler(this, g_system, MWIDTH, MHEIGHT);
}
void MpegPlayer::init() {
MoviePlayer::init();
// FIXME, deal with pixelformat differently when we get this properly tested.
Graphics::PixelFormat format = Graphics::PixelFormat(16, 5, 6, 5, 0, 11, 5, 0, 0);
_externalSurface->create(MWIDTH, MHEIGHT, format);
g_system->getTimerManager()->installTimerProc(&timerCallback, _speed, this, "mpeg loop");
}
void MpegPlayer::deinit() {
g_system->getTimerManager()->removeTimerProc(&timerCallback);
if (_stream) {
_stream->finish();
_stream = NULL;
g_system->getMixer()->stopHandle(_soundHandle);
}
_videoLooping = false;
_videoPause = true;
}
void MpegPlayer::handleFrame() {
if (_videoPause)
return;
if (!_videoBase->decodeFrame()) {
_videoFinished = true;
g_grim->setMode(GrimEngine::NormalMode);
return;
}
//else
//bas->updateScreen();
}
void MpegPlayer::deliverFrameFromDecode(int width, int height, uint16 *dat) {
memcpy(_externalSurface->pixels, dat, _externalSurface->w * _externalSurface->h * 2);
_frame++;
_updateNeeded = true;
_videoDecoder = new Video::MPEGPSDecoder();
}
bool MpegPlayer::loadFile(Common::String filename) {
_videoBase->init(_fname.c_str());
return true; // FIXME
}
_fname = filename + ".pss";
Common::SeekableReadStream *stream = SearchMan.createReadStreamForMember(_fname);
if (!stream)
return false;
_videoDecoder->loadStream(stream);
_videoDecoder->start();
return true;
}
} // end of namespace Grim
#endif // USE_MPEG2

View File

@ -23,39 +23,20 @@
#ifndef GRIM_MPEG_PLAYER_H
#define GRIM_MPEG_PLAYER_H
#include "common/scummsys.h"
#include "common/file.h"
#include "graphics/pixelformat.h"
#include "audio/mixer.h"
#include "audio/audiostream.h"
#include "video/mpeg_player.h"
#include "engines/grim/movie/movie.h"
#ifdef USE_MPEG2
namespace Grim {
class MpegPlayer : public MoviePlayer {
private:
Video::BaseAnimationState *_videoBase;
Audio::SoundHandle _soundHandle;
Audio::QueuingAudioStream *_stream;
int _speed; // <- Quickfix to fix compile, verify when fixing the decoder properly.
public:
MpegPlayer();
void deliverFrameFromDecode(int width, int height, uint16 *dat);
private:
void handleFrame();
void init();
void deinit();
bool loadFile(Common::String filename);
};
class MpegPlayer : public MoviePlayer {
public:
MpegPlayer();
private:
bool loadFile(Common::String filename);
bool _demo;
};
} // end of namespace Grim
#endif // USE_MPEG2

View File

@ -22,7 +22,6 @@
#include "engines/grim/movie/codecs/smush_decoder.h"
#include "engines/grim/movie/smush.h"
#include "engines/grim/movie/codecs/vima.h"
#include "engines/grim/resource.h"
#include "engines/grim/grim.h"
@ -36,7 +35,7 @@ MoviePlayer *CreateSmushPlayer(bool demo) {
SmushPlayer::SmushPlayer(bool demo) : MoviePlayer(), _demo(demo) {
_smushDecoder = new SmushDecoder();
_videoDecoder = _smushDecoder;
_smushDecoder->setDemo(_demo);
//_smushDecoder->setDemo(_demo);
}
bool SmushPlayer::loadFile(Common::String filename) {
@ -57,7 +56,8 @@ void SmushPlayer::init() {
}
void SmushPlayer::handleFrame() {
if (_videoDecoder->endOfVideo()) {
// Force the last frame to stay in place for it's duration:
if (_videoDecoder->endOfVideo() && _videoDecoder->getTime() >= _videoDecoder->getDuration().msecs()) {
// If we're not supposed to loop (or looping fails) then end the video
if (!_videoLooping ) {
_videoFinished = true;
@ -65,7 +65,8 @@ void SmushPlayer::handleFrame() {
deinit();
return;
} else {
// getDecoder()->rewind(); // This doesnt handle if looping fails.
_smushDecoder->rewind(); // This doesnt handle if looping fails.
_smushDecoder->start();
}
}
}
@ -79,8 +80,11 @@ void SmushPlayer::postHandleFrame() {
void SmushPlayer::restoreState(SaveGame *state) {
MoviePlayer::restoreState(state);
Common::StackLock lock(_frameMutex);
if (isPlaying()) {
_smushDecoder->seekToTime((uint32)_movieTime); // Currently not fully working (out of synch)
// _smushDecoder->seek((uint32)_movieTime); // Currently not fully working (out of synch)
_smushDecoder->start();
timerCallback(this);
}
}

View File

@ -282,7 +282,9 @@ DragItem::DragItem(Myst3Engine *vm, uint id):
// Load the movie
_movieStream = movieDesc->getData();
_bink.loadStream(_movieStream, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.loadStream(_movieStream);
_bink.start();
const Graphics::Surface *frame = _bink.decodeNextFrame();
_texture = _vm->_gfx->createTexture(frame);

View File

@ -29,7 +29,7 @@
#include "engines/myst3/gfx.h"
#include "video/bink_decoder_seek.h"
#include "video/bink_decoder.h"
namespace Myst3 {
@ -97,7 +97,7 @@ private:
Myst3Engine *_vm;
Common::MemoryReadStream *_movieStream;
Video::SeekableBinkDecoder _bink;
Video::BinkDecoder _bink;
uint16 _frame;
Texture *_texture;

View File

@ -170,7 +170,9 @@ Dialog::Dialog(Myst3Engine *vm, uint id):
// Load the movie
_movieStream = movieDesc->getData();
_bink.loadStream(_movieStream, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.loadStream(_movieStream);
_bink.start();
const Graphics::Surface *frame = _bink.decodeNextFrame();
_texture = _vm->_gfx->createTexture(frame);

View File

@ -31,7 +31,7 @@
#include "common/savefile.h"
#include "common/str-array.h"
#include "video/bink_decoder_seek.h"
#include "video/bink_decoder.h"
namespace Myst3 {
@ -94,7 +94,7 @@ private:
Myst3Engine *_vm;
Common::MemoryReadStream *_movieStream;
Video::SeekableBinkDecoder _bink;
Video::BinkDecoder _bink;
uint16 _previousframe;
uint16 _frameToDisplay;

View File

@ -60,13 +60,14 @@ Movie::Movie(Myst3Engine *vm, uint16 id) :
loadPosition(binkDesc->getVideoData());
Common::MemoryReadStream *binkStream = binkDesc->getData();
_bink.loadStream(binkStream, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
_bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
uint language = ConfMan.getInt("audio_language");
_bink.loadStream(binkStream);
_bink.setAudioTrack(language);
_bink.start();
if (ConfMan.getBool("subtitles"))
_subtitles = Subtitles::create(_vm, id);
uint language = ConfMan.getInt("audio_language");
_bink.setAudioTrack(language);
}
void Movie::loadPosition(const VideoData &videoData) {
@ -146,10 +147,12 @@ void Movie::drawOverlay() {
void Movie::drawNextFrameToTexture() {
const Graphics::Surface *frame = _bink.decodeNextFrame();
if (_texture)
_texture->update(frame);
else
_texture = _vm->_gfx->createTexture(frame);
if (frame) {
if (_texture)
_texture->update(frame);
else
_texture = _vm->_gfx->createTexture(frame);
}
}
Movie::~Movie() {
@ -227,6 +230,7 @@ void ScriptedMovie::update() {
|| _bink.getCurFrame() < _startFrame
|| _bink.endOfVideo()) {
_bink.seekToFrame(_startFrame);
_isLastFrame = false;
}
if (!_scriptDriven)
@ -243,10 +247,15 @@ void ScriptedMovie::update() {
if (_nextFrameReadVar) {
int32 nextFrame = _vm->_state->getVar(_nextFrameReadVar);
if (nextFrame > 0 && nextFrame <= (int32)_bink.getFrameCount()) {
// Are we changing frame?
if (_bink.getCurFrame() != nextFrame - 1) {
_bink.seekToFrame(nextFrame - 1);
// Don't seek if we just want to display the next frame
if (_bink.getCurFrame() + 1 != nextFrame - 1) {
_bink.seekToFrame(nextFrame - 1);
}
drawNextFrameToTexture();
}
_vm->_state->setVar(_nextFrameReadVar, 0);
_isLastFrame = false;
}
@ -257,7 +266,7 @@ void ScriptedMovie::update() {
bool complete = false;
if (_isLastFrame) {
_isLastFrame = 0;
_isLastFrame = false;
if (_loop) {
_bink.seekToFrame(_startFrame);

View File

@ -27,7 +27,7 @@
#include "engines/myst3/node.h"
#include "math/vector3d.h"
#include "video/bink_decoder_seek.h"
#include "video/bink_decoder.h"
namespace Myst3 {
@ -68,7 +68,7 @@ protected:
int32 _posU;
int32 _posV;
Video::SeekableBinkDecoder _bink;
Video::BinkDecoder _bink;
Texture *_texture;
int32 _startFrame;

View File

@ -1545,8 +1545,10 @@ void Puzzles::projectorLoadBitmap(uint16 bitmap) {
// Rebuild the complete background image from the frames of the bink movie
Common::MemoryReadStream *movieStream = movieDesc->getData();
Video::SeekableBinkDecoder bink;
bink.loadStream(movieStream, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
Video::BinkDecoder bink;
bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
bink.loadStream(movieStream);
bink.start();
for (uint i = 0; i < 1024; i += 256)
for (uint j = 0; j < 1024; j += 256) {
@ -1569,8 +1571,10 @@ void Puzzles::projectorAddSpotItem(uint16 bitmap, uint16 x, uint16 y) {
// Rebuild the complete background image from the frames of the bink movie
Common::MemoryReadStream *movieStream = movieDesc->getData();
Video::SeekableBinkDecoder bink;
bink.loadStream(movieStream, Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
Video::BinkDecoder bink;
bink.setDefaultHighColorFormat(Graphics::PixelFormat(4, 8, 8, 8, 8, 0, 8, 16, 24));
bink.loadStream(movieStream);
bink.start();
const Graphics::Surface *frame = bink.decodeNextFrame();
copySurfaceRect(_vm->_projectorBackground, Common::Point(x, y), frame);

View File

@ -25,6 +25,7 @@
#include "engines/myst3/sound.h"
#include "engines/myst3/state.h"
#include "audio/audiostream.h"
#include "audio/decoders/mp3.h"
#include "audio/decoders/wave.h"

View File

@ -83,143 +83,138 @@
// BASIS, AND BROWN UNIVERSITY HAS NO OBLIGATION TO PROVIDE MAINTENANCE,
// SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#include "common/scummsys.h"
#include "common/singleton.h"
#include "common/textconsole.h"
#include "graphics/surface.h"
#include "graphics/yuva_to_rgba.h"
namespace Common {
DECLARE_SINGLETON(Graphics::YUVAToRGBAManager);
}
namespace Graphics {
class YUVAToRGBALookup {
public:
YUVAToRGBALookup(Graphics::PixelFormat format);
~YUVAToRGBALookup();
YUVAToRGBALookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale);
int16 *_colorTab;
uint32 *_rgbToPix;
uint32 *_alphaToPix;
Graphics::PixelFormat getFormat() const { return _format; }
YUVAToRGBAManager::LuminanceScale getScale() const { return _scale; }
const uint32 *getRGBToPix() const { return _rgbToPix; }
const uint32 *getAlphaToPix() const { return _alphaToPix; }
private:
Graphics::PixelFormat _format;
YUVAToRGBAManager::LuminanceScale _scale;
uint32 _rgbToPix[3 * 768]; // 9216 bytes
uint32 _alphaToPix[256]; // 958 bytes
};
YUVAToRGBALookup::YUVAToRGBALookup(Graphics::PixelFormat format) {
_colorTab = new int16[4 * 256]; // 2048 bytes
YUVAToRGBALookup::YUVAToRGBALookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale) {
_format = format;
_scale = scale;
uint32 *r_2_pix_alloc = &_rgbToPix[0 * 768];
uint32 *g_2_pix_alloc = &_rgbToPix[1 * 768];
uint32 *b_2_pix_alloc = &_rgbToPix[2 * 768];
if (scale == YUVAToRGBAManager::kScaleFull) {
// Set up entries 0-255 in rgb-to-pixel value tables.
for (int i = 0; i < 256; i++) {
r_2_pix_alloc[i + 256] = format.ARGBToColor(0, i, 0, 0);
g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, i, 0);
b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, i);
}
// Spread out the values we have to the rest of the array so that we do
// not need to check for overflow.
for (int i = 0; i < 256; i++) {
r_2_pix_alloc[i] = r_2_pix_alloc[256];
r_2_pix_alloc[i + 512] = r_2_pix_alloc[511];
g_2_pix_alloc[i] = g_2_pix_alloc[256];
g_2_pix_alloc[i + 512] = g_2_pix_alloc[511];
b_2_pix_alloc[i] = b_2_pix_alloc[256];
b_2_pix_alloc[i + 512] = b_2_pix_alloc[511];
}
} else {
// Set up entries 16-235 in rgb-to-pixel value tables
for (int i = 16; i < 236; i++) {
int scaledValue = (i - 16) * 255 / 219;
r_2_pix_alloc[i + 256] = format.ARGBToColor(0, scaledValue, 0, 0);
g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, scaledValue, 0);
b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, scaledValue);
}
// Spread out the values we have to the rest of the array so that we do
// not need to check for overflow. We have to do it here in two steps.
for (int i = 0; i < 256 + 16; i++) {
r_2_pix_alloc[i] = r_2_pix_alloc[256 + 16];
g_2_pix_alloc[i] = g_2_pix_alloc[256 + 16];
b_2_pix_alloc[i] = b_2_pix_alloc[256 + 16];
}
for (int i = 256 + 236; i < 768; i++) {
r_2_pix_alloc[i] = r_2_pix_alloc[256 + 236 - 1];
g_2_pix_alloc[i] = g_2_pix_alloc[256 + 236 - 1];
b_2_pix_alloc[i] = b_2_pix_alloc[256 + 236 - 1];
}
}
// Set up entries 0-255 in alpha-to-pixel value table.
for (int i = 0; i < 256; i++) {
_alphaToPix[i] = format.ARGBToColor(i, 0, 0, 0);
}
}
YUVAToRGBAManager::YUVAToRGBAManager() {
_lookup = 0;
int16 *Cr_r_tab = &_colorTab[0 * 256];
int16 *Cr_g_tab = &_colorTab[1 * 256];
int16 *Cb_g_tab = &_colorTab[2 * 256];
int16 *Cb_b_tab = &_colorTab[3 * 256];
_rgbToPix = new uint32[3 * 768]; // 9216 bytes
uint32 *r_2_pix_alloc = &_rgbToPix[0 * 768];
uint32 *g_2_pix_alloc = &_rgbToPix[1 * 768];
uint32 *b_2_pix_alloc = &_rgbToPix[2 * 768];
_alphaToPix = new uint32[256]; // 958 bytes
int16 CR, CB;
int i;
// Generate the tables for the display surface
for (i = 0; i < 256; i++) {
for (int i = 0; i < 256; i++) {
// Gamma correction (luminescence table) and chroma correction
// would be done here. See the Berkeley mpeg_play sources.
CR = CB = (i - 128);
int16 CR = (i - 128), CB = CR;
Cr_r_tab[i] = (int16) ( (0.419 / 0.299) * CR) + 0 * 768 + 256;
Cr_g_tab[i] = (int16) (-(0.299 / 0.419) * CR) + 1 * 768 + 256;
Cb_g_tab[i] = (int16) (-(0.114 / 0.331) * CB);
Cb_b_tab[i] = (int16) ( (0.587 / 0.331) * CB) + 2 * 768 + 256;
}
// Set up entries 0-255 in rgb-to-pixel value tables.
for (i = 0; i < 256; i++) {
r_2_pix_alloc[i + 256] = format.ARGBToColor(0, i, 0, 0);
g_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, i, 0);
b_2_pix_alloc[i + 256] = format.ARGBToColor(0, 0, 0, i);
}
// Set up entries 0-255 in alpha-to-pixel value table.
for (i = 0; i < 256; i++) {
_alphaToPix[i] = format.ARGBToColor(i, 0, 0, 0);
}
// Spread out the values we have to the rest of the array so that we do
// not need to check for overflow.
for (i = 0; i < 256; i++) {
r_2_pix_alloc[i] = r_2_pix_alloc[256];
r_2_pix_alloc[i + 512] = r_2_pix_alloc[511];
g_2_pix_alloc[i] = g_2_pix_alloc[256];
g_2_pix_alloc[i + 512] = g_2_pix_alloc[511];
b_2_pix_alloc[i] = b_2_pix_alloc[256];
b_2_pix_alloc[i + 512] = b_2_pix_alloc[511];
}
}
YUVAToRGBALookup::~YUVAToRGBALookup() {
delete[] _rgbToPix;
delete[] _colorTab;
delete[] _alphaToPix;
}
class YUVAToRGBAManager : public Common::Singleton<YUVAToRGBAManager> {
public:
const YUVAToRGBALookup *getLookup(Graphics::PixelFormat format);
private:
friend class Common::Singleton<SingletonBaseType>;
YUVAToRGBAManager();
~YUVAToRGBAManager();
Graphics::PixelFormat _lastFormat;
YUVAToRGBALookup *_lookup;
};
YUVAToRGBAManager::YUVAToRGBAManager() {
_lookup = 0;
}
YUVAToRGBAManager::~YUVAToRGBAManager() {
delete _lookup;
}
const YUVAToRGBALookup *YUVAToRGBAManager::getLookup(Graphics::PixelFormat format) {
if (_lastFormat == format)
const YUVAToRGBALookup *YUVAToRGBAManager::getLookup(Graphics::PixelFormat format, YUVAToRGBAManager::LuminanceScale scale) {
if (_lookup && _lookup->getFormat() == format && _lookup->getScale() == scale)
return _lookup;
delete _lookup;
_lookup = new YUVAToRGBALookup(format);
_lastFormat = format;
_lookup = new YUVAToRGBALookup(format, scale);
return _lookup;
}
} // End of namespace Graphics
namespace Common {
DECLARE_SINGLETON(Graphics::YUVAToRGBAManager);
}
#define YUVAToRGBAMan (Graphics::YUVAToRGBAManager::instance())
namespace Graphics {
#define PUT_PIXELA(s, a, d) \
L = &rgbToPix[(s)]; \
*((PixelInt *)(d)) = (L[cr_r] | L[crb_g] | L[cb_b] | aToPix[a])
template<typename PixelInt>
void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lookup, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lookup, int16 *colorTab, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
int halfHeight = yHeight >> 1;
int halfWidth = yWidth >> 1;
// Keep the tables in pointers here to avoid a dereference on each pixel
const int16 *Cr_r_tab = lookup->_colorTab;
const int16 *Cr_r_tab = colorTab;
const int16 *Cr_g_tab = Cr_r_tab + 256;
const int16 *Cb_g_tab = Cr_g_tab + 256;
const int16 *Cb_b_tab = Cb_g_tab + 256;
const uint32 *rgbToPix = lookup->_rgbToPix;
const uint32 *aToPix = lookup->_alphaToPix;
const uint32 *rgbToPix = lookup->getRGBToPix();
const uint32 *aToPix = lookup->getAlphaToPix();
for (int h = 0; h < halfHeight; h++) {
for (int w = 0; w < halfWidth; w++) {
@ -241,7 +236,6 @@ void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lo
ySrc++;
aSrc++;
dstPtr += sizeof(PixelInt);
}
dstPtr += dstPitch;
@ -252,23 +246,21 @@ void convertYUVA420ToRGBA(byte *dstPtr, int dstPitch, const YUVAToRGBALookup *lo
}
}
void convertYUVA420ToRGBA(Graphics::Surface *dst, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
void YUVAToRGBAManager::convert420(Graphics::Surface *dst, YUVAToRGBAManager::LuminanceScale scale, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch) {
// Sanity checks
assert(dst && dst->pixels);
assert(dst->format.bytesPerPixel == 2 || dst->format.bytesPerPixel == 4);
assert(ySrc && uSrc && vSrc && aSrc);
assert(ySrc && uSrc && vSrc);
assert((yWidth & 1) == 0);
assert((yHeight & 1) == 0);
if (yHeight & 1) // Odd height, the last line won't be converted
warning("Decoding YUV420 data with an odd height %d", yHeight);
const YUVAToRGBALookup *lookup = YUVAToRGBAMan.getLookup(dst->format);
const YUVAToRGBALookup *lookup = getLookup(dst->format, scale);
// Use a templated function to avoid an if check on every pixel
if (dst->format.bytesPerPixel == 2)
convertYUVA420ToRGBA<uint16>((byte *)dst->pixels, dst->pitch, lookup, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
convertYUVA420ToRGBA<uint16>((byte *)dst->pixels, dst->pitch, lookup, _colorTab, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
else
convertYUVA420ToRGBA<uint32>((byte *)dst->pixels, dst->pitch, lookup, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
convertYUVA420ToRGBA<uint32>((byte *)dst->pixels, dst->pitch, lookup, _colorTab, ySrc, uSrc, vSrc, aSrc, yWidth, yHeight, yPitch, uvPitch);
}
} // End of namespace Graphics

View File

@ -23,6 +23,7 @@
/**
* @file
* YUV to RGB conversion used in engines:
* - mohawk
* - scumm (he)
* - sword25
*/
@ -31,27 +32,50 @@
#define GRAPHICS_YUVA_TO_RGBA_H
#include "common/scummsys.h"
#include "common/singleton.h"
#include "graphics/surface.h"
namespace Graphics {
struct Surface;
class YUVAToRGBALookup;
/**
* Convert a YUVA420 image to an RGBA surface
*
* @param dst the destination surface
* @param ySrc the source of the y component
* @param uSrc the source of the u component
* @param vSrc the source of the v component
* @param aSrc the source of the a component
* @param yWidth the width of the y surface (must be divisible by 2)
* @param yHeight the height of the y surface (must be divisible by 2)
* @param yPitch the pitch of the y surface
* @param uvPitch the pitch of the u and v surfaces
*/
void convertYUVA420ToRGBA(Graphics::Surface *dst, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch);
class YUVAToRGBAManager : public Common::Singleton<YUVAToRGBAManager> {
public:
/** The scale of the luminance values */
enum LuminanceScale {
kScaleFull, /** Luminance values range from [0, 255] */
kScaleITU /** Luminance values range from [16, 235], the range from ITU-R BT.601 */
};
/**
* Convert a YUV420 image to an RGB surface
*
* @param dst the destination surface
* @param scale the scale of the luminance values
* @param ySrc the source of the y component
* @param uSrc the source of the u component
* @param vSrc the source of the v component
* @param aSrc the source of the a component
* @param yWidth the width of the y surface (must be divisible by 2)
* @param yHeight the height of the y surface (must be divisible by 2)
* @param yPitch the pitch of the y surface
* @param uvPitch the pitch of the u and v surfaces
*/
void convert420(Graphics::Surface *dst, LuminanceScale scale, const byte *ySrc, const byte *uSrc, const byte *vSrc, const byte *aSrc, int yWidth, int yHeight, int yPitch, int uvPitch);
private:
friend class Common::Singleton<SingletonBaseType>;
YUVAToRGBAManager();
~YUVAToRGBAManager();
const YUVAToRGBALookup *getLookup(Graphics::PixelFormat format, LuminanceScale scale);
YUVAToRGBALookup *_lookup;
int16 _colorTab[4 * 256]; // 2048 bytes
};
} // End of namespace Graphics
#define YUVAToRGBAMan (::Graphics::YUVAToRGBAManager::instance())
#endif

File diff suppressed because it is too large Load Diff

View File

@ -31,22 +31,28 @@
#ifndef VIDEO_BINK_DECODER_H
#define VIDEO_BINK_DECODER_H
#include "audio/audiostream.h"
#include "audio/mixer.h"
#include "common/array.h"
#include "common/rational.h"
#include "graphics/surface.h"
#include "video/video_decoder.h"
namespace Common {
class SeekableReadStream;
class BitStream;
class Huffman;
namespace Audio {
class AudioStream;
class QueuingAudioStream;
}
class RDFT;
class DCT;
namespace Common {
class SeekableReadStream;
class BitStream;
class Huffman;
class RDFT;
class DCT;
}
namespace Graphics {
struct Surface;
}
namespace Video {
@ -57,92 +63,31 @@ namespace Video {
* Video decoder used in engines:
* - scumm (he)
*/
class BinkDecoder : public FixedRateVideoDecoder {
class BinkDecoder : public VideoDecoder {
public:
BinkDecoder();
~BinkDecoder();
// VideoDecoder API
bool loadStream(Common::SeekableReadStream *stream);
void close();
bool isVideoLoaded() const { return _bink != 0; }
uint16 getWidth() const { return _surface.w; }
uint16 getHeight() const { return _surface.h; }
Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
uint32 getFrameCount() const { return _frames.size(); }
uint32 getTime() const;
const Graphics::Surface *decodeNextFrame();
// FixedRateVideoDecoder
Common::Rational getFrameRate() const { return _frameRate; }
// Bink specific
bool loadStream(Common::SeekableReadStream *stream, const Graphics::PixelFormat &format);
// ResidualVM-specific:
bool seek(const Audio::Timestamp &time);
void setAudioTrack(uint32 track);
uint32 findKeyFrame(uint32 frame) const;
protected:
// VideoDecoder API
void updateVolume();
void updateBalance();
void readNextPacket();
private:
static const int kAudioChannelsMax = 2;
static const int kAudioBlockSizeMax = (kAudioChannelsMax << 11);
/** IDs for different data types used in Bink video codec. */
enum Source {
kSourceBlockTypes = 0, ///< 8x8 block types.
kSourceSubBlockTypes , ///< 16x16 block types (a subset of 8x8 block types).
kSourceColors , ///< Pixel values used for different block types.
kSourcePattern , ///< 8-bit values for 2-color pattern fill.
kSourceXOff , ///< X components of motion value.
kSourceYOff , ///< Y components of motion value.
kSourceIntraDC , ///< DC values for intrablocks with DCT.
kSourceInterDC , ///< DC values for interblocks with DCT.
kSourceRun , ///< Run lengths for special fill block.
kSourceMAX
};
/** Bink video block types. */
enum BlockType {
kBlockSkip = 0, ///< Skipped block.
kBlockScaled , ///< Block has size 16x16.
kBlockMotion , ///< Block is copied from previous frame with some offset.
kBlockRun , ///< Block is composed from runs of colors with custom scan order.
kBlockResidue , ///< Motion block with some difference added.
kBlockIntra , ///< Intra DCT block.
kBlockFill , ///< Block is filled with single color.
kBlockInter , ///< Motion block with DCT applied to the difference.
kBlockPattern , ///< Block is filled with two colors following custom pattern.
kBlockRaw ///< Uncoded 8x8 block.
};
/** Data structure for decoding and tranlating Huffman'd data. */
struct Huffman {
int index; ///< Index of the Huffman codebook to use.
byte symbols[16]; ///< Huffman symbol => Bink symbol tranlation list.
};
/** Data structure used for decoding a single Bink data type. */
struct Bundle {
int countLengths[2]; ///< Lengths of number of entries to decode (in bits).
int countLength; ///< Length of number of entries to decode (in bits) for the current plane.
Huffman huffman; ///< Huffman codebook.
byte *data; ///< Buffer for decoded symbols.
byte *dataEnd; ///< Buffer end.
byte *curDec; ///< Pointer to the data that wasn't yet decoded.
byte *curPtr; ///< Pointer to the data that wasn't yet read.
};
enum AudioCodec {
kAudioCodecDCT,
kAudioCodecRDFT
};
/** An audio track. */
struct AudioTrack {
struct AudioInfo {
uint16 flags;
uint32 sampleRate;
@ -177,8 +122,8 @@ protected:
Common::RDFT *rdft;
Common::DCT *dct;
AudioTrack();
~AudioTrack();
AudioInfo();
~AudioInfo();
};
/** A video frame. */
@ -194,149 +139,230 @@ protected:
~VideoFrame();
};
/** A decoder state. */
struct DecodeContext {
VideoFrame *video;
class BinkVideoTrack : public FixedRateVideoTrack {
public:
BinkVideoTrack(uint32 width, uint32 height, const Graphics::PixelFormat &format, uint32 frameCount, const Common::Rational &frameRate, bool swapPlanes, bool hasAlpha, uint32 id);
~BinkVideoTrack();
uint32 planeIdx;
uint16 getWidth() const { return _surface.w; }
uint16 getHeight() const { return _surface.h; }
Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
int getCurFrame() const { return _curFrame; }
int getFrameCount() const { return _frameCount; }
const Graphics::Surface *decodeNextFrame() { return &_surface; }
// ResidualVM-specific:
bool isSeekable() const { return true; }
bool seek(const Audio::Timestamp &time);
void setCurFrame(uint32 frame) { _curFrame = frame; }
// End of ResidualVM-specific
uint32 blockX;
uint32 blockY;
/** Decode a video packet. */
void decodePacket(VideoFrame &frame);
byte *dest;
byte *prev;
protected:
Common::Rational getFrameRate() const { return _frameRate; }
byte *destStart, *destEnd;
byte *prevStart, *prevEnd;
private:
/** A decoder state. */
struct DecodeContext {
VideoFrame *video;
uint32 pitch;
uint32 planeIdx;
int coordMap[64];
int coordScaledMap1[64];
int coordScaledMap2[64];
int coordScaledMap3[64];
int coordScaledMap4[64];
uint32 blockX;
uint32 blockY;
byte *dest;
byte *prev;
byte *destStart, *destEnd;
byte *prevStart, *prevEnd;
uint32 pitch;
int coordMap[64];
int coordScaledMap1[64];
int coordScaledMap2[64];
int coordScaledMap3[64];
int coordScaledMap4[64];
};
/** IDs for different data types used in Bink video codec. */
enum Source {
kSourceBlockTypes = 0, ///< 8x8 block types.
kSourceSubBlockTypes , ///< 16x16 block types (a subset of 8x8 block types).
kSourceColors , ///< Pixel values used for different block types.
kSourcePattern , ///< 8-bit values for 2-color pattern fill.
kSourceXOff , ///< X components of motion value.
kSourceYOff , ///< Y components of motion value.
kSourceIntraDC , ///< DC values for intrablocks with DCT.
kSourceInterDC , ///< DC values for interblocks with DCT.
kSourceRun , ///< Run lengths for special fill block.
kSourceMAX
};
/** Bink video block types. */
enum BlockType {
kBlockSkip = 0, ///< Skipped block.
kBlockScaled , ///< Block has size 16x16.
kBlockMotion , ///< Block is copied from previous frame with some offset.
kBlockRun , ///< Block is composed from runs of colors with custom scan order.
kBlockResidue , ///< Motion block with some difference added.
kBlockIntra , ///< Intra DCT block.
kBlockFill , ///< Block is filled with single color.
kBlockInter , ///< Motion block with DCT applied to the difference.
kBlockPattern , ///< Block is filled with two colors following custom pattern.
kBlockRaw ///< Uncoded 8x8 block.
};
/** Data structure for decoding and tranlating Huffman'd data. */
struct Huffman {
int index; ///< Index of the Huffman codebook to use.
byte symbols[16]; ///< Huffman symbol => Bink symbol tranlation list.
};
/** Data structure used for decoding a single Bink data type. */
struct Bundle {
int countLengths[2]; ///< Lengths of number of entries to decode (in bits).
int countLength; ///< Length of number of entries to decode (in bits) for the current plane.
Huffman huffman; ///< Huffman codebook.
byte *data; ///< Buffer for decoded symbols.
byte *dataEnd; ///< Buffer end.
byte *curDec; ///< Pointer to the data that wasn't yet decoded.
byte *curPtr; ///< Pointer to the data that wasn't yet read.
};
int _curFrame;
int _frameCount;
Graphics::Surface _surface;
int _surfaceWidth; ///< The actual surface width
int _surfaceHeight; ///< The actual surface height
uint32 _id; ///< The BIK FourCC.
bool _hasAlpha; ///< Do video frames have alpha?
bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV?
Common::Rational _frameRate;
Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types.
Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding.
/** Huffman codebooks to use for decoding high nibbles in color data types. */
Huffman _colHighHuffman[16];
/** Value of the last decoded high nibble in color data types. */
int _colLastVal;
byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame.
byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame.
/** Initialize the bundles. */
void initBundles();
/** Deinitialize the bundles. */
void deinitBundles();
/** Initialize the Huffman decoders. */
void initHuffman();
/** Decode a plane. */
void decodePlane(VideoFrame &video, int planeIdx, bool isChroma);
/** Read/Initialize a bundle for decoding a plane. */
void readBundle(VideoFrame &video, Source source);
/** Read the symbols for a Huffman code. */
void readHuffman(VideoFrame &video, Huffman &huffman);
/** Merge two Huffman symbol lists. */
void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size);
/** Read and translate a symbol out of a Huffman code. */
byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman);
/** Get a direct value out of a bundle. */
int32 getBundleValue(Source source);
/** Read a count value out of a bundle. */
uint32 readBundleCount(VideoFrame &video, Bundle &bundle);
// Handle the block types
void blockSkip (DecodeContext &ctx);
void blockScaledSkip (DecodeContext &ctx);
void blockScaledRun (DecodeContext &ctx);
void blockScaledIntra (DecodeContext &ctx);
void blockScaledFill (DecodeContext &ctx);
void blockScaledPattern(DecodeContext &ctx);
void blockScaledRaw (DecodeContext &ctx);
void blockScaled (DecodeContext &ctx);
void blockMotion (DecodeContext &ctx);
void blockRun (DecodeContext &ctx);
void blockResidue (DecodeContext &ctx);
void blockIntra (DecodeContext &ctx);
void blockFill (DecodeContext &ctx);
void blockInter (DecodeContext &ctx);
void blockPattern (DecodeContext &ctx);
void blockRaw (DecodeContext &ctx);
// Read the bundles
void readRuns (VideoFrame &video, Bundle &bundle);
void readMotionValues(VideoFrame &video, Bundle &bundle);
void readBlockTypes (VideoFrame &video, Bundle &bundle);
void readPatterns (VideoFrame &video, Bundle &bundle);
void readColors (VideoFrame &video, Bundle &bundle);
void readDCS (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign);
void readDCTCoeffs (VideoFrame &video, int16 *block, bool isIntra);
void readResidue (VideoFrame &video, int16 *block, int masksCount);
// Bink video IDCT
void IDCT(int16 *block);
void IDCTPut(DecodeContext &ctx, int16 *block);
void IDCTAdd(DecodeContext &ctx, int16 *block);
};
class BinkAudioTrack : public AudioTrack {
public:
BinkAudioTrack(AudioInfo &audio);
~BinkAudioTrack();
/** Decode an audio packet. */
void decodePacket();
bool seek(const Audio::Timestamp &time) { return true; } // ResidualVM-specific
bool isSeekable() const { return true; } // ResidualVM-specific
void skipSamples(const Audio::Timestamp &length); // ResidualVM-specific
protected:
Audio::AudioStream *getAudioStream() const;
private:
AudioInfo *_audioInfo;
Audio::QueuingAudioStream *_audioStream;
float getFloat();
/** Decode an audio block. */
void audioBlock(int16 *out);
/** Decode a DCT'd audio block. */
void audioBlockDCT();
/** Decode a RDFT'd audio block. */
void audioBlockRDFT();
void readAudioCoeffs(float *coeffs);
static void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels);
};
Common::SeekableReadStream *_bink;
uint32 _id; ///< The BIK FourCC.
Common::Rational _frameRate;
Graphics::Surface _surface;
Audio::SoundHandle _audioHandle;
Audio::QueuingAudioStream *_audioStream;
int32 _audioStartOffset;
uint32 _videoFlags; ///< Video frame features.
bool _hasAlpha; ///< Do video frames have alpha?
bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV?
Common::Array<AudioTrack> _audioTracks; ///< All audio tracks.
Common::Array<AudioInfo> _audioTracks; ///< All audio tracks.
Common::Array<VideoFrame> _frames; ///< All video frames.
uint32 _audioTrack; ///< Audio track to use.
Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding.
Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types.
/** Huffman codebooks to use for decoding high nibbles in color data types. */
Huffman _colHighHuffman[16];
/** Value of the last decoded high nibble in color data types. */
int _colLastVal;
byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame.
byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame.
/** Initialize the bundles. */
void initBundles();
/** Deinitialize the bundles. */
void deinitBundles();
/** Initialize the Huffman decoders. */
void initHuffman();
/** Decode an audio packet. */
void audioPacket(AudioTrack &audio);
/** Decode a video packet. */
virtual void videoPacket(VideoFrame &video);
/** Decode a plane. */
void decodePlane(VideoFrame &video, int planeIdx, bool isChroma);
/** Read/Initialize a bundle for decoding a plane. */
void readBundle(VideoFrame &video, Source source);
/** Read the symbols for a Huffman code. */
void readHuffman(VideoFrame &video, Huffman &huffman);
/** Merge two Huffman symbol lists. */
void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size);
/** Read and translate a symbol out of a Huffman code. */
byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman);
/** Get a direct value out of a bundle. */
int32 getBundleValue(Source source);
/** Read a count value out of a bundle. */
uint32 readBundleCount(VideoFrame &video, Bundle &bundle);
// Handle the block types
void blockSkip (DecodeContext &ctx);
void blockScaledSkip (DecodeContext &ctx);
void blockScaledRun (DecodeContext &ctx);
void blockScaledIntra (DecodeContext &ctx);
void blockScaledFill (DecodeContext &ctx);
void blockScaledPattern(DecodeContext &ctx);
void blockScaledRaw (DecodeContext &ctx);
void blockScaled (DecodeContext &ctx);
void blockMotion (DecodeContext &ctx);
void blockRun (DecodeContext &ctx);
void blockResidue (DecodeContext &ctx);
void blockIntra (DecodeContext &ctx);
void blockFill (DecodeContext &ctx);
void blockInter (DecodeContext &ctx);
void blockPattern (DecodeContext &ctx);
void blockRaw (DecodeContext &ctx);
// Read the bundles
void readRuns (VideoFrame &video, Bundle &bundle);
void readMotionValues(VideoFrame &video, Bundle &bundle);
void readBlockTypes (VideoFrame &video, Bundle &bundle);
void readPatterns (VideoFrame &video, Bundle &bundle);
void readColors (VideoFrame &video, Bundle &bundle);
void readDCS (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign);
void readDCTCoeffs (VideoFrame &video, int16 *block, bool isIntra);
void readResidue (VideoFrame &video, int16 *block, int masksCount);
void initAudioTrack(AudioTrack &audio);
float getFloat(AudioTrack &audio);
/** Decode an audio block. */
void audioBlock (AudioTrack &audio, int16 *out);
/** Decode a DCT'd audio block. */
void audioBlockDCT (AudioTrack &audio);
/** Decode a RDFT'd audio block. */
void audioBlockRDFT(AudioTrack &audio);
void readAudioCoeffs(AudioTrack &audio, float *coeffs);
void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels);
// Bink video IDCT
void IDCT(int16 *block);
void IDCTPut(DecodeContext &ctx, int16 *block);
void IDCTAdd(DecodeContext &ctx, int16 *block);
/** Start playing the audio track */
void startAudio();
/** Stop playing the audio track */
void stopAudio();
void initAudioTrack(AudioInfo &audio);
// ResidualVM-specific:
uint32 _selectedAudioTrack;
};
} // End of namespace Video

View File

@ -1,170 +0,0 @@
/* Residual - A 3D game interpreter
*
* Residual is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the AUTHORS
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "common/bitstream.h"
#include "common/stream.h"
#include "common/substream.h"
#include "common/system.h"
#include "graphics/surface.h"
#include "graphics/yuva_to_rgba.h"
#include "video/bink_decoder_seek.h"
static const uint32 kBIKiID = MKTAG('B', 'I', 'K', 'i');
namespace Video {
// Decode one Bink video packet into _surface.
// Plane order matters: alpha (if present) first, then Y/U/V, with the
// U/V indices possibly swapped depending on the container flags.
void SeekableBinkDecoder::videoPacket(VideoFrame &video) {
	assert(video.bits);
	if (_hasAlpha) {
		// BIKi files carry a 32-bit plane-size word before each plane.
		if (_id == kBIKiID)
			video.bits->skip(32);
		decodePlane(video, 3, false);
	}
	if (_id == kBIKiID)
		video.bits->skip(32);
	for (int i = 0; i < 3; i++) {
		// i == 0 is luma; chroma planes 1 and 2 swap when _swapPlanes is set (1^3=2, 2^3=1).
		int planeIdx = ((i == 0) || !_swapPlanes) ? i : (i ^ 3);
		decodePlane(video, planeIdx, i != 0);
		// Stop early if the bit stream is exhausted (trailing planes absent).
		if (video.bits->pos() >= video.bits->size())
			break;
	}
	// Convert the YUV data we have to our format
	assert(_curPlanes[0] && _curPlanes[1] && _curPlanes[2] && _curPlanes[3]);
	Graphics::convertYUVA420ToRGBA(&_surface, _curPlanes[0], _curPlanes[1], _curPlanes[2], _curPlanes[3],
			_surface.w, _surface.h, _surface.w, _surface.w >> 1);
	// And swap the planes with the reference planes (current frame becomes
	// the prediction reference for the next one)
	for (int i = 0; i < 4; i++)
		SWAP(_curPlanes[i], _oldPlanes[i]);
}
// Total video duration in milliseconds, derived from frame count and rate.
uint32 SeekableBinkDecoder::getDuration() const {
	const Common::Rational totalMsecs = getFrameCount() * 1000 / getFrameRate();
	return totalMsecs.toInt();
}
// Walk backwards from the requested frame to the nearest keyframe.
// If no keyframe is flagged, assume the requested frame itself is one.
uint32 SeekableBinkDecoder::findKeyFrame(uint32 frame) const {
	for (int idx = frame; idx >= 0; --idx)
		if (_frames[idx].keyFrame)
			return idx;

	return frame;
}
// Seek to an absolute frame index: decode forward from the nearest keyframe,
// then rebase the timing so playback continues from the target frame.
void SeekableBinkDecoder::seekToFrame(uint32 frame) {
	assert(frame < _frames.size());
	// Fast path
	if ((int32)frame == _curFrame + 1)
		return;
	// Stop all audio (for now)
	stopAudio();
	// Track down the keyframe
	_curFrame = findKeyFrame(frame) - 1;
	// Decode (and discard) every frame between the keyframe and the target.
	while (_curFrame < (int32)frame - 1)
		skipNextFrame();
	// Map out the starting point: pretend playback started far enough in the
	// past that the target frame is due now.
	Common::Rational startTime = frame * 1000 / getFrameRate();
	_startTime = g_system->getMillis() - startTime.toInt();
	resetPauseStartTime();
	// Adjust the audio starting point
	if (_audioTrack < _audioTracks.size()) {
		Common::Rational audioStartTime = (frame + 1) * 1000 / getFrameRate();
		_audioStartOffset = audioStartTime.toInt();
	}
	// Restart the audio
	startAudio();
}
// Seek to a timestamp by converting it to the last frame index that should
// have been decoded by that time, then delegating to seekToFrame().
void SeekableBinkDecoder::seekToTime(const Audio::Timestamp &time) {
	const Common::Rational targetFrame = time.msecs() * getFrameRate() / 1000;
	seekToFrame(targetFrame.toInt());
}
// Decode the next frame without presenting it: audio packets are skipped
// outright, the video packet is still decoded (needed as prediction
// reference for later frames) but never drawn.
void SeekableBinkDecoder::skipNextFrame() {
	if (endOfVideo())
		return;
	VideoFrame &frame = _frames[_curFrame + 1];
	if (!_bink->seek(frame.offset))
		error("Bad bink seek");
	uint32 frameSize = frame.size;
	// Each audio track's packet is prefixed by a 32-bit length word.
	for (uint32 i = 0; i < _audioTracks.size(); i++) {
		uint32 audioPacketLength = _bink->readUint32LE();
		frameSize -= 4;
		if (frameSize < audioPacketLength)
			error("Audio packet too big for the frame");
		if (audioPacketLength >= 4) {
			// Skip audio data
			_bink->seek(audioPacketLength, SEEK_CUR);
			frameSize -= audioPacketLength;
		}
	}
	// Whatever remains of the frame is the video packet.
	uint32 videoPacketStart = _bink->pos();
	uint32 videoPacketEnd = _bink->pos() + frameSize;
	frame.bits =
		new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink,
			videoPacketStart, videoPacketEnd), true);
	videoPacket(frame);
	delete frame.bits;
	frame.bits = 0;
	_curFrame++;
	if (_curFrame == 0)
		_startTime = g_system->getMillis();
}
// Select which audio track to play. Only takes effect when the file has
// more than one track and the requested index exists.
void SeekableBinkDecoder::setAudioTrack(uint32 track) {
	const bool selectable = _audioTracks.size() > 1 && track < _audioTracks.size();
	if (selectable)
		_audioTrack = track;
}
} /* namespace Video */

View File

@ -1,57 +0,0 @@
/* Residual - A 3D game interpreter
*
* Residual is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the AUTHORS
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "video/bink_decoder.h"
#ifdef USE_BINK
#ifndef VIDEO_BINK_DECODER_SEEK_H
#define VIDEO_BINK_DECODER_SEEK_H
namespace Video {
/**
 * A BinkDecoder extended with random access: seeking by frame or by
 * timestamp, plus selection of an audio track.
 */
class SeekableBinkDecoder: public Video::BinkDecoder,
		public Video::SeekableVideoDecoder {
public:
	// SeekableVideoDecoder API
	void seekToFrame(uint32 frame);
	void seekToTime(const Audio::Timestamp &time);
	uint32 getDuration() const;
	// Bink seek specific
	void setAudioTrack(uint32 track);
protected:
	/** Decode a video packet. */
	void videoPacket(VideoFrame &video);
	/** Find the keyframe needed to decode a frame */
	uint32 findKeyFrame(uint32 frame) const;
	/** Skip the next frame (decode without presenting) */
	void skipNextFrame();
};
} /* namespace Video */
#endif // VIDEO_BINK_DECODER_SEEK_H
#endif // USE_BINK

View File

@ -1,12 +1,11 @@
MODULE := video
MODULE_OBJS := \
mpeg_player.o \
video_decoder.o
video_decoder.o \
mpegps_decoder.o
ifdef USE_BINK
MODULE_OBJS += \
bink_decoder.o \
bink_decoder_seek.o
bink_decoder.o
endif
# Include common rules

View File

@ -1,493 +0,0 @@
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
// The YUV to RGB conversion code is derived from SDL's YUV overlay code, which
// in turn appears to be derived from mpeg_play. The following copyright
// notices have been included in accordance with the original license. Please
// note that the term "software" in this context only applies to the
// buildLookup() and plotYUV*() functions below.
// Copyright (c) 1995 The Regents of the University of California.
// All rights reserved.
//
// Permission to use, copy, modify, and distribute this software and its
// documentation for any purpose, without fee, and without written agreement is
// hereby granted, provided that the above copyright notice and the following
// two paragraphs appear in all copies of this software.
//
// IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
// DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
// OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
// CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
// ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
// PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
// Copyright (c) 1995 Erik Corry
// All rights reserved.
//
// Permission to use, copy, modify, and distribute this software and its
// documentation for any purpose, without fee, and without written agreement is
// hereby granted, provided that the above copyright notice and the following
// two paragraphs appear in all copies of this software.
//
// IN NO EVENT SHALL ERIK CORRY BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
// THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF ERIK CORRY HAS BEEN ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ERIK CORRY SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
// BASIS, AND ERIK CORRY HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT,
// UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
// Portions of this software Copyright (c) 1995 Brown University.
// All rights reserved.
//
// Permission to use, copy, modify, and distribute this software and its
// documentation for any purpose, without fee, and without written agreement
// is hereby granted, provided that the above copyright notice and the
// following two paragraphs appear in all copies of this software.
//
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE TO ANY PARTY FOR
// DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
// OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF BROWN
// UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// BROWN UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
// BASIS, AND BROWN UNIVERSITY HAS NO OBLIGATION TO PROVIDE MAINTENANCE,
// SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#include "video/mpeg_player.h"
#include "common/file.h"
#include "common/system.h"
#include "common/util.h"
namespace Video {
// Set up animation state for a movie of the given size, choosing an integer
// overlay scale factor (clamped to 1..3) from the current overlay size.
BaseAnimationState::BaseAnimationState(OSystem *sys, int width, int height)
	: _movieWidth(width), _movieHeight(height), _frameWidth(width), _frameHeight(height), _sys(sys) {
	const int screenW = _sys->getOverlayWidth();
	const int screenH = _sys->getOverlayHeight();
	// Largest integer scale that still fits the overlay; must be at least 1.
	_movieScale = MIN(screenW / _movieWidth, screenH / _movieHeight);
	assert(_movieScale >= 1);
	if (_movieScale > 3)
		_movieScale = 3;
	// Lookup tables are built lazily in buildLookup().
	_colorTab = NULL;
	_rgbToPix = NULL;
	memset(&_overlayFormat, 0, sizeof(_overlayFormat));
}
// Tear down the decoder, hide the overlay and release the buffers.
// All resources here are only ever allocated under USE_MPEG2, so the
// whole cleanup is compiled out otherwise.
BaseAnimationState::~BaseAnimationState() {
#ifdef USE_MPEG2
	if (_mpegDecoder)
		mpeg2_close(_mpegDecoder);
	delete _mpegFile;
	_sys->hideOverlay();
	free(_overlay);
	free(_colorTab);
	free(_rgbToPix);
#endif
}
// Open "<name>.mp2" and set up the libmpeg2 decoder and overlay buffer.
// Returns false when the file is missing, the decoder cannot be allocated,
// or MPEG2 support is compiled out.
bool BaseAnimationState::init(const char *name) {
#ifdef USE_MPEG2
	char tempFile[512];
	_mpegDecoder = NULL;
	_mpegFile = NULL;
	buildLookup();
	_overlay = (OverlayColor *)calloc(_movieScale * _movieWidth * _movieScale * _movieHeight, sizeof(OverlayColor));
	_sys->showOverlay();
	// Open MPEG2 stream. Use snprintf so an overlong movie name cannot
	// overflow the fixed-size buffer (was an unbounded sprintf).
	_mpegFile = new Common::File();
	snprintf(tempFile, sizeof(tempFile), "%s.mp2", name);
	if (!_mpegFile->open(tempFile)) {
		warning("Cutscene: Could not open %s", tempFile);
		return false;
	}
	// Load and configure decoder
	_mpegDecoder = mpeg2_init();
	if (_mpegDecoder == NULL) {
		warning("Cutscene: Could not allocate an MPEG2 decoder");
		return false;
	}
	_mpegInfo = mpeg2_info(_mpegDecoder);
	_frameNum = 0;
	return true;
#else /* USE_MPEG2 */
	return false;
#endif
}
// Pump the libmpeg2 parser until one displayable frame has been drawn.
// Returns true when a frame was produced, false at end of stream (or when
// MPEG2 support is compiled out).
bool BaseAnimationState::decodeFrame() {
#ifdef USE_MPEG2
	mpeg2_state_t state;
	const mpeg2_sequence_t *sequence_i;
	size_t size = (size_t) -1;
	static byte buf[BUFFER_SIZE];
	do {
		state = mpeg2_parse(_mpegDecoder);
		sequence_i = _mpegInfo->sequence;
		switch (state) {
		case STATE_BUFFER:
			// Parser is out of data: feed it the next chunk of the file.
			size = _mpegFile->read(buf, BUFFER_SIZE);
			mpeg2_buffer(_mpegDecoder, buf, buf + size);
			break;
		case STATE_SLICE:
		case STATE_END:
			if (_mpegInfo->display_fbuf) {
				checkPaletteSwitch();
				drawYUV(sequence_i->width, sequence_i->height, _mpegInfo->display_fbuf->buf);
				_frameNum++;
				return true;
			}
			break;
		default:
			break;
		}
	} while (size); // a zero-byte read means the file is exhausted
#endif
	return false;
}
// Palette-switch hook; the base implementation never reports a switch.
bool BaseAnimationState::checkPaletteSwitch() {
	return false;
}
// React to an overlay resolution change: recompute the scale factor,
// reallocate the frame buffer if the scale changed, and rebuild the
// colour lookup tables for the (possibly new) overlay pixel format.
void BaseAnimationState::handleScreenChanged() {
	const int screenW = _sys->getOverlayWidth();
	const int screenH = _sys->getOverlayHeight();
	int newScale = MIN(screenW / _movieWidth, screenH / _movieHeight);
	assert(newScale >= 1);
	if (newScale > 3)
		newScale = 3;
	if (newScale != _movieScale) {
		// HACK: Since frames generally do not cover the entire screen,
		// We need to undraw the old frame. This is a very hacky
		// way of doing that.
		OverlayColor *buf = (OverlayColor *)calloc(screenW * screenH, sizeof(OverlayColor));
		_sys->copyRectToOverlay(buf, screenW, 0, 0, screenW, screenH);
		free(buf);
		free(_overlay);
		_movieScale = newScale;
		_overlay = (OverlayColor *)calloc(_movieScale * _movieWidth * _movieScale * _movieHeight, sizeof(OverlayColor));
	}
	buildLookup();
}
// Build the YUV->overlay-pixel lookup tables (Berkeley mpeg_play scheme).
// _colorTab maps a chroma byte to a biased index into _rgbToPix; the bias
// (k * 768 + 256) selects the R/G/B sub-table and centres it so that
// out-of-range sums clamp instead of overflowing. No-op if tables already
// match the current overlay format.
void BaseAnimationState::buildLookup() {
	// Do we already have lookup tables for this bit format?
	Graphics::PixelFormat format = _sys->getOverlayFormat();
	if (format == _overlayFormat && _colorTab && _rgbToPix)
		return;
	free(_colorTab);
	free(_rgbToPix);
	_colorTab = (int16 *)malloc(4 * 256 * sizeof(int16));
	int16 *Cr_r_tab = &_colorTab[0 * 256];
	int16 *Cr_g_tab = &_colorTab[1 * 256];
	int16 *Cb_g_tab = &_colorTab[2 * 256];
	int16 *Cb_b_tab = &_colorTab[3 * 256];
	_rgbToPix = (OverlayColor *)malloc(3 * 768 * sizeof(OverlayColor));
	OverlayColor *r_2_pix_alloc = &_rgbToPix[0 * 768];
	OverlayColor *g_2_pix_alloc = &_rgbToPix[1 * 768];
	OverlayColor *b_2_pix_alloc = &_rgbToPix[2 * 768];
	int16 CR, CB;
	int i;
	// Generate the tables for the display surface
	for (i = 0; i < 256; i++) {
		// Gamma correction (luminescence table) and chroma correction
		// would be done here. See the Berkeley mpeg_play sources.
		CR = CB = (i - 128);
		Cr_r_tab[i] = (int16) ( (0.419 / 0.299) * CR) + 0 * 768 + 256;
		Cr_g_tab[i] = (int16) (-(0.299 / 0.419) * CR) + 1 * 768 + 256;
		Cb_g_tab[i] = (int16) (-(0.114 / 0.331) * CB);
		Cb_b_tab[i] = (int16) ( (0.587 / 0.331) * CB) + 2 * 768 + 256;
	}
	// Set up entries 0-255 in rgb-to-pixel value tables.
	for (i = 0; i < 256; i++) {
		r_2_pix_alloc[i + 256] = format.RGBToColor(i, 0, 0);
		g_2_pix_alloc[i + 256] = format.RGBToColor(0, i, 0);
		b_2_pix_alloc[i + 256] = format.RGBToColor(0, 0, i);
	}
	// Spread out the values we have to the rest of the array so that we do
	// not need to check for overflow.
	for (i = 0; i < 256; i++) {
		r_2_pix_alloc[i] = r_2_pix_alloc[256];
		r_2_pix_alloc[i + 512] = r_2_pix_alloc[511];
		g_2_pix_alloc[i] = g_2_pix_alloc[256];
		g_2_pix_alloc[i + 512] = g_2_pix_alloc[511];
		b_2_pix_alloc[i] = b_2_pix_alloc[256];
		b_2_pix_alloc[i + 512] = b_2_pix_alloc[511];
	}
	// Remember which format the tables were built for.
	_overlayFormat = format;
}
// Dispatch to the YUV plotter matching the current integer scale factor
// (constructor/handleScreenChanged clamp _movieScale to 1..3).
void BaseAnimationState::plotYUV(int width, int height, byte *const *dat) {
	if (_movieScale == 1)
		plotYUV1x(width, height, dat);
	else if (_movieScale == 2)
		plotYUV2x(width, height, dat);
	else if (_movieScale == 3)
		plotYUV3x(width, height, dat);
}
// Convert one 4:2:0 YUV frame to overlay pixels at 1:1 scale.
// Processes two rows at a time because each chroma sample covers a
// 2x2 block of luma samples.
void BaseAnimationState::plotYUV1x(int width, int height, byte *const *dat) {
	byte *lum = dat[0];
	byte *cr = dat[2];
	byte *cb = dat[1];
	byte *lum2 = lum + width;
	int16 cr_r;
	int16 crb_g;
	int16 cb_b;
	OverlayColor *row1 = _overlay;
	OverlayColor *row2 = row1 + _movieWidth;
	int x;
	for (; height > 0; height -= 2) {
		OverlayColor *r1 = row1;
		OverlayColor *r2 = row2;
		for (x = width; x > 0; x -= 2) {
			register OverlayColor *L;
			// Chroma contributions are biased indices into _rgbToPix
			// (see buildLookup()); shared by the 2x2 luma block.
			cr_r = _colorTab[*cr + 0 * 256];
			crb_g = _colorTab[*cr + 1 * 256] + _colorTab[*cb + 2 * 256];
			cb_b = _colorTab[*cb + 3 * 256];
			++cr;
			++cb;
			L = &_rgbToPix[*lum++];
			*r1++ = L[cr_r] | L[crb_g] | L[cb_b];
			L = &_rgbToPix[*lum++];
			*r1++ = L[cr_r] | L[crb_g] | L[cb_b];
			// Now, do second row.
			L = &_rgbToPix[*lum2++];
			*r2++ = L[cr_r] | L[crb_g] | L[cb_b];
			L = &_rgbToPix[*lum2++];
			*r2++ = L[cr_r] | L[crb_g] | L[cb_b];
		}
		// Advance past the row pair we just consumed/produced.
		lum += width;
		lum2 += width;
		row1 += 2 * _movieWidth;
		row2 += 2 * _movieWidth;
	}
}
// Convert one 4:2:0 YUV frame to overlay pixels at 2x scale.
// Each source pixel is written twice horizontally; each finished output row
// is then duplicated with memcpy for the vertical doubling.
void BaseAnimationState::plotYUV2x(int width, int height, byte *const *dat) {
	byte *lum = dat[0];
	byte *cr = dat[2];
	byte *cb = dat[1];
	byte *lum2 = lum + width;
	int16 cr_r;
	int16 crb_g;
	int16 cb_b;
	OverlayColor *row1 = _overlay;
	OverlayColor *row2 = row1 + 2 * 2 * _movieWidth; // second source row, 2 output rows down
	int x;
	for (; height > 0; height -= 2) {
		OverlayColor *r1 = row1;
		OverlayColor *r2 = row2;
		for (x = width; x > 0; x -= 2) {
			register OverlayColor *L;
			register OverlayColor C;
			// Chroma contributions shared by the 2x2 luma block.
			cr_r = _colorTab[*cr + 0 * 256];
			crb_g = _colorTab[*cr + 1 * 256] + _colorTab[*cb + 2 * 256];
			cb_b = _colorTab[*cb + 3 * 256];
			++cr;
			++cb;
			L = &_rgbToPix[*lum++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r1++ = C;
			*r1++ = C;
			L = &_rgbToPix[*lum++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r1++ = C;
			*r1++ = C;
			// Now, do second row.
			L = &_rgbToPix[*lum2++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r2++ = C;
			*r2++ = C;
			L = &_rgbToPix[*lum2++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r2++ = C;
			*r2++ = C;
		}
		// Duplicate each output row for the vertical 2x.
		memcpy(row1 + 2 * _movieWidth, row1, 2 * _movieWidth * sizeof(OverlayColor));
		memcpy(row2 + 2 * _movieWidth, row2, 2 * _movieWidth * sizeof(OverlayColor));
		lum += width;
		lum2 += width;
		row1 += 4 * 2 * _movieWidth;
		row2 += 4 * 2 * _movieWidth;
	}
}
// Convert one 4:2:0 YUV frame to overlay pixels at 3x scale.
// Each source pixel is written three times horizontally; each finished
// output row is duplicated twice with memcpy for the vertical tripling.
void BaseAnimationState::plotYUV3x(int width, int height, byte *const *dat) {
	byte *lum = dat[0];
	byte *cr = dat[2];
	byte *cb = dat[1];
	byte *lum2 = lum + width;
	int16 cr_r;
	int16 crb_g;
	int16 cb_b;
	OverlayColor *row1 = _overlay;
	OverlayColor *row2 = row1 + 3 * 3 * _movieWidth; // second source row, 3 output rows down
	int x;
	for (; height > 0; height -= 2) {
		OverlayColor *r1 = row1;
		OverlayColor *r2 = row2;
		for (x = width; x > 0; x -= 2) {
			register OverlayColor *L;
			register OverlayColor C;
			// Chroma contributions shared by the 2x2 luma block.
			cr_r = _colorTab[*cr + 0 * 256];
			crb_g = _colorTab[*cr + 1 * 256] + _colorTab[*cb + 2 * 256];
			cb_b = _colorTab[*cb + 3 * 256];
			++cr;
			++cb;
			L = &_rgbToPix[*lum++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r1++ = C;
			*r1++ = C;
			*r1++ = C;
			L = &_rgbToPix[*lum++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r1++ = C;
			*r1++ = C;
			*r1++ = C;
			// Now, do second row.
			L = &_rgbToPix[*lum2++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r2++ = C;
			*r2++ = C;
			*r2++ = C;
			L = &_rgbToPix[*lum2++];
			C = L[cr_r] | L[crb_g] | L[cb_b];
			*r2++ = C;
			*r2++ = C;
			*r2++ = C;
		}
		// Duplicate each output row twice for the vertical 3x.
		memcpy(row1 + 3 * _movieWidth, row1, 3 * _movieWidth * sizeof(OverlayColor));
		memcpy(row1 + 2 * 3 * _movieWidth, row1, 3 * _movieWidth * sizeof(OverlayColor));
		memcpy(row2 + 3 * _movieWidth, row2, 3 * _movieWidth * sizeof(OverlayColor));
		memcpy(row2 + 2 * 3 * _movieWidth, row2, 3 * _movieWidth * sizeof(OverlayColor));
		lum += width;
		lum2 += width;
		row1 += 6 * 3 * _movieWidth;
		row2 += 6 * 3 * _movieWidth;
	}
}
// Centre the scaled frame inside the overlay and push it to the screen.
void BaseAnimationState::updateScreen() {
	const int outWidth = _movieScale * _frameWidth;
	const int outHeight = _movieScale * _frameHeight;
	const int outPitch = _movieScale * _movieWidth;
	const int screenW = _sys->getOverlayWidth();
	const int screenH = _sys->getOverlayHeight();
	const int x = (screenW - outWidth) / 2;
	const int y = (screenH - outHeight) / 2;
	_sys->copyRectToOverlay(_overlay, outPitch, x, y, outWidth, outHeight);
	_sys->updateScreen();
}
} // End of namespace Video

View File

@ -1,135 +0,0 @@
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#ifndef VIDEO_MPEG_PLAYER_H
#define VIDEO_MPEG_PLAYER_H
#include "common/scummsys.h"
#include "graphics/pixelformat.h"
// Uncomment this if you are using libmpeg2 0.3.1.
// #define USE_MPEG2_0_3_1
#ifdef USE_MPEG2
#if defined(__PLAYSTATION2__)
typedef uint8 uint8_t;
typedef uint16 uint16_t;
typedef uint32 uint32_t;
#elif defined(_WIN32_WCE)
typedef signed char int8_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
#elif defined(_MSC_VER)
typedef signed char int8_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
#if !defined(SDL_COMPILEDVERSION) || (SDL_COMPILEDVERSION < 1210)
typedef signed long int32_t;
typedef unsigned long uint32_t;
#endif
#else
# include <inttypes.h>
#endif
extern "C" {
#include <mpeg2dec/mpeg2.h>
}
#ifdef USE_MPEG2_0_3_1
typedef int mpeg2_state_t;
typedef sequence_t mpeg2_sequence_t;
#define STATE_BUFFER -1
#endif
#endif
#define SHIFT 1
#define BITDEPTH (1 << (8 - SHIFT))
#define ROUNDADD (1 << (SHIFT - 1))
#define BUFFER_SIZE 4096
namespace Common {
class File;
}
class OSystem;
namespace Video {
/**
 * Shared state for MPEG2 cutscene playback: owns the libmpeg2 decoder,
 * the overlay buffer, and the YUV->overlay colour lookup tables.
 * Subclasses implement drawYUV() to place decoded frames.
 */
class BaseAnimationState {
protected:
	const int _movieWidth;
	const int _movieHeight;
	int _frameWidth;
	int _frameHeight;
	int _movieScale;          // integer overlay scale factor, clamped to 1..3
	OSystem *_sys;
	uint _frameNum;           // frames decoded so far
#ifdef USE_MPEG2
	mpeg2dec_t *_mpegDecoder;
	const mpeg2_info_t *_mpegInfo;
#endif
	Common::File *_mpegFile;
	OverlayColor *_overlay;   // scaled frame buffer blitted in updateScreen()
	Graphics::PixelFormat _overlayFormat;
	int16 *_colorTab;         // chroma -> biased _rgbToPix index (buildLookup)
	OverlayColor *_rgbToPix;  // component -> pixel value sub-tables
public:
	BaseAnimationState(OSystem *sys, int width, int height);
	virtual ~BaseAnimationState();
	bool init(const char *name);
	bool decodeFrame();
	void handleScreenChanged();
	void updateScreen();
	void buildLookup();
	int getFrameWidth() { return _frameWidth; }
	int getFrameHeight() { return _frameHeight; }
protected:
	bool checkPaletteSwitch();
	/** Present one decoded YUV frame; implemented by subclasses. */
	virtual void drawYUV(int width, int height, byte *const *dat) = 0;
	void plotYUV(int width, int height, byte *const *dat);
	void plotYUV1x(int width, int height, byte *const *dat);
	void plotYUV2x(int width, int height, byte *const *dat);
	void plotYUV3x(int width, int height, byte *const *dat);
};
} // End of namespace Video
#endif

832
video/mpegps_decoder.cpp Normal file
View File

@ -0,0 +1,832 @@
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#include "audio/audiostream.h"
#include "audio/decoders/raw.h"
#include "common/endian.h"
#include "common/stream.h"
#include "common/system.h"
#include "common/textconsole.h"
#include "graphics/yuv_to_rgb.h"
#include "video/mpegps_decoder.h"
// The demuxing code is based on libav's demuxing code
namespace Video {
#define PACK_START_CODE 0x1BA
#define SYSTEM_HEADER_START_CODE 0x1BB
#define PROGRAM_STREAM_MAP 0x1BC
#define PRIVATE_STREAM_1 0x1BD
#define PADDING_STREAM 0x1BE
#define PRIVATE_STREAM_2 0x1BF
// Construct with no stream loaded and an empty program-stream-map table.
MPEGPSDecoder::MPEGPSDecoder() : _stream(0) {
	memset(_psmESType, 0, 256);
}
// close() releases the stream and resets all demuxer state.
MPEGPSDecoder::~MPEGPSDecoder() {
	close();
}
// Take ownership of the stream and locate the first video track.
// On failure the stream is disposed of (via close()) and false returned;
// on success the stream is rewound for playback.
bool MPEGPSDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();
	_stream = stream;
	if (!addFirstVideoTrack()) {
		close();
		return false;
	}
	_stream->seek(0);
	return true;
}
// Release the input stream and reset demuxer state. The stream map only
// holds non-owning pointers — the tracks themselves are owned by the
// VideoDecoder base (added via addTrack()) and freed by its close().
void MPEGPSDecoder::close() {
	VideoDecoder::close();
	delete _stream;
	_stream = 0;
	_streamMap.clear();
	memset(_psmESType, 0, 256);
}
void MPEGPSDecoder::readNextPacket() {
if (_stream->eos())
return;
for (;;) {
int32 startCode;
uint32 pts, dts;
int size = readNextPacketHeader(startCode, pts, dts);
if (size < 0) {
// End of stream
for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
((MPEGVideoTrack *)*it)->setEndOfTrack();
return;
}
MPEGStream *stream = 0;
Common::SeekableReadStream *packet = _stream->readStream(size);
if (_streamMap.contains(startCode)) {
// We already found the stream
stream = _streamMap[startCode];
} else {
// We haven't seen this before
if (startCode == 0x1BD) {
// Private stream 1
PrivateStreamType streamType = detectPrivateStreamType(packet);
packet->seek(0);
switch (streamType) {
case kPrivateStreamPS2Audio: {
// PS2 Audio stream
PS2AudioTrack *audioTrack = new PS2AudioTrack(packet);
stream = audioTrack;
_streamMap[startCode] = audioTrack;
addTrack(audioTrack);
break;
}
default:
// Unknown (silently ignore)
break;
}
} if (startCode >= 0x1E0 && startCode <= 0x1EF) {
// Video stream
// TODO: Multiple video streams
} else if (startCode >= 0x1C0 && startCode <= 0x1DF) {
#ifdef USE_MAD
// MPEG Audio stream
MPEGAudioTrack *audioTrack = new MPEGAudioTrack(packet);
stream = audioTrack;
_streamMap[startCode] = audioTrack;
addTrack(audioTrack);
#endif
}
}
if (stream) {
bool done = stream->sendPacket(packet, pts, dts);
if (done && stream->getStreamType() == MPEGStream::kStreamTypeVideo)
return;
} else {
delete packet;
}
}
}
#define MAX_SYNC_SIZE 100000
// Scan forward for an MPEG start code (byte sequence 00 00 01 xx) within at
// most MAX_SYNC_SIZE bytes. Returns the 24-bit code (0x000001xx truncated to
// 0x0001xx range by the mask) or -1 on EOS/limit; 'size' returns the bytes
// of scan budget remaining.
int MPEGPSDecoder::findNextStartCode(uint32 &size) {
	size = MAX_SYNC_SIZE;
	int32 state = 0xFF;
	while (size > 0) {
		byte v = _stream->readByte();
		if (_stream->eos())
			return -1;
		size--;
		// state holds the last three bytes; 0x000001 means the next byte
		// completes a start code.
		if (state == 0x1)
			return ((state << 8) | v) & 0xFFFFFF;
		state = ((state << 8) | v) & 0xFFFFFF;
	}
	return -1;
}
// Parse the next PES packet header (ported from libav's demuxer): skips
// pack/system headers and padding, handles the program stream map, strips
// stuffing bytes, and decodes MPEG-1 and MPEG-2 style PTS/DTS fields.
// Returns the payload length and sets startCode/pts/dts (timestamps are
// 0xFFFFFFFF when absent), or -1 at end of stream.
int MPEGPSDecoder::readNextPacketHeader(int32 &startCode, uint32 &pts, uint32 &dts) {
	for (;;) {
		uint32 size;
		startCode = findNextStartCode(size);
		if (_stream->eos())
			return -1;
		if (startCode < 0)
			continue;
		// Remember where this packet's header started so malformed headers
		// can rewind and resync.
		uint32 lastSync = _stream->pos();
		if (startCode == PACK_START_CODE || startCode == SYSTEM_HEADER_START_CODE)
			continue;
		int length = _stream->readUint16BE();
		if (startCode == PADDING_STREAM || startCode == PRIVATE_STREAM_2) {
			_stream->skip(length);
			continue;
		}
		if (startCode == PROGRAM_STREAM_MAP) {
			parseProgramStreamMap(length);
			continue;
		}
		// Find matching stream
		if (!((startCode >= 0x1C0 && startCode <= 0x1DF) ||
				(startCode >= 0x1E0 && startCode <= 0x1EF) ||
				startCode == 0x1BD || startCode == 0x1FD))
			continue;
		// Stuffing
		byte c;
		for (;;) {
			// NOTE(review): this 'continue' restarts the INNER loop with
			// 'length' unchanged, which would spin forever if length < 1
			// here — the libav original uses 'goto redo' to resync on the
			// outer loop instead. Confirm against upstream before changing.
			if (length < 1) {
				_stream->seek(lastSync);
				continue;
			}
			c = _stream->readByte();
			length--;
			// XXX: for mpeg1, should test only bit 7
			if (c != 0xFF)
				break;
		}
		if ((c & 0xC0) == 0x40) {
			// Buffer scale and size
			_stream->readByte();
			c = _stream->readByte();
			length -= 2;
		}
		pts = 0xFFFFFFFF;
		dts = 0xFFFFFFFF;
		if ((c & 0xE0) == 0x20) {
			// MPEG-1 style timestamps: PTS, optionally followed by DTS.
			dts = pts = readPTS(c);
			length -= 4;
			if (c & 0x10) {
				dts = readPTS(-1);
				length -= 5;
			}
		} else if ((c & 0xC0) == 0x80) {
			// MPEG-2 PES
			byte flags = _stream->readByte();
			int headerLength = _stream->readByte();
			length -= 2;
			if (headerLength > length) {
				_stream->seek(lastSync);
				continue;
			}
			length -= headerLength;
			if (flags & 0x80) {
				dts = pts = readPTS(-1);
				headerLength -= 5;
				if (flags & 0x40) {
					dts = readPTS(-1);
					headerLength -= 5;
				}
			}
			if (flags & 0x3F && headerLength == 0) {
				flags &= 0xC0;
				warning("Further flags set but no bytes left");
			}
			if (flags & 0x01) { // PES extension
				byte pesExt = _stream->readByte();
				headerLength--;
				// Skip PES private data, program packet sequence
				int skip = (pesExt >> 4) & 0xB;
				skip += skip & 0x9;
				if (pesExt & 0x40 || skip > headerLength) {
					warning("pesExt %x is invalid", pesExt);
					pesExt = skip = 0;
				} else {
					_stream->skip(skip);
					headerLength -= skip;
				}
				if (pesExt & 0x01) { // PES extension 2
					byte ext2Length = _stream->readByte();
					headerLength--;
					if ((ext2Length & 0x7F) != 0) {
						byte idExt = _stream->readByte();
						if ((idExt & 0x80) == 0)
							startCode = (startCode & 0xFF) << 8;
						headerLength--;
					}
				}
			}
			if (headerLength < 0) {
				_stream->seek(lastSync);
				continue;
			}
			// Skip whatever optional header fields remain.
			_stream->skip(headerLength);
		} else if (c != 0xF) {
			continue;
		}
		if (length < 0) {
			_stream->seek(lastSync);
			continue;
		}
		return length;
	}
}
// Read a PES PTS/DTS value: 33 bits spread over five bytes with marker bits
// between the fields. 'c' is the first byte if already consumed, else -1.
uint32 MPEGPSDecoder::readPTS(int c) {
	byte bytes[5];
	bytes[0] = (c < 0) ? _stream->readByte() : c;
	_stream->read(bytes + 1, 4);

	const uint32 top = (bytes[0] & 0x0E) << 29;
	const uint32 middle = (READ_BE_UINT16(bytes + 1) >> 1) << 15;
	const uint32 bottom = READ_BE_UINT16(bytes + 3) >> 1;
	return top | middle | bottom;
}
// Parse a program stream map packet, recording the elementary-stream type
// for each stream id into _psmESType.
void MPEGPSDecoder::parseProgramStreamMap(int length) {
	// current_next_indicator / version and marker / reserved bytes.
	_stream->readByte();
	_stream->readByte();
	// skip program stream info
	_stream->skip(_stream->readUint16BE());
	int esMapLength = _stream->readUint16BE();
	// Each map entry is 4 fixed bytes plus a variable info block.
	while (esMapLength >= 4) {
		byte type = _stream->readByte();
		byte esID = _stream->readByte();
		uint16 esInfoLength = _stream->readUint16BE();
		// Remember mapping from stream id to stream type
		_psmESType[esID] = type;
		// Skip program stream info
		_stream->skip(esInfoLength);
		esMapLength -= 4 + esInfoLength;
	}
	_stream->readUint32BE(); // CRC32
}
// Scan packets from the start of the stream until the first MPEG video
// stream (start codes 0x1E0-0x1EF) is found, then create and register a
// video track for it. Returns false if the stream ends without video.
bool MPEGPSDecoder::addFirstVideoTrack() {
	bool foundVideo = false;

	while (!foundVideo) {
		int32 startCode;
		uint32 pts, dts;
		int size = readNextPacketHeader(startCode, pts, dts);

		// A negative size means we hit the end of the stream: we failed
		if (size < 0)
			return false;

		if (startCode < 0x1E0 || startCode > 0x1EF) {
			// Not a video stream; skip the payload and keep scanning
			_stream->skip(size);
			continue;
		}

		// Video stream. Can be MPEG-1/2 or MPEG-4/h.264. We'll assume the
		// former and hope we never need the latter.
		Common::SeekableReadStream *firstPacket = _stream->readStream(size);
		MPEGVideoTrack *track = new MPEGVideoTrack(firstPacket, getDefaultHighColorFormat());
		addTrack(track);
		_streamMap[startCode] = track;
		delete firstPacket;
		foundVideo = true;
	}

	return true;
}
// Peek into a private stream packet to classify its payload.
// PS2 audio is identified by the 'SShd' tag after 4 bytes of unknown data.
MPEGPSDecoder::PrivateStreamType MPEGPSDecoder::detectPrivateStreamType(Common::SeekableReadStream *packet) {
	packet->seek(4);

	uint32 tag = packet->readUint32BE();
	if (tag == MKTAG('S', 'S', 'h', 'd'))
		return kPrivateStreamPS2Audio;

	return kPrivateStreamUnknown;
}
// Construct the video track from the first video PES packet.
// @param firstPacket  first video packet; must start with the MPEG
//                     sequence header so findDimensions() can size the
//                     output surface. Rewound (not freed) on return.
// @param format       pixel format for the decoded output surface.
// Calls error() if libmpeg2 cannot be initialized or the sequence
// header is missing.
MPEGPSDecoder::MPEGVideoTrack::MPEGVideoTrack(Common::SeekableReadStream *firstPacket, const Graphics::PixelFormat &format) {
	_endOfTrack = false;
	_curFrame = -1;
	_nextFrameStartTime = Audio::Timestamp(0, 27000000); // 27 MHz timer

	findDimensions(firstPacket, format);

#ifdef USE_MPEG2
	// Fix: _hasData was previously left uninitialized; reading an
	// uninitialized POD member is undefined behavior.
	_hasData = false;

	_mpegDecoder = mpeg2_init();

	if (!_mpegDecoder)
		error("Could not initialize libmpeg2");

	_mpegInfo = mpeg2_info(_mpegDecoder);
#endif
}
MPEGPSDecoder::MPEGVideoTrack::~MPEGVideoTrack() {
#ifdef USE_MPEG2
	mpeg2_close(_mpegDecoder);
#endif

	// _surface is always allocated by findDimensions() in the constructor
	_surface->free();
	delete _surface;
}

// Width/height/format all come from the backing surface, which
// findDimensions() sized from the MPEG sequence header.
uint16 MPEGPSDecoder::MPEGVideoTrack::getWidth() const {
	return _surface->w;
}

uint16 MPEGPSDecoder::MPEGVideoTrack::getHeight() const {
	return _surface->h;
}

Graphics::PixelFormat MPEGPSDecoder::MPEGVideoTrack::getPixelFormat() const {
	return _surface->format;
}

// The surface is filled in by sendPacket(); this just hands out the
// most recently decoded picture.
const Graphics::Surface *MPEGPSDecoder::MPEGVideoTrack::decodeNextFrame() {
	return _surface;
}
// Push one video PES packet through libmpeg2, converting any completed
// pictures into the RGB surface. Takes ownership of (and deletes) the
// packet. Returns true if at least one frame was completed; without
// USE_MPEG2 the packet is discarded and true is returned unconditionally.
// pts/dts are currently unused here.
bool MPEGPSDecoder::MPEGVideoTrack::sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts) {
#ifdef USE_MPEG2
	// Decode as much as we can out of this packet
	uint32 size = 0xFFFFFFFF;
	mpeg2_state_t state;
	bool foundFrame = false;
	do {
		state = mpeg2_parse(_mpegDecoder);
		switch (state) {
		case STATE_BUFFER:
			// Decoder wants more input: feed it the next slab of the packet.
			// When the packet is exhausted, size becomes 0 and the loop ends.
			size = packet->read(_buffer, BUFFER_SIZE);
			mpeg2_buffer(_mpegDecoder, _buffer, _buffer + size);
			break;
		case STATE_SLICE:
		case STATE_END:
			// A picture was completed
			foundFrame = true;
			_curFrame++;
			if (_mpegInfo->display_fbuf) {
				const mpeg2_sequence_t *sequence = _mpegInfo->sequence;
				// frame_period is in 27 MHz clock ticks, matching
				// _nextFrameStartTime's framerate
				_nextFrameStartTime = _nextFrameStartTime.addFrames(sequence->frame_period);
				// Convert the decoded YUV planes into our RGB surface
				YUVToRGBMan.convert420(_surface, Graphics::YUVToRGBManager::kScaleITU, _mpegInfo->display_fbuf->buf[0],
				                       _mpegInfo->display_fbuf->buf[1], _mpegInfo->display_fbuf->buf[2], sequence->picture_width,
				                       sequence->picture_height, sequence->width, sequence->chroma_width);
			}
			break;
		default:
			break;
		}
	} while (size != 0);
#endif
	delete packet;
#ifdef USE_MPEG2
	return foundFrame;
#else
	return true;
#endif
}
// Parse the MPEG sequence header at the start of the first video packet
// to learn the picture size, then allocate the output surface and rewind
// the packet. Calls error() if the sequence header start code is missing.
void MPEGPSDecoder::MPEGVideoTrack::findDimensions(Common::SeekableReadStream *firstPacket, const Graphics::PixelFormat &format) {
	// The packet must open with the sequence header start code (0x000001B3)
	if (firstPacket->readUint32BE() != 0x1B3)
		error("Failed to detect MPEG sequence start");

	// The next three bytes of the bitstream pack 12 bits of width followed
	// by 12 bits of height; no need to drag in Common::BitStream for this.
	byte b0 = firstPacket->readByte();
	byte b1 = firstPacket->readByte();
	byte b2 = firstPacket->readByte();

	uint16 width = (b0 << 4) | (b1 >> 4);
	uint16 height = ((b1 & 0xF) << 8) | b2;

	debug(0, "MPEG dimensions: %dx%d", width, height);

	_surface = new Graphics::Surface();
	_surface->create(width, height, format);

	// Rewind so the decoder proper sees the sequence header again
	firstPacket->seek(0);
}
#ifdef USE_MAD

// The audio code here is almost entirely based on what we do in mp3.cpp

// Construct the MP3 audio track from the first audio PES packet.
// The whole packet is scanned once with libMAD just to discover the
// sample rate and channel count (needed to create the queuing stream),
// then everything is reset so playback starts from the beginning.
MPEGPSDecoder::MPEGAudioTrack::MPEGAudioTrack(Common::SeekableReadStream *firstPacket) {
	// The MAD_BUFFER_GUARD must always contain zeros (the reason
	// for this is that the Layer III Huffman decoder of libMAD
	// may read a few bytes beyond the end of the input buffer).
	memset(_buf + BUFFER_SIZE, 0, MAD_BUFFER_GUARD);

	_state = MP3_STATE_INIT;
	_audStream = 0;

	// Find out our audio parameters
	initStream(firstPacket);

	// Walk all headers in the packet; _frame.header ends up holding the
	// parameters of the last header seen.
	while (_state != MP3_STATE_EOS)
		readHeader(firstPacket);

	_audStream = Audio::makeQueuingAudioStream(_frame.header.samplerate, MAD_NCHANNELS(&_frame.header) == 2);

	// Tear down and rewind so sendPacket() starts from a clean slate
	deinitStream();

	firstPacket->seek(0);
	_state = MP3_STATE_INIT;
}

MPEGPSDecoder::MPEGAudioTrack::~MPEGAudioTrack() {
	deinitStream();
	delete _audStream;
}
// Convert one libMAD fixed-point sample to the 16-bit integer range:
// round, clamp to [-MAD_F_ONE, MAD_F_ONE - 1], then drop the extra
// fractional bits so mixing many channels cannot saturate.
static inline int scaleSample(mad_fixed_t sample) {
	const mad_fixed_t rounded = sample + (1L << (MAD_F_FRACBITS - 16));
	const mad_fixed_t upper = MAD_F_ONE - 1;
	const mad_fixed_t lower = -MAD_F_ONE;

	mad_fixed_t clipped = rounded;
	if (clipped > upper)
		clipped = upper;
	else if (clipped < lower)
		clipped = lower;

	// quantize and scale
	return clipped >> (MAD_F_FRACBITS + 1 - 16);
}
// Decode an entire audio PES packet's worth of MP3 data, queueing the
// resulting PCM on _audStream. Takes ownership of (and deletes) the
// packet. pts/dts are currently unused.
bool MPEGPSDecoder::MPEGAudioTrack::sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts) {
	while (_state != MP3_STATE_EOS)
		decodeMP3Data(packet);

	// Re-arm the decoder so the next packet is processed from the top
	_state = MP3_STATE_READY;
	delete packet;
	return true;
}

Audio::AudioStream *MPEGPSDecoder::MPEGAudioTrack::getAudioStream() const {
	return _audStream;
}
// (Re-)initialize the libMAD decoder state and prime it with the first
// bytes of the packet. Leaves _state at MP3_STATE_READY (or EOS if the
// packet was already exhausted).
void MPEGPSDecoder::MPEGAudioTrack::initStream(Common::SeekableReadStream *packet) {
	if (_state != MP3_STATE_INIT)
		deinitStream();

	// Init MAD
	mad_stream_init(&_stream);
	mad_frame_init(&_frame);
	mad_synth_init(&_synth);

	// Reset the stream data
	packet->seek(0, SEEK_SET);

	// Update state
	_state = MP3_STATE_READY;

	// Read the first few sample bytes
	readMP3Data(packet);
}

// Tear down the libMAD decoder objects. Safe to call repeatedly; a
// decoder still in MP3_STATE_INIT has nothing to clean up.
void MPEGPSDecoder::MPEGAudioTrack::deinitStream() {
	if (_state == MP3_STATE_INIT)
		return;

	// Deinit MAD
	mad_synth_finish(&_synth);
	mad_frame_finish(&_frame);
	mad_stream_finish(&_stream);

	_state = MP3_STATE_EOS;
}

// Refill libMAD's input buffer from the packet, preserving any partial
// frame left over from the previous fill. Sets MP3_STATE_EOS when the
// packet has no more data.
void MPEGPSDecoder::MPEGAudioTrack::readMP3Data(Common::SeekableReadStream *packet) {
	uint32 remaining = 0;

	// Give up immediately if we already used up all data in the stream
	if (packet->eos()) {
		_state = MP3_STATE_EOS;
		return;
	}

	if (_stream.next_frame) {
		// If there is still data in the MAD stream, we need to preserve it.
		// Note that we use memmove, as we are reusing the same buffer,
		// and hence the data regions we copy from and to may overlap.
		remaining = _stream.bufend - _stream.next_frame;
		assert(remaining < BUFFER_SIZE); // Paranoia check
		memmove(_buf, _stream.next_frame, remaining);
	}

	memset(_buf + remaining, 0, BUFFER_SIZE - remaining);

	// Try to read the next block
	uint32 size = packet->read(_buf + remaining, BUFFER_SIZE - remaining);
	if (size == 0) {
		_state = MP3_STATE_EOS;
		return;
	}

	// Feed the data we just read into the stream decoder
	_stream.error = MAD_ERROR_NONE;
	mad_stream_buffer(&_stream, _buf, size + remaining);
}
// Decode just the next MP3 frame header (no audio data) into
// _frame.header. Used by the constructor to discover stream parameters
// cheaply. Sets MP3_STATE_EOS on unrecoverable errors or end of data.
void MPEGPSDecoder::MPEGAudioTrack::readHeader(Common::SeekableReadStream *packet) {
	if (_state != MP3_STATE_READY)
		return;

	// If necessary, load more data into the stream decoder
	if (_stream.error == MAD_ERROR_BUFLEN)
		readMP3Data(packet);

	while (_state != MP3_STATE_EOS) {
		_stream.error = MAD_ERROR_NONE;

		// Decode the next header. Note: mad_frame_decode would do this for us, too.
		// However, for seeking we don't want to decode the full frame (else it would
		// be far too slow). Hence we perform this explicitly in a separate step.
		if (mad_header_decode(&_frame.header, &_stream) == -1) {
			if (_stream.error == MAD_ERROR_BUFLEN) {
				readMP3Data(packet); // Read more data
				continue;
			} else if (MAD_RECOVERABLE(_stream.error)) {
				debug(6, "MPEGAudioTrack::readHeader(): Recoverable error in mad_header_decode (%s)", mad_stream_errorstr(&_stream));
				continue;
			} else {
				warning("MPEGAudioTrack::readHeader(): Unrecoverable error in mad_header_decode (%s)", mad_stream_errorstr(&_stream));
				break;
			}
		}

		break;
	}

	if (_stream.error != MAD_ERROR_NONE)
		_state = MP3_STATE_EOS;
}
// Decode one MP3 frame from the packet, synthesize it to interleaved
// 16-bit PCM and queue it on _audStream. Initializes the decoder lazily
// on the first call; sets MP3_STATE_EOS when the packet is exhausted or
// an unrecoverable error occurs.
void MPEGPSDecoder::MPEGAudioTrack::decodeMP3Data(Common::SeekableReadStream *packet) {
	if (_state == MP3_STATE_INIT)
		initStream(packet);

	if (_state == MP3_STATE_EOS)
		return;

	do {
		// If necessary, load more data into the stream decoder
		if (_stream.error == MAD_ERROR_BUFLEN)
			readMP3Data(packet);

		while (_state == MP3_STATE_READY) {
			_stream.error = MAD_ERROR_NONE;

			// Decode the next frame
			if (mad_frame_decode(&_frame, &_stream) == -1) {
				if (_stream.error == MAD_ERROR_BUFLEN) {
					break; // Read more data
				} else if (MAD_RECOVERABLE(_stream.error)) {
					// Note: we will occasionally see MAD_ERROR_BADDATAPTR errors here.
					// These are normal and expected (caused by our frame skipping (i.e. "seeking")
					// code above).
					debug(6, "MPEGAudioTrack::decodeMP3Data(): Recoverable error in mad_frame_decode (%s)", mad_stream_errorstr(&_stream));
					continue;
				} else {
					warning("MPEGAudioTrack::decodeMP3Data(): Unrecoverable error in mad_frame_decode (%s)", mad_stream_errorstr(&_stream));
					break;
				}
			}

			// Synthesize PCM data
			mad_synth_frame(&_synth, &_frame);

			// Output it to our queue
			if (_synth.pcm.length != 0) {
				// One 16-bit sample per channel per PCM sample
				byte *buffer = (byte *)malloc(_synth.pcm.length * 2 * MAD_NCHANNELS(&_frame.header));
				int16 *ptr = (int16 *)buffer;

				for (int i = 0; i < _synth.pcm.length; i++) {
					*ptr++ = (int16)scaleSample(_synth.pcm.samples[0][i]);

					if (MAD_NCHANNELS(&_frame.header) == 2)
						*ptr++ = (int16)scaleSample(_synth.pcm.samples[1][i]);
				}

				int flags = Audio::FLAG_16BITS;

				if (_audStream->isStereo())
					flags |= Audio::FLAG_STEREO;

#ifdef SCUMM_LITTLE_ENDIAN
				flags |= Audio::FLAG_LITTLE_ENDIAN;
#endif

				// Queue takes ownership of the malloc'd buffer
				_audStream->queueBuffer(buffer, _synth.pcm.length * 2 * MAD_NCHANNELS(&_frame.header), DisposeAfterUse::YES, flags);
			}
			break;
		}
	} while (_state != MP3_STATE_EOS && _stream.error == MAD_ERROR_BUFLEN);

	if (_stream.error != MAD_ERROR_NONE)
		_state = MP3_STATE_EOS;
}
#endif
// Construct the PS2 audio track by parsing the 'SShd' header of the
// first private-stream packet: sound type, sample rate, channel count
// and interleave size. Only PS2_PCM is supported; anything else calls
// error(). The packet is rewound (not freed) on return.
MPEGPSDecoder::PS2AudioTrack::PS2AudioTrack(Common::SeekableReadStream *firstPacket) {
	firstPacket->seek(12); // unknown data (4), 'SShd', header size (4)

	_soundType = firstPacket->readUint32LE();

	if (_soundType == PS2_ADPCM)
		error("Unhandled PS2 ADPCM sound in MPEG-PS video");
	else if (_soundType != PS2_PCM)
		error("Unknown PS2 sound type %x", _soundType);

	uint32 sampleRate = firstPacket->readUint32LE();
	_channels = firstPacket->readUint32LE();
	_interleave = firstPacket->readUint32LE();

	// Scratch space for one full interleaved chunk (all channels)
	_blockBuffer = new byte[_interleave * _channels];
	_blockPos = _blockUsed = 0;

	_audStream = Audio::makeQueuingAudioStream(sampleRate, _channels == 2);
	_isFirstPacket = true;

	firstPacket->seek(0);
}

MPEGPSDecoder::PS2AudioTrack::~PS2AudioTrack() {
	delete[] _blockBuffer;
	delete _audStream;
}
// De-interleave one packet of channel-interleaved PCM and queue it as
// interleaved 16-bit samples. Partial chunks are carried over between
// packets in _blockBuffer via _blockPos/_blockUsed. Takes ownership of
// (and deletes) the packet. pts/dts are currently unused.
bool MPEGPSDecoder::PS2AudioTrack::sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts) {
	packet->skip(4);

	if (_isFirstPacket) {
		// Skip over the header which we already parsed
		packet->skip(4);
		packet->skip(packet->readUint32LE());

		if (packet->readUint32BE() != MKTAG('S', 'S', 'b', 'd'))
			error("Failed to find 'SSbd' tag");

		packet->readUint32LE(); // body size
		_isFirstPacket = false;
	}

	uint32 size = packet->size() - packet->pos();
	uint32 bytesPerChunk = _interleave * _channels;
	uint32 sampleCount = calculateSampleCount(size);

	byte *buffer = (byte *)malloc(sampleCount * 2);
	int16 *ptr = (int16 *)buffer;

	// Handle any full chunks first
	// NOTE(review): when _blockPos > 0, the first iteration only consumes
	// bytesPerChunk - _blockPos bytes, yet the loop condition tests the
	// full bytesPerChunk; and the leftover branch below reads into
	// _blockBuffer from offset 0, which would clobber a pending partial
	// block. Looks like an edge case that never triggers with the packet
	// sizes these videos use — verify before relying on it.
	while (size >= bytesPerChunk) {
		packet->read(_blockBuffer + _blockPos, bytesPerChunk - _blockPos);
		size -= bytesPerChunk - _blockPos;
		_blockPos = 0;

		// Emit the chunk interleaved: one sample per channel at a time
		for (uint32 i = _blockUsed; i < _interleave / 2; i++)
			for (uint32 j = 0; j < _channels; j++)
				*ptr++ = READ_UINT16(_blockBuffer + i * 2 + j * _interleave);

		_blockUsed = 0;
	}

	// Then fallback on loading any leftover
	if (size > 0) {
		packet->read(_blockBuffer, size);
		_blockPos = size;

		// Only samples present in every channel's slice can be emitted now
		if (size > (_channels - 1) * _interleave) {
			_blockUsed = (size - (_channels - 1) * _interleave) / 2;

			for (uint32 i = 0; i < _blockUsed; i++)
				for (uint32 j = 0; j < _channels; j++)
					*ptr++ = READ_UINT16(_blockBuffer + i * 2 + j * _interleave);
		}
	}

	byte flags = Audio::FLAG_16BITS | Audio::FLAG_LITTLE_ENDIAN;

	if (_audStream->isStereo())
		flags |= Audio::FLAG_STEREO;

	_audStream->queueBuffer((byte *)buffer, sampleCount * 2, DisposeAfterUse::YES, flags);

	delete packet;
	return true;
}
Audio::AudioStream *MPEGPSDecoder::PS2AudioTrack::getAudioStream() const {
	return _audStream;
}

// Predict how many 16-bit samples (across all channels) sendPacket()
// will emit for a payload of packetSize bytes, mirroring its chunk and
// leftover logic so the output buffer can be sized up front.
uint32 MPEGPSDecoder::PS2AudioTrack::calculateSampleCount(uint32 packetSize) const {
	uint32 bytesPerChunk = _interleave * _channels, result = 0;

	// If we have a partial block, subtract the remainder from the size. That
	// gets put towards reading the partial block
	if (_blockPos != 0) {
		packetSize -= bytesPerChunk - _blockPos;
		result += (_interleave / 2) - _blockUsed;
	}

	// Round the number of whole chunks down and then calculate how many samples that gives us
	result += (packetSize / bytesPerChunk) * _interleave / 2;

	// Total up anything we can get from the remainder
	packetSize %= bytesPerChunk;
	if (packetSize > (_channels - 1) * _interleave)
		result += (packetSize - (_channels - 1) * _interleave) / 2;

	// Per-channel sample count times channel count
	return result * _channels;
}
} // End of namespace Video

243
video/mpegps_decoder.h Normal file
View File

@ -0,0 +1,243 @@
/* ScummVM - Graphic Adventure Engine
*
* ScummVM is the legal property of its developers, whose names
* are too numerous to list here. Please refer to the COPYRIGHT
* file distributed with this source distribution.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/
#ifndef VIDEO_MPEG_DECODER_H
#define VIDEO_MPEG_DECODER_H
#include "common/hashmap.h"
#include "common/rational.h"
#include "common/rect.h"
#include "common/str.h"
#include "graphics/surface.h"
#include "video/video_decoder.h"
#ifdef USE_MAD
#include <mad.h>
#endif
#ifdef USE_MPEG2
#if defined(__PLAYSTATION2__)
typedef uint8 uint8_t;
typedef uint16 uint16_t;
typedef uint32 uint32_t;
#elif defined(_WIN32_WCE)
typedef signed char int8_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
#elif defined(_MSC_VER)
typedef signed char int8_t;
typedef signed short int16_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
#if !defined(SDL_COMPILEDVERSION) || (SDL_COMPILEDVERSION < 1210)
typedef signed long int32_t;
typedef unsigned long uint32_t;
#endif
#else
# include <inttypes.h>
#endif
extern "C" {
#include <mpeg2dec/mpeg2.h>
}
#endif
namespace Audio {
class QueuingAudioStream;
}
namespace Common {
class BitStream;
class Huffman;
class SeekableReadStream;
}
namespace Graphics {
struct PixelFormat;
}
namespace Video {
// Decoder for MPEG Program Stream (.pss et al.) files: demultiplexes
// PES packets and routes them to per-stream track objects (MPEG video
// via libmpeg2, MP3 audio via libMAD, raw PS2 PCM audio).
class MPEGPSDecoder : public VideoDecoder {
public:
	MPEGPSDecoder();
	virtual ~MPEGPSDecoder();

	bool loadStream(Common::SeekableReadStream *stream);
	void close();

protected:
	// Demultiplex the next packet and hand it to the owning track
	void readNextPacket();
	bool useAudioSync() const { return false; }

private:
	// Common interface for anything that can consume a demuxed PES packet
	class MPEGStream {
	public:
		virtual ~MPEGStream() {}

		enum StreamType {
			kStreamTypeVideo,
			kStreamTypeAudio
		};

		// Consume one packet (implementations take ownership of it)
		virtual bool sendPacket(Common::SeekableReadStream *firstPacket, uint32 pts, uint32 dts) = 0;
		virtual StreamType getStreamType() const = 0;
	};

	// MPEG-1/2 video stream decoded through libmpeg2
	class MPEGVideoTrack : public VideoTrack, public MPEGStream {
	public:
		MPEGVideoTrack(Common::SeekableReadStream *firstPacket, const Graphics::PixelFormat &format);
		~MPEGVideoTrack();

		bool endOfTrack() const { return _endOfTrack; }
		uint16 getWidth() const;
		uint16 getHeight() const;
		Graphics::PixelFormat getPixelFormat() const;
		int getCurFrame() const { return _curFrame; }
		uint32 getNextFrameStartTime() const { return _nextFrameStartTime.msecs(); }
		const Graphics::Surface *decodeNextFrame();

		bool sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts);
		StreamType getStreamType() const { return kStreamTypeVideo; }

		void setEndOfTrack() { _endOfTrack = true; }

	private:
		bool _endOfTrack;
		int _curFrame;
		// Measured on the MPEG 27 MHz system clock
		Audio::Timestamp _nextFrameStartTime;
		Graphics::Surface *_surface;

		// Parse the sequence header of the first packet to size _surface
		void findDimensions(Common::SeekableReadStream *firstPacket, const Graphics::PixelFormat &format);

#ifdef USE_MPEG2
		enum {
			BUFFER_SIZE = 4096
		};

		// Staging buffer for feeding packet data to libmpeg2
		byte _buffer[BUFFER_SIZE];
		mpeg2dec_t *_mpegDecoder;
		const mpeg2_info_t *_mpegInfo;
		bool _hasData;
#endif
	};

#ifdef USE_MAD
	// MP3 audio stream decoded through libMAD
	class MPEGAudioTrack : public AudioTrack, public MPEGStream {
	public:
		MPEGAudioTrack(Common::SeekableReadStream *firstPacket);
		~MPEGAudioTrack();

		bool sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts);
		StreamType getStreamType() const { return kStreamTypeAudio; }

	protected:
		Audio::AudioStream *getAudioStream() const;

	private:
		Audio::QueuingAudioStream *_audStream;

		enum State {
			MP3_STATE_INIT,  // Need to init the decoder
			MP3_STATE_READY, // ready for processing data
			MP3_STATE_EOS    // end of data reached (may need to loop)
		};

		State _state;

		mad_stream _stream;
		mad_frame _frame;
		mad_synth _synth;

		enum {
			BUFFER_SIZE = 5 * 8192
		};

		// This buffer contains a slab of input data
		byte _buf[BUFFER_SIZE + MAD_BUFFER_GUARD];

		void initStream(Common::SeekableReadStream *packet);
		void deinitStream();
		void readMP3Data(Common::SeekableReadStream *packet);
		void readHeader(Common::SeekableReadStream *packet);
		void decodeMP3Data(Common::SeekableReadStream *packet);
	};
#endif

	// Raw interleaved PCM audio as found in PS2 'SShd'/'SSbd' streams
	class PS2AudioTrack : public AudioTrack, public MPEGStream {
	public:
		PS2AudioTrack(Common::SeekableReadStream *firstPacket);
		~PS2AudioTrack();

		bool sendPacket(Common::SeekableReadStream *packet, uint32 pts, uint32 dts);
		StreamType getStreamType() const { return kStreamTypeAudio; }

	protected:
		Audio::AudioStream *getAudioStream() const;

	private:
		Audio::QueuingAudioStream *_audStream;

		enum {
			PS2_PCM = 0x01,
			PS2_ADPCM = 0x10
		};

		uint32 _channels;
		uint32 _soundType;
		uint32 _interleave;
		bool _isFirstPacket;

		// Carry-over storage for a chunk split across packets
		byte *_blockBuffer;
		uint32 _blockPos, _blockUsed;

		uint32 calculateSampleCount(uint32 packetSize) const;
	};

	enum PrivateStreamType {
		kPrivateStreamUnknown,
		kPrivateStreamPS2Audio
	};

	bool addFirstVideoTrack();

	int readNextPacketHeader(int32 &startCode, uint32 &pts, uint32 &dts);
	int findNextStartCode(uint32 &size);
	uint32 readPTS(int c);

	void parseProgramStreamMap(int length);
	// Stream-id -> stream-type table filled from the program stream map
	byte _psmESType[256];

	PrivateStreamType detectPrivateStreamType(Common::SeekableReadStream *packet);

	// Maps PES start codes to the track that consumes those packets
	typedef Common::HashMap<int, MPEGStream *> StreamMap;
	StreamMap _streamMap;

	Common::SeekableReadStream *_stream;
};
} // End of namespace Video
#endif

View File

@ -22,6 +22,7 @@
#include "video/video_decoder.h"
#include "audio/audiostream.h"
#include "audio/mixer.h" // for kMaxChannelVolume
#include "common/rational.h"
@ -33,7 +34,45 @@
namespace Video {
VideoDecoder::VideoDecoder() {
reset();
_startTime = 0;
_dirtyPalette = false;
_palette = 0;
_playbackRate = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
_pauseLevel = 0;
_needsUpdate = false;
_lastTimeChange = 0;
_endTime = 0;
_endTimeSet = false;
_nextVideoTrack = 0;
// Find the best format for output
_defaultHighColorFormat = g_system->getScreenFormat();
if (_defaultHighColorFormat.bytesPerPixel == 1)
_defaultHighColorFormat = Graphics::PixelFormat(4, 8, 8, 8, 8, 8, 16, 24, 0);
}
void VideoDecoder::close() {
if (isPlaying())
stop();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
delete *it;
_tracks.clear();
_dirtyPalette = false;
_palette = 0;
_startTime = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
_pauseLevel = 0;
_needsUpdate = false;
_lastTimeChange = 0;
_endTime = 0;
_endTimeSet = false;
_nextVideoTrack = 0;
}
bool VideoDecoder::loadFile(const Common::String &filename) {
@ -47,28 +86,8 @@ bool VideoDecoder::loadFile(const Common::String &filename) {
return loadStream(file);
}
uint32 VideoDecoder::getTime() const {
return g_system->getMillis() - _startTime;
}
void VideoDecoder::setSystemPalette() {
g_system->getPaletteManager()->setPalette(getPalette(), 0, 256);
}
bool VideoDecoder::needsUpdate() const {
return !endOfVideo() && getTimeToNextFrame() == 0;
}
void VideoDecoder::reset() {
_curFrame = -1;
_startTime = 0;
_pauseLevel = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
}
bool VideoDecoder::endOfVideo() const {
return !isVideoLoaded() || (getCurFrame() >= (int32)getFrameCount() - 1);
return hasFramesLeft() && getTimeToNextFrame() == 0;
}
void VideoDecoder::pauseVideo(bool pause) {
@ -86,10 +105,14 @@ void VideoDecoder::pauseVideo(bool pause) {
if (_pauseLevel == 1 && pause) {
_pauseStartTime = g_system->getMillis(); // Store the starting time from pausing to keep it for later
pauseVideoIntern(true);
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(true);
} else if (_pauseLevel == 0) {
pauseVideoIntern(false);
addPauseTime(g_system->getMillis() - _pauseStartTime);
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(false);
_startTime += (g_system->getMillis() - _pauseStartTime);
}
}
@ -100,33 +123,637 @@ void VideoDecoder::resetPauseStartTime() {
void VideoDecoder::setVolume(byte volume) {
_audioVolume = volume;
updateVolume();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->setVolume(_audioVolume);
}
void VideoDecoder::setBalance(int8 balance) {
_audioBalance = balance;
updateBalance();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->setBalance(_audioBalance);
}
uint32 FixedRateVideoDecoder::getTimeToNextFrame() const {
if (endOfVideo() || _curFrame < 0)
bool VideoDecoder::isVideoLoaded() const {
return !_tracks.empty();
}
uint16 VideoDecoder::getWidth() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getWidth();
return 0;
}
uint16 VideoDecoder::getHeight() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getHeight();
return 0;
}
Graphics::PixelFormat VideoDecoder::getPixelFormat() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getPixelFormat();
return Graphics::PixelFormat();
}
// Decode and return the next frame of the next-due video track, pulling
// its palette if it changed. Returns 0 when no video track has frames
// pending.
const Graphics::Surface *VideoDecoder::decodeNextFrame() {
	_needsUpdate = false;

	readNextPacket();

	// If we have no next video track at this point, there shouldn't be
	// any frame available for us to display.
	if (!_nextVideoTrack)
		return 0;

	const Graphics::Surface *frame = _nextVideoTrack->decodeNextFrame();

	if (_nextVideoTrack->hasDirtyPalette()) {
		_palette = _nextVideoTrack->getPalette();
		_dirtyPalette = true;
	}

	// Look for the next video track here for the next decode.
	findNextVideoTrack();
	return frame;
}
const byte *VideoDecoder::getPalette() {
_dirtyPalette = false;
return _palette;
}
int VideoDecoder::getCurFrame() const {
int32 frame = -1;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
frame += ((VideoTrack *)*it)->getCurFrame() + 1;
return frame;
}
uint32 VideoDecoder::getFrameCount() const {
int count = 0;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
count += ((VideoTrack *)*it)->getFrameCount();
return count;
}
// Current playback position in milliseconds: the frozen time when
// stopped/paused, the audio clock when audio sync is in use, otherwise
// wall-clock time scaled by the playback rate.
uint32 VideoDecoder::getTime() const {
	if (!isPlaying())
		return _lastTimeChange.msecs();

	if (isPaused())
		return (_playbackRate * (_pauseStartTime - _startTime)).toInt();

	if (useAudioSync()) {
		for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
			if ((*it)->getTrackType() == Track::kTrackTypeAudio && !(*it)->endOfTrack()) {
				uint32 time = ((const AudioTrack *)*it)->getRunningTime();

				// A zero running time means the mixer can't tell us anything
				if (time != 0)
					return time + _lastTimeChange.msecs();
			}
		}
	}

	return (_playbackRate * (g_system->getMillis() - _startTime)).toInt();
}
uint32 VideoDecoder::getTimeToNextFrame() const {
if (endOfVideo() || _needsUpdate || !_nextVideoTrack)
return 0;
uint32 elapsedTime = getTime();
uint32 nextFrameStartTime = getFrameBeginTime(_curFrame + 1);
uint32 nextFrameStartTime = _nextVideoTrack->getNextFrameStartTime();
// If the time that the next frame should be shown has past
// the frame should be shown ASAP.
if (nextFrameStartTime <= elapsedTime)
return 0;
return nextFrameStartTime - elapsedTime;
}
uint32 FixedRateVideoDecoder::getFrameBeginTime(uint32 frame) const {
Common::Rational beginTime = frame * 1000;
beginTime /= getFrameRate();
return beginTime.toInt();
bool VideoDecoder::endOfVideo() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->endOfTrack() && (!isPlaying() || (*it)->getTrackType() != Track::kTrackTypeVideo || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
return false;
return true;
}
bool VideoDecoder::isRewindable() const {
if (!isVideoLoaded())
return false;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->isRewindable())
return false;
return true;
}
bool VideoDecoder::rewind() {
if (!isRewindable())
return false;
// Stop all tracks so they can be rewound
if (isPlaying())
stopAudio();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->rewind())
return false;
// Now that we've rewound, start all tracks again
if (isPlaying())
startAudio();
_lastTimeChange = 0;
_startTime = g_system->getMillis();
resetPauseStartTime();
findNextVideoTrack();
return true;
}
bool VideoDecoder::isSeekable() const {
if (!isVideoLoaded())
return false;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->isSeekable())
return false;
return true;
}
// Seek every track to the given timestamp. Audio is stopped around the
// seek and restarted afterwards; returns false if any track refuses.
bool VideoDecoder::seek(const Audio::Timestamp &time) {
	if (!isSeekable())
		return false;

	// Stop all tracks so they can be seeked
	if (isPlaying())
		stopAudio();

	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
		if (!(*it)->seek(time))
			return false;

	_lastTimeChange = time;

	// Now that we've seeked, start all tracks again
	// Also reset our start time
	if (isPlaying()) {
		startAudio();
		_startTime = g_system->getMillis() - time.msecs();
	}

	resetPauseStartTime();
	findNextVideoTrack();
	_needsUpdate = true;
	return true;
}
bool VideoDecoder::seekToFrame(uint frame) {
VideoTrack *track = 0;
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
if (!(*it)->isSeekable())
return false;
if ((*it)->getTrackType() == Track::kTrackTypeVideo) {
// We only allow seeking by frame when one video track
// is present
if (track)
return false;
track = (VideoTrack *)*it;
}
}
// If we didn't find a video track, we can't seek by frame (of course)
if (!track)
return false;
Audio::Timestamp time = track->getFrameTime(frame);
if (time < 0)
return false;
return seek(time);
}
void VideoDecoder::start() {
if (!isPlaying())
setRate(1);
}
void VideoDecoder::stop() {
if (!isPlaying())
return;
// Stop audio here so we don't have it affect getTime()
stopAudio();
// Keep the time marked down in case we start up again
// We do this before _playbackRate is set so we don't get
// _lastTimeChange returned, but before _pauseLevel is
// reset.
_lastTimeChange = getTime();
_playbackRate = 0;
_startTime = 0;
_palette = 0;
_dirtyPalette = false;
_needsUpdate = false;
// Also reset the pause state.
_pauseLevel = 0;
// Reset the pause state of the tracks too
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(false);
}
// Change the playback rate. Rate 0 stops playback; non-unity rates are
// rejected for videos with audio; negative (backwards) rates are not
// implemented and fall back to 1.
void VideoDecoder::setRate(const Common::Rational &rate) {
	if (!isVideoLoaded() || _playbackRate == rate)
		return;

	if (rate == 0) {
		stop();
		return;
	} else if (rate != 1 && hasAudio()) {
		warning("Cannot set custom rate in videos with audio");
		return;
	}

	Common::Rational targetRate = rate;

	if (rate < 0) {
		// TODO: Implement support for this
		warning("Cannot set custom rate to backwards");
		targetRate = 1;

		if (_playbackRate == targetRate)
			return;
	}

	// Capture the current position before the clock basis changes
	if (_playbackRate != 0)
		_lastTimeChange = getTime();

	_playbackRate = targetRate;
	_startTime = g_system->getMillis();

	// Adjust start time if we've seeked to something besides zero time
	if (_lastTimeChange.totalNumberOfFrames() != 0)
		_startTime -= (_lastTimeChange.msecs() / _playbackRate).toInt();

	startAudio();
}
bool VideoDecoder::isPlaying() const {
return _playbackRate != 0;
}
Audio::Timestamp VideoDecoder::getDuration() const {
Audio::Timestamp maxDuration(0, 1000);
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
Audio::Timestamp duration = (*it)->getDuration();
if (duration > maxDuration)
maxDuration = duration;
}
return maxDuration;
}
VideoDecoder::Track::Track() {
_paused = false;
}
bool VideoDecoder::Track::isRewindable() const {
return isSeekable();
}
bool VideoDecoder::Track::rewind() {
return seek(Audio::Timestamp(0, 1000));
}
void VideoDecoder::Track::pause(bool shouldPause) {
_paused = shouldPause;
pauseIntern(shouldPause);
}
Audio::Timestamp VideoDecoder::Track::getDuration() const {
return Audio::Timestamp(0, 1000);
}
bool VideoDecoder::VideoTrack::endOfTrack() const {
return getCurFrame() >= (getFrameCount() - 1);
}
Audio::Timestamp VideoDecoder::VideoTrack::getFrameTime(uint frame) const {
// Default implementation: Return an invalid (negative) number
return Audio::Timestamp().addFrames(-1);
}
uint32 VideoDecoder::FixedRateVideoTrack::getNextFrameStartTime() const {
if (endOfTrack() || getCurFrame() < 0)
return 0;
return getFrameTime(getCurFrame() + 1).msecs();
}
Audio::Timestamp VideoDecoder::FixedRateVideoTrack::getFrameTime(uint frame) const {
// Try to get as accurate as possible, considering we have a fractional frame rate
// (which Audio::Timestamp doesn't support).
Common::Rational frameRate = getFrameRate();
if (frameRate == frameRate.toInt()) // The nice case (a whole number)
return Audio::Timestamp(0, frame, frameRate.toInt());
// Just convert to milliseconds.
Common::Rational time = frame * 1000;
time /= frameRate;
return Audio::Timestamp(time.toInt(), 1000);
}
uint VideoDecoder::FixedRateVideoTrack::getFrameAtTime(const Audio::Timestamp &time) const {
Common::Rational frameRate = getFrameRate();
// Easy conversion
if (frameRate == time.framerate())
return time.totalNumberOfFrames();
// Default case
return (time.totalNumberOfFrames() * frameRate / time.framerate()).toInt();
}
Audio::Timestamp VideoDecoder::FixedRateVideoTrack::getDuration() const {
return getFrameTime(getFrameCount());
}
bool VideoDecoder::AudioTrack::endOfTrack() const {
Audio::AudioStream *stream = getAudioStream();
return !stream || !g_system->getMixer()->isSoundHandleActive(_handle) || stream->endOfData();
}
void VideoDecoder::AudioTrack::setVolume(byte volume) {
_volume = volume;
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->setChannelVolume(_handle, _volume);
}
void VideoDecoder::AudioTrack::setBalance(int8 balance) {
_balance = balance;
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->setChannelBalance(_handle, _balance);
}
void VideoDecoder::AudioTrack::start() {
stop();
Audio::AudioStream *stream = getAudioStream();
assert(stream);
g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::NO);
// Pause the audio again if we're still paused
if (isPaused())
g_system->getMixer()->pauseHandle(_handle, true);
}
void VideoDecoder::AudioTrack::stop() {
	// Halting the mixer channel is all that is needed; the stream itself is
	// not freed here, since the track owns it (it was played with
	// DisposeAfterUse::NO in start()).
	g_system->getMixer()->stopHandle(_handle);
}
void VideoDecoder::AudioTrack::start(const Audio::Timestamp &limit) {
	// Restart from a clean state.
	stop();

	Audio::AudioStream *stream = getAudioStream();
	assert(stream);

	// Wrap the track's stream so playback cuts off at the requested limit.
	// The wrapper (but not the underlying stream) is disposed by the mixer.
	stream = Audio::makeLimitingAudioStream(stream, limit, DisposeAfterUse::NO);

	Audio::Mixer *mixer = g_system->getMixer();
	mixer->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::YES);

	// Keep the freshly started channel silent while the video as a whole is paused.
	if (isPaused())
		mixer->pauseHandle(_handle, true);
}
uint32 VideoDecoder::AudioTrack::getRunningTime() const {
	// Elapsed time is only meaningful while the mixer still has an active
	// channel for this track.
	Audio::Mixer *mixer = g_system->getMixer();

	if (!mixer->isSoundHandleActive(_handle))
		return 0;

	return mixer->getSoundElapsedTime(_handle);
}
void VideoDecoder::AudioTrack::pauseIntern(bool shouldPause) {
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->pauseHandle(_handle, shouldPause);
}
Audio::AudioStream *VideoDecoder::RewindableAudioTrack::getAudioStream() const {
	// The rewindable stream doubles as the generic playback stream.
	return getRewindableAudioStream();
}
bool VideoDecoder::RewindableAudioTrack::rewind() {
Audio::RewindableAudioStream *stream = getRewindableAudioStream();
assert(stream);
return stream->rewind();
}
Audio::Timestamp VideoDecoder::SeekableAudioTrack::getDuration() const {
	// A seekable track must always be able to produce its stream; its length
	// is the track's duration.
	Audio::SeekableAudioStream *audioStream = getSeekableAudioStream();
	assert(audioStream);

	return audioStream->getLength();
}
Audio::AudioStream *VideoDecoder::SeekableAudioTrack::getAudioStream() const {
	// The seekable stream doubles as the generic playback stream.
	return getSeekableAudioStream();
}
bool VideoDecoder::SeekableAudioTrack::seek(const Audio::Timestamp &time) {
	// A seekable track must always be able to produce its stream; delegate
	// the actual repositioning to it.
	Audio::SeekableAudioStream *audioStream = getSeekableAudioStream();
	assert(audioStream);

	return audioStream->seek(time);
}
VideoDecoder::StreamFileAudioTrack::StreamFileAudioTrack() : _stream(0) {
	// No stream is owned until loadFromFile() succeeds.
}
VideoDecoder::StreamFileAudioTrack::~StreamFileAudioTrack() {
	// The track owns the stream it opened in loadFromFile().
	delete _stream;
}
bool VideoDecoder::StreamFileAudioTrack::loadFromFile(const Common::String &baseName) {
	// TODO: Make sure the stream isn't being played
	// Drop any previously opened stream before opening a new one.
	delete _stream;
	_stream = Audio::SeekableAudioStream::openStreamFile(baseName);

	// Success iff a matching stream file could be opened.
	return _stream != 0;
}
void VideoDecoder::addTrack(Track *track) {
	// The decoder takes ownership of the track.
	_tracks.push_back(track);

	switch (track->getTrackType()) {
	case Track::kTrackTypeAudio: {
		// New audio tracks inherit the decoder-wide volume/balance settings.
		AudioTrack *audio = (AudioTrack *)track;
		audio->setVolume(_audioVolume);
		audio->setBalance(_audioBalance);
		break;
	}
	case Track::kTrackTypeVideo: {
		// Adopt this track as the next one to display if its frame is due
		// sooner than the current candidate's (or there is no candidate yet).
		VideoTrack *video = (VideoTrack *)track;
		if (!_nextVideoTrack || video->getNextFrameStartTime() < _nextVideoTrack->getNextFrameStartTime())
			_nextVideoTrack = video;
		break;
	}
	default:
		break;
	}

	// A track added while the decoder is paused starts out paused too.
	if (isPaused())
		track->pause(true);

	// A track added mid-playback must begin playing right away; only audio
	// tracks are started here (video tracks are driven by frame decoding).
	if (isPlaying() && track->getTrackType() == Track::kTrackTypeAudio)
		((AudioTrack *)track)->start();
}
bool VideoDecoder::addStreamFileTrack(const Common::String &baseName) {
	// Only allow adding external tracks if a video is already loaded
	if (!isVideoLoaded())
		return false;

	StreamFileAudioTrack *track = new StreamFileAudioTrack();

	bool result = track->loadFromFile(baseName);

	if (result)
		addTrack(track); // decoder takes ownership
	else
		delete track; // FIX: the track was leaked when loadFromFile() failed

	return result;
}
void VideoDecoder::setEndTime(const Audio::Timestamp &endTime) {
	Audio::Timestamp startTime = 0;

	if (isPlaying()) {
		// Remember where we are and silence the audio while we change the limit.
		startTime = getTime();
		stopAudio();
	}

	_endTime = endTime;
	_endTimeSet = true;

	// Already past the new end time: leave the audio stopped, playback is over.
	if (startTime > endTime)
		return;

	if (isPlaying()) {
		// We'll assume the audio track is going to start up at the same time it just was
		// and therefore not do any seeking.
		// Might want to set it anyway if we're seekable.
		startAudioLimit(_endTime.msecs() - startTime.msecs());
		_lastTimeChange = startTime;
	}
}
VideoDecoder::Track *VideoDecoder::getTrack(uint track) {
	// Return the track at the given index, or 0 if it is out of range.
	// FIX: the bound check must be >=; the original '>' let track == size()
	// through, causing an out-of-bounds element access.
	if (track >= _tracks.size())
		return 0;

	return _tracks[track];
}
const VideoDecoder::Track *VideoDecoder::getTrack(uint track) const {
	// Return the track at the given index, or 0 if it is out of range.
	// FIX: the bound check must be >=; the original '>' let track == size()
	// through, causing an out-of-bounds element access.
	if (track >= _tracks.size())
		return 0;

	return _tracks[track];
}
bool VideoDecoder::endOfVideoTracks() const {
	// The video portion is finished only when no video track has frames left.
	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
		if ((*it)->getTrackType() != Track::kTrackTypeVideo)
			continue;

		if (!(*it)->endOfTrack())
			return false;
	}

	return true;
}
VideoDecoder::VideoTrack *VideoDecoder::findNextVideoTrack() {
	// Scan all unfinished video tracks for the one whose next frame is due
	// earliest, caching the result in _nextVideoTrack (0 if none remain).
	_nextVideoTrack = 0;
	uint32 earliest = 0xFFFFFFFF;

	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
		if ((*it)->getTrackType() != Track::kTrackTypeVideo || (*it)->endOfTrack())
			continue;

		VideoTrack *candidate = (VideoTrack *)*it;
		uint32 startTime = candidate->getNextFrameStartTime();

		if (startTime < earliest) {
			earliest = startTime;
			_nextVideoTrack = candidate;
		}
	}

	return _nextVideoTrack;
}
void VideoDecoder::startAudio() {
	// With an end time set, the audio must be limited so it stops there.
	if (_endTimeSet) {
		// HACK: Timestamp's subtraction asserts out when subtracting two times
		// with different rates.
		startAudioLimit(_endTime - _lastTimeChange.convertToFramerate(_endTime.framerate()));
		return;
	}

	// No end time: simply start every audio track.
	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
		if ((*it)->getTrackType() == Track::kTrackTypeAudio)
			((AudioTrack *)*it)->start();
}
void VideoDecoder::stopAudio() {
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->stop();
}
void VideoDecoder::startAudioLimit(const Audio::Timestamp &limit) {
	// Start every audio track, each capped at the given playback limit.
	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
		Track *track = *it;

		if (track->getTrackType() == Track::kTrackTypeAudio)
			((AudioTrack *)track)->start(limit);
	}
}
bool VideoDecoder::hasFramesLeft() const {
	// This is similar to endOfVideo(), except it doesn't take Audio into account (and returns true if not the end of the video)
	// This is only used for needsUpdate() atm so that setEndTime() works properly
	// And unlike endOfVideoTracks(), this takes into account _endTime
	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
		// A video track still "has frames" if it is unfinished and, when an
		// end time is set during playback, its next frame is due before that
		// end time.
		if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack() && (!isPlaying() || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
			return true;

	return false;
}
bool VideoDecoder::hasAudio() const {
	// True as soon as any track in the list is an audio track.
	for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
		if ((*it)->getTrackType() != Track::kTrackTypeAudio)
			continue;

		return true;
	}

	return false;
}
} // End of namespace Video

View File

@ -23,18 +23,24 @@
#ifndef VIDEO_DECODER_H
#define VIDEO_DECODER_H
#include "common/str.h"
#include "audio/mixer.h"
#include "audio/timestamp.h" // TODO: Move this to common/ ?
#include "common/array.h"
#include "common/rational.h"
#include "common/str.h"
#include "graphics/pixelformat.h"
namespace Audio {
class AudioStream;
class RewindableAudioStream;
class SeekableAudioStream;
}
namespace Common {
class Rational;
class SeekableReadStream;
}
namespace Graphics {
struct PixelFormat;
struct Surface;
}
@ -48,10 +54,14 @@ public:
VideoDecoder();
virtual ~VideoDecoder() {}
/////////////////////////////////////////
// Opening/Closing a Video
/////////////////////////////////////////
/**
* Load a video from a file with the given name.
*
* A default implementation using loadStream is provided.
* A default implementation using Common::File and loadStream is provided.
*
* @param filename the filename to load
* @return whether loading the file succeeded
@ -62,6 +72,10 @@ public:
* Load a video from a generic read stream. The ownership of the
* stream object transfers to this VideoDecoder instance, which is
* hence also responsible for eventually deleting it.
*
* Implementations of this function are required to call addTrack()
* for each track in the video upon success.
*
* @param stream the stream to load
* @return whether loading the stream succeeded
*/
@ -69,103 +83,107 @@ public:
/**
* Close the active video stream and free any associated resources.
*
* All subclasses that need to close their own resources should still
* call the base class' close() function at the start of their function.
*/
virtual void close() = 0;
virtual void close();
/**
* Returns if a video stream is currently loaded or not.
*/
virtual bool isVideoLoaded() const = 0;
bool isVideoLoaded() const;
/////////////////////////////////////////
// Playback Control
/////////////////////////////////////////
/**
* Returns the width of the video's frames.
* @return the width of the video's frames
*/
virtual uint16 getWidth() const = 0;
/**
* Returns the height of the video's frames.
* @return the height of the video's frames
*/
virtual uint16 getHeight() const = 0;
/**
* Get the pixel format of the currently loaded video.
*/
virtual Graphics::PixelFormat getPixelFormat() const = 0;
/**
* Get the palette for the video in RGB format (if 8bpp or less).
*/
virtual const byte *getPalette() { return 0; }
/**
* Returns if the palette is dirty or not.
*/
virtual bool hasDirtyPalette() const { return false; }
/**
* Set the system palette to the palette returned by getPalette.
* @see getPalette
*/
void setSystemPalette();
/**
* Returns the current frame number of the video.
* @return the last frame decoded by the video
*/
virtual int32 getCurFrame() const { return _curFrame; }
/**
* Returns the number of frames in the video.
* @return the number of frames in the video
*/
virtual uint32 getFrameCount() const = 0;
/**
* Returns the time position (in ms) of the current video.
* This can be based on the "wall clock" time as determined by
* OSystem::getMillis() or the current time of any audio track
* running in the video, and takes pausing the video into account.
* Begin playback of the video at normal speed.
*
* As such, it will differ from what multiplying getCurFrame() by
* some constant would yield, e.g. for a video with non-constant
* frame rate.
* @note This has no effect if the video is already playing.
*/
void start();
/**
* Stop playback of the video.
*
* Due to the nature of the timing, this value may not always be
* completely accurate (since our mixer does not have precise
* timing).
* @note This has no effect if the video is not playing.
*/
virtual uint32 getTime() const;
void stop();
/**
* Return the time (in ms) until the next frame should be displayed.
* Set the rate of playback.
*
* For instance, a rate of 0 would stop the video, while a rate of 1
* would play the video normally. Passing 2 to this function would
* play the video at twice the normal speed.
*
* @note This function does not work for non-0/1 rates on videos that
* have audio tracks.
*
* @todo This currently does not implement backwards playback, but will
* be implemented soon.
*/
virtual uint32 getTimeToNextFrame() const = 0;
void setRate(const Common::Rational &rate);
/**
* Check whether a new frame should be decoded, i.e. because enough
* time has elapsed since the last frame was decoded.
* @return whether a new frame should be decoded or not
* Returns the rate at which the video is being played.
*/
virtual bool needsUpdate() const;
Common::Rational getRate() const { return _playbackRate; }
/**
* Decode the next frame into a surface and return the latter.
* @return a surface containing the decoded frame, or 0
* @note Ownership of the returned surface stays with the VideoDecoder,
* hence the caller must *not* free it.
* @note this may return 0, in which case the last frame should be kept on screen
* Returns if the video is currently playing or not.
*
* This is not equivalent to the inverse of endOfVideo(). A video keeps
* its playing status even after reaching the end of the video. This will
* return true after calling start() and will continue to return true
* until stop() (or close()) is called.
*/
virtual const Graphics::Surface *decodeNextFrame() = 0;
bool isPlaying() const;
/**
* Returns if the video has finished playing or not.
* @return true if the video has finished playing or if none is loaded, false otherwise
* Returns if a video is rewindable or not. The default implementation
* polls each track for rewindability.
*/
virtual bool endOfVideo() const;
virtual bool isRewindable() const;
/**
* Rewind a video to its beginning.
*
* If the video is playing, it will continue to play. The default
* implementation will rewind each track.
*
* @return true on success, false otherwise
*/
virtual bool rewind();
/**
* Returns if a video is seekable or not. The default implementation
* polls each track for seekability.
*/
virtual bool isSeekable() const;
/**
* Seek to a given time in the video.
*
* If the video is playing, it will continue to play. The default
* implementation will seek each track and must still be called
* from any other implementation.
*
* @param time The time to seek to
* @return true on success, false otherwise
*/
virtual bool seek(const Audio::Timestamp &time);
/**
* Seek to a given frame.
*
* This only works when one video track is present, and that track
* supports getFrameTime(). This calls seek() internally.
*/
bool seekToFrame(uint frame);
/**
* Pause or resume the video. This should stop/resume any audio playback
@ -185,56 +203,518 @@ public:
*/
bool isPaused() const { return _pauseLevel != 0; }
/**
* Set the time for this video to end at. At this time in the video,
* all audio will stop and endOfVideo() will return true.
*
* While the setting is stored even if a video is not playing,
* endOfVideo() is only affected when the video is playing.
*/
void setEndTime(const Audio::Timestamp &endTime);
/**
* Get the stop time of the video (if not set, zero)
*/
Audio::Timestamp getEndTime() const { return _endTime; }
/////////////////////////////////////////
// Playback Status
/////////////////////////////////////////
/**
* Returns if the video has reached the end or not.
* @return true if the video has finished playing or if none is loaded, false otherwise
*/
bool endOfVideo() const;
/**
* Returns the current frame number of the video.
* @return the last frame decoded by the video
*/
int getCurFrame() const;
/**
* Returns the number of frames in the video.
* @return the number of frames in the video
*/
uint32 getFrameCount() const;
/**
* Returns the time position (in ms) of the current video.
* This can be based on the "wall clock" time as determined by
* OSystem::getMillis() or the current time of any audio track
* running in the video, and takes pausing the video into account.
*
* As such, it will differ from what multiplying getCurFrame() by
* some constant would yield, e.g. for a video with non-constant
* frame rate.
*
* Due to the nature of the timing, this value may not always be
* completely accurate (since our mixer does not have precise
* timing).
*/
uint32 getTime() const;
/////////////////////////////////////////
// Video Info
/////////////////////////////////////////
/**
* Returns the width of the video's frames.
*
* By default, this finds the largest width between all of the loaded
* tracks. However, a subclass may override this if it does any kind
* of post-processing on it.
*
* @return the width of the video's frames
*/
virtual uint16 getWidth() const;
/**
* Returns the height of the video's frames.
*
* By default, this finds the largest height between all of the loaded
* tracks. However, a subclass may override this if it does any kind
* of post-processing on it.
*
* @return the height of the video's frames
*/
virtual uint16 getHeight() const;
/**
* Get the pixel format of the currently loaded video.
*/
Graphics::PixelFormat getPixelFormat() const;
/**
* Get the duration of the video.
*
* If the duration is unknown, this will return 0. If this is not
* overriden, it will take the length of the longest track.
*/
virtual Audio::Timestamp getDuration() const;
/////////////////////////////////////////
// Frame Decoding
/////////////////////////////////////////
/**
* Get the palette for the video in RGB format (if 8bpp or less).
*
* The palette's format is the same as PaletteManager's palette
* (interleaved RGB values).
*/
const byte *getPalette();
/**
* Returns if the palette is dirty or not.
*/
bool hasDirtyPalette() const { return _dirtyPalette; }
/**
* Return the time (in ms) until the next frame should be displayed.
*/
uint32 getTimeToNextFrame() const;
/**
* Check whether a new frame should be decoded, i.e. because enough
* time has elapsed since the last frame was decoded.
* @return whether a new frame should be decoded or not
*/
bool needsUpdate() const;
/**
* Decode the next frame into a surface and return the latter.
*
* A subclass may override this, but must still call this function. As an
* example, a subclass may do this to apply some global video scale to
* individual track's frame.
*
* Note that this will call readNextPacket() internally first before calling
* the next video track's decodeNextFrame() function.
*
* @return a surface containing the decoded frame, or 0
* @note Ownership of the returned surface stays with the VideoDecoder,
* hence the caller must *not* free it.
* @note this may return 0, in which case the last frame should be kept on screen
*/
virtual const Graphics::Surface *decodeNextFrame();
/**
* Set the default high color format for videos that convert from YUV.
*
* By default, VideoDecoder will attempt to use the screen format
* if it's >8bpp and use a 32bpp format when not.
*
* This must be set before calling loadStream().
*/
void setDefaultHighColorFormat(const Graphics::PixelFormat &format) { _defaultHighColorFormat = format; }
/////////////////////////////////////////
// Audio Control
/////////////////////////////////////////
/**
* Get the current volume at which the audio in the video is being played
* @return the current volume at which the audio in the video is being played
*/
virtual byte getVolume() const { return _audioVolume; }
byte getVolume() const { return _audioVolume; }
/**
* Set the volume at which the audio in the video should be played.
* This setting remains until reset() is called (which may be called
* from loadStream() or close()). The default volume is the maximum.
*
* @note This function calls updateVolume() by default.
* This setting remains until close() is called (which may be called
* from loadStream()). The default volume is the maximum.
*
* @param volume The volume at which to play the audio in the video
*/
virtual void setVolume(byte volume);
void setVolume(byte volume);
/**
* Get the current balance at which the audio in the video is being played
* @return the current balance at which the audio in the video is being played
*/
virtual int8 getBalance() const { return _audioBalance; }
int8 getBalance() const { return _audioBalance; }
/**
* Set the balance at which the audio in the video should be played.
* This setting remains until reset() is called (which may be called
* from loadStream() or close()). The default balance is 0.
*
* @note This function calls updateBalance() by default.
* This setting remains until close() is called (which may be called
* from loadStream()). The default balance is 0.
*
* @param balance The balance at which to play the audio in the video
*/
virtual void setBalance(int8 balance);
void setBalance(int8 balance);
/**
* Add an audio track from a stream file.
*
* This calls SeekableAudioStream::openStreamFile() internally
*/
bool addStreamFileTrack(const Common::String &baseName);
protected:
/**
* Resets _curFrame and _startTime. Should be called from every close() function.
* An abstract representation of a track in a movie. Since tracks here are designed
* to work independently, they should not reference any other track(s) in the video.
*/
void reset();
class Track {
public:
Track();
virtual ~Track() {}
/**
* The types of tracks this class can be.
*/
enum TrackType {
kTrackTypeNone,
kTrackTypeVideo,
kTrackTypeAudio
};
/**
* Get the type of track.
*/
virtual TrackType getTrackType() const = 0;
/**
* Return if the track has finished.
*/
virtual bool endOfTrack() const = 0;
/**
* Return if the track is rewindable.
*
* If a video is seekable, it does not need to implement this
* for it to also be rewindable.
*/
virtual bool isRewindable() const;
/**
* Rewind the video to the beginning.
*
* If a video is seekable, it does not need to implement this
* for it to also be rewindable.
*
* @return true on success, false otherwise.
*/
virtual bool rewind();
/**
* Return if the track is seekable.
*/
virtual bool isSeekable() const { return false; }
/**
* Seek to the given time.
* @param time The time to seek to, from the beginning of the video.
* @return true on success, false otherwise.
*/
virtual bool seek(const Audio::Timestamp &time) { return false; }
/**
* Set the pause status of the track.
*/
void pause(bool shouldPause);
/**
* Return if the track is paused.
*/
bool isPaused() const { return _paused; }
/**
* Get the duration of the track (starting from this track's start time).
*
* By default, this returns 0 for unknown.
*/
virtual Audio::Timestamp getDuration() const;
protected:
/**
* Function called by pause() for subclasses to implement.
*/
virtual void pauseIntern(bool shouldPause) {}
private:
bool _paused;
};
/**
* Actual implementation of pause by subclasses. See pause()
* for details.
* An abstract representation of a video track.
*/
virtual void pauseVideoIntern(bool pause) {}
class VideoTrack : public Track {
public:
VideoTrack() {}
virtual ~VideoTrack() {}
TrackType getTrackType() const { return kTrackTypeVideo; }
virtual bool endOfTrack() const;
/**
* Get the width of this track
*/
virtual uint16 getWidth() const = 0;
/**
* Get the height of this track
*/
virtual uint16 getHeight() const = 0;
/**
* Get the pixel format of this track
*/
virtual Graphics::PixelFormat getPixelFormat() const = 0;
/**
* Get the current frame of this track
*
* @see VideoDecoder::getCurFrame()
*/
virtual int getCurFrame() const = 0;
/**
* Get the frame count of this track
*
* @note If the frame count is unknown, return 0 (which is also
* the default implementation of the function). However, one must
* also implement endOfTrack() in that case.
*/
virtual int getFrameCount() const { return 0; }
/**
* Get the start time of the next frame in milliseconds since
* the start of the video
*/
virtual uint32 getNextFrameStartTime() const = 0;
/**
* Decode the next frame
*/
virtual const Graphics::Surface *decodeNextFrame() = 0;
/**
* Get the palette currently in use by this track
*/
virtual const byte *getPalette() const { return 0; }
/**
* Does the palette currently in use by this track need to be updated?
*/
virtual bool hasDirtyPalette() const { return false; }
/**
* Get the time the given frame should be shown.
*
* By default, this returns a negative (invalid) value. This function
* should only be used by VideoDecoder::seekToFrame().
*/
virtual Audio::Timestamp getFrameTime(uint frame) const;
};
/**
* Add the time the video has been paused to maintain sync
* A VideoTrack that is played at a constant rate.
*
* If the frame count is unknown, you must override endOfTrack().
*/
virtual void addPauseTime(uint32 ms) { _startTime += ms; }
class FixedRateVideoTrack : public VideoTrack {
public:
FixedRateVideoTrack() {}
virtual ~FixedRateVideoTrack() {}
uint32 getNextFrameStartTime() const;
virtual Audio::Timestamp getDuration() const;
Audio::Timestamp getFrameTime(uint frame) const;
protected:
/**
* Get the rate at which this track is played.
*/
virtual Common::Rational getFrameRate() const = 0;
/**
* Get the frame that should be displaying at the given time. This is
* helpful for someone implementing seek().
*/
uint getFrameAtTime(const Audio::Timestamp &time) const;
};
/**
* An abstract representation of an audio track.
*/
class AudioTrack : public Track {
public:
AudioTrack() {}
virtual ~AudioTrack() {}
TrackType getTrackType() const { return kTrackTypeAudio; }
virtual bool endOfTrack() const;
/**
* Start playing this track
*/
void start();
/**
* Stop playing this track
*/
void stop();
void start(const Audio::Timestamp &limit);
/**
* Get the volume for this track
*/
byte getVolume() const { return _volume; }
/**
* Set the volume for this track
*/
void setVolume(byte volume);
/**
* Get the balance for this track
*/
int8 getBalance() const { return _balance; }
/**
* Set the balance for this track
*/
void setBalance(int8 balance);
/**
* Get the time the AudioStream behind this track has been
* running
*/
uint32 getRunningTime() const;
/**
* Get the sound type to be used when playing this audio track
*/
virtual Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kPlainSoundType; }
protected:
void pauseIntern(bool shouldPause);
/**
* Get the AudioStream that is the representation of this AudioTrack
*/
virtual Audio::AudioStream *getAudioStream() const = 0;
private:
Audio::SoundHandle _handle;
byte _volume;
int8 _balance;
};
/**
* An AudioTrack that implements isRewindable() and rewind() using
* RewindableAudioStream.
*/
class RewindableAudioTrack : public AudioTrack {
public:
RewindableAudioTrack() {}
virtual ~RewindableAudioTrack() {}
bool isRewindable() const { return true; }
bool rewind();
protected:
Audio::AudioStream *getAudioStream() const;
/**
* Get the RewindableAudioStream pointer to be used by this class
* for rewind() and getAudioStream()
*/
virtual Audio::RewindableAudioStream *getRewindableAudioStream() const = 0;
};
/**
* An AudioTrack that implements isSeekable() and seek() using
* SeekableAudioStream.
*/
class SeekableAudioTrack : public AudioTrack {
public:
SeekableAudioTrack() {}
virtual ~SeekableAudioTrack() {}
bool isSeekable() const { return true; }
bool seek(const Audio::Timestamp &time);
Audio::Timestamp getDuration() const;
protected:
Audio::AudioStream *getAudioStream() const;
/**
* Get the SeekableAudioStream pointer to be used by this class
* for seek(), getDuration(), and getAudioStream()
*/
virtual Audio::SeekableAudioStream *getSeekableAudioStream() const = 0;
};
/**
* A SeekableAudioTrack that constructs its SeekableAudioStream using
* SeekableAudioStream::openStreamFile()
*/
class StreamFileAudioTrack : public SeekableAudioTrack {
public:
StreamFileAudioTrack();
~StreamFileAudioTrack();
/**
* Load the track from a file with the given base name.
*
* @return true on success, false otherwise
*/
bool loadFromFile(const Common::String &baseName);
protected:
Audio::SeekableAudioStream *_stream;
Audio::SeekableAudioStream *getSeekableAudioStream() const { return _stream; }
};
/**
* Reset the pause start time (which should be called when seeking)
@ -242,81 +722,110 @@ protected:
void resetPauseStartTime();
/**
* Update currently playing audio tracks with the new volume setting
* Decode enough data for the next frame and enough audio to last that long.
*
* This function is used by this class' decodeNextFrame() function. A subclass
* of a Track may decide to just have its decodeNextFrame() function read
* and decode the frame, but only if it is the only track in the video.
*/
virtual void updateVolume() {}
virtual void readNextPacket() {}
/**
* Update currently playing audio tracks with the new balance setting
* Define a track to be used by this class.
*
* The pointer is then owned by this base class.
*/
virtual void updateBalance() {}
void addTrack(Track *track);
int32 _curFrame;
int32 _startTime;
/**
* Whether or not getTime() will sync with a playing audio track.
*
* A subclass can override this to disable this feature.
*/
virtual bool useAudioSync() const { return true; }
/**
* Get the given track based on its index.
*
* @return A valid track pointer on success, 0 otherwise
*/
Track *getTrack(uint track);
/**
* Get the given track based on its index
*
* @return A valid track pointer on success, 0 otherwise
*/
const Track *getTrack(uint track) const;
/**
* Find out if all video tracks have finished
*
* This is useful if one wants to figure out if they need to buffer all
* remaining audio in a file.
*/
bool endOfVideoTracks() const;
/**
* Get the default high color format
*/
Graphics::PixelFormat getDefaultHighColorFormat() const { return _defaultHighColorFormat; }
/**
* Set _nextVideoTrack to the video track with the lowest start time for the next frame.
*
* @return _nextVideoTrack
*/
VideoTrack *findNextVideoTrack();
/**
* Typedef helpers for accessing tracks
*/
typedef Common::Array<Track *> TrackList;
typedef TrackList::iterator TrackListIterator;
/**
* Get the begin iterator of the tracks
*/
TrackListIterator getTrackListBegin() { return _tracks.begin(); }
/**
* Get the end iterator of the tracks
*/
TrackListIterator getTrackListEnd() { return _tracks.end(); }
private:
// Tracks owned by this VideoDecoder
TrackList _tracks;
// Current playback status
bool _needsUpdate;
Audio::Timestamp _lastTimeChange, _endTime;
bool _endTimeSet;
Common::Rational _playbackRate;
VideoTrack *_nextVideoTrack;
// Palette settings from individual tracks
mutable bool _dirtyPalette;
const byte *_palette;
// Default PixelFormat settings
Graphics::PixelFormat _defaultHighColorFormat;
// Internal helper functions
void stopAudio();
void startAudio();
void startAudioLimit(const Audio::Timestamp &limit);
bool hasFramesLeft() const;
bool hasAudio() const;
int32 _startTime;
uint32 _pauseLevel;
uint32 _pauseStartTime;
byte _audioVolume;
int8 _audioBalance;
};
/**
* A VideoDecoder wrapper that implements getTimeToNextFrame() based on getFrameRate().
*/
class FixedRateVideoDecoder : public virtual VideoDecoder {
public:
uint32 getTimeToNextFrame() const;
protected:
/**
* Return the frame rate in frames per second.
* This returns a Rational because videos can have rates that are not integers and
* there are some videos with frame rates < 1.
*/
virtual Common::Rational getFrameRate() const = 0;
private:
uint32 getFrameBeginTime(uint32 frame) const;
};
/**
* A VideoDecoder that can be rewound back to the beginning.
*/
class RewindableVideoDecoder : public virtual VideoDecoder {
public:
/**
* Rewind to the beginning of the video.
*/
virtual void rewind() = 0;
};
/**
* A VideoDecoder that can seek to a frame or point in time.
*/
class SeekableVideoDecoder : public virtual RewindableVideoDecoder {
public:
/**
* Seek to the specified time.
*/
virtual void seekToTime(const Audio::Timestamp &time) = 0;
/**
* Seek to the specified time (in ms).
*/
void seekToTime(uint32 msecs) { seekToTime(Audio::Timestamp(msecs, 1000)); }
/**
* Implementation of RewindableVideoDecoder::rewind().
*/
virtual void rewind() { seekToTime(0); }
/**
* Get the total duration of the video (in ms).
*/
virtual uint32 getDuration() const = 0;
};
} // End of namespace Video
#endif