VIDEO: Add in the files for the new VideoInterface from ScummVM. (And disable Bink/Smush for now)

Einar Johan Trøan Sømåen 2012-11-26 23:02:56 +01:00
parent 62367556c7
commit 57e8393ac5
9 changed files with 2131 additions and 1027 deletions

configure

@ -135,7 +135,7 @@ _build_scalers=yes
_build_hq_scalers=yes
_enable_prof=no
_global_constructors=no
_bink=yes
_bink=no
# Default vkeybd/keymapper options
_vkeybd=no
_keymapper=no


@ -70,10 +70,8 @@ MODULE_OBJS := \
movie/codecs/blocky8.o \
movie/codecs/blocky16.o \
movie/codecs/vima.o \
movie/codecs/smush_decoder.o \
movie/bink.o \
movie/mpeg.o \
movie/smush.o \
movie/movie.o \
update/packfile.o \
update/mscab.o \


@ -215,6 +215,11 @@ void MoviePlayer::restoreState(SaveGame *state) {
#define NEED_NULLPLAYER
#endif
// Temporary fix while reworking codecs:
#ifndef NEED_NULLPLAYER
#define NEED_NULLPLAYER
#endif
// Fallback for when USE_MPEG2 / USE_BINK isn't defined
#ifdef NEED_NULLPLAYER
@ -240,6 +245,10 @@ private:
};
#endif
MoviePlayer *CreateSmushPlayer(bool demo) {
return new NullPlayer("SMUSH");
}
#ifndef USE_MPEG2
MoviePlayer *CreateMpegPlayer() {
return new NullPlayer("MPEG2");
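With Bink and SMUSH disabled, every factory here hands back the NullPlayer defined just above inside the NEED_NULLPLAYER guard. A hedged sketch of what such a stub amounts to; the playback method's name and signature are illustrative, only the constructor argument is taken from the calls above:

class NullPlayer : public MoviePlayer {
public:
	NullPlayer(const char *codecID) {
		warning("%s-playback not compiled into this build", codecID); // report the missing codec once
	}
	// Illustrative entry point: whatever playback method movie.h declares,
	// the stub reports failure so the engine simply skips the movie.
	bool play(const Common::String &filename, bool looping, int x, int y) { return false; }
};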


@ -29,9 +29,9 @@
namespace Grim {
MoviePlayer *CreateSmushPlayer(bool demo) {
/*MoviePlayer *CreateSmushPlayer(bool demo) {
return new SmushPlayer(demo);
}
}*/
SmushPlayer::SmushPlayer(bool demo) : MoviePlayer(), _demo(demo) {
_smushDecoder = new SmushDecoder();

File diff suppressed because it is too large.


@ -31,22 +31,27 @@
#ifndef VIDEO_BINK_DECODER_H
#define VIDEO_BINK_DECODER_H
#include "audio/audiostream.h"
#include "audio/mixer.h"
#include "common/array.h"
#include "common/rational.h"
#include "graphics/surface.h"
#include "video/video_decoder.h"
namespace Common {
class SeekableReadStream;
class BitStream;
class Huffman;
namespace Audio {
class AudioStream;
class QueuingAudioStream;
}
class RDFT;
class DCT;
namespace Common {
class SeekableReadStream;
class BitStream;
class Huffman;
class RDFT;
class DCT;
}
namespace Graphics {
struct Surface;
}
namespace Video {
@ -57,92 +62,28 @@ namespace Video {
* Video decoder used in engines:
* - scumm (he)
*/
class BinkDecoder : public FixedRateVideoDecoder {
class BinkDecoder : public VideoDecoder {
public:
BinkDecoder();
~BinkDecoder();
// VideoDecoder API
bool loadStream(Common::SeekableReadStream *stream);
void close();
bool isVideoLoaded() const { return _bink != 0; }
uint16 getWidth() const { return _surface.w; }
uint16 getHeight() const { return _surface.h; }
Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
uint32 getFrameCount() const { return _frames.size(); }
uint32 getTime() const;
const Graphics::Surface *decodeNextFrame();
// FixedRateVideoDecoder
Common::Rational getFrameRate() const { return _frameRate; }
// Bink specific
bool loadStream(Common::SeekableReadStream *stream, const Graphics::PixelFormat &format);
protected:
// VideoDecoder API
void updateVolume();
void updateBalance();
void readNextPacket();
private:
static const int kAudioChannelsMax = 2;
static const int kAudioBlockSizeMax = (kAudioChannelsMax << 11);
/** IDs for different data types used in Bink video codec. */
enum Source {
kSourceBlockTypes = 0, ///< 8x8 block types.
kSourceSubBlockTypes , ///< 16x16 block types (a subset of 8x8 block types).
kSourceColors , ///< Pixel values used for different block types.
kSourcePattern , ///< 8-bit values for 2-color pattern fill.
kSourceXOff , ///< X components of motion value.
kSourceYOff , ///< Y components of motion value.
kSourceIntraDC , ///< DC values for intrablocks with DCT.
kSourceInterDC , ///< DC values for interblocks with DCT.
kSourceRun , ///< Run lengths for special fill block.
kSourceMAX
};
/** Bink video block types. */
enum BlockType {
kBlockSkip = 0, ///< Skipped block.
kBlockScaled , ///< Block has size 16x16.
kBlockMotion , ///< Block is copied from previous frame with some offset.
kBlockRun , ///< Block is composed from runs of colors with custom scan order.
kBlockResidue , ///< Motion block with some difference added.
kBlockIntra , ///< Intra DCT block.
kBlockFill , ///< Block is filled with single color.
kBlockInter , ///< Motion block with DCT applied to the difference.
kBlockPattern , ///< Block is filled with two colors following custom pattern.
kBlockRaw ///< Uncoded 8x8 block.
};
/** Data structure for decoding and translating Huffman'd data. */
struct Huffman {
int index; ///< Index of the Huffman codebook to use.
byte symbols[16]; ///< Huffman symbol => Bink symbol translation list.
};
/** Data structure used for decoding a single Bink data type. */
struct Bundle {
int countLengths[2]; ///< Lengths of number of entries to decode (in bits).
int countLength; ///< Length of number of entries to decode (in bits) for the current plane.
Huffman huffman; ///< Huffman codebook.
byte *data; ///< Buffer for decoded symbols.
byte *dataEnd; ///< Buffer end.
byte *curDec; ///< Pointer to the data that wasn't yet decoded.
byte *curPtr; ///< Pointer to the data that wasn't yet read.
};
enum AudioCodec {
kAudioCodecDCT,
kAudioCodecRDFT
};
/** An audio track. */
struct AudioTrack {
struct AudioInfo {
uint16 flags;
uint32 sampleRate;
@ -177,8 +118,8 @@ protected:
Common::RDFT *rdft;
Common::DCT *dct;
AudioTrack();
~AudioTrack();
AudioInfo();
~AudioInfo();
};
/** A video frame. */
@ -194,149 +135,220 @@ protected:
~VideoFrame();
};
/** A decoder state. */
struct DecodeContext {
VideoFrame *video;
class BinkVideoTrack : public FixedRateVideoTrack {
public:
BinkVideoTrack(uint32 width, uint32 height, const Graphics::PixelFormat &format, uint32 frameCount, const Common::Rational &frameRate, bool swapPlanes, bool hasAlpha, uint32 id);
~BinkVideoTrack();
uint32 planeIdx;
uint16 getWidth() const { return _surface.w; }
uint16 getHeight() const { return _surface.h; }
Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
int getCurFrame() const { return _curFrame; }
int getFrameCount() const { return _frameCount; }
const Graphics::Surface *decodeNextFrame() { return &_surface; }
uint32 blockX;
uint32 blockY;
/** Decode a video packet. */
void decodePacket(VideoFrame &frame);
byte *dest;
byte *prev;
protected:
Common::Rational getFrameRate() const { return _frameRate; }
byte *destStart, *destEnd;
byte *prevStart, *prevEnd;
private:
/** A decoder state. */
struct DecodeContext {
VideoFrame *video;
uint32 pitch;
uint32 planeIdx;
int coordMap[64];
int coordScaledMap1[64];
int coordScaledMap2[64];
int coordScaledMap3[64];
int coordScaledMap4[64];
uint32 blockX;
uint32 blockY;
byte *dest;
byte *prev;
byte *destStart, *destEnd;
byte *prevStart, *prevEnd;
uint32 pitch;
int coordMap[64];
int coordScaledMap1[64];
int coordScaledMap2[64];
int coordScaledMap3[64];
int coordScaledMap4[64];
};
/** IDs for different data types used in Bink video codec. */
enum Source {
kSourceBlockTypes = 0, ///< 8x8 block types.
kSourceSubBlockTypes , ///< 16x16 block types (a subset of 8x8 block types).
kSourceColors , ///< Pixel values used for different block types.
kSourcePattern , ///< 8-bit values for 2-color pattern fill.
kSourceXOff , ///< X components of motion value.
kSourceYOff , ///< Y components of motion value.
kSourceIntraDC , ///< DC values for intrablocks with DCT.
kSourceInterDC , ///< DC values for interblocks with DCT.
kSourceRun , ///< Run lengths for special fill block.
kSourceMAX
};
/** Bink video block types. */
enum BlockType {
kBlockSkip = 0, ///< Skipped block.
kBlockScaled , ///< Block has size 16x16.
kBlockMotion , ///< Block is copied from previous frame with some offset.
kBlockRun , ///< Block is composed from runs of colors with custom scan order.
kBlockResidue , ///< Motion block with some difference added.
kBlockIntra , ///< Intra DCT block.
kBlockFill , ///< Block is filled with single color.
kBlockInter , ///< Motion block with DCT applied to the difference.
kBlockPattern , ///< Block is filled with two colors following custom pattern.
kBlockRaw ///< Uncoded 8x8 block.
};
/** Data structure for decoding and translating Huffman'd data. */
struct Huffman {
int index; ///< Index of the Huffman codebook to use.
byte symbols[16]; ///< Huffman symbol => Bink symbol translation list.
};
/** Data structure used for decoding a single Bink data type. */
struct Bundle {
int countLengths[2]; ///< Lengths of number of entries to decode (in bits).
int countLength; ///< Length of number of entries to decode (in bits) for the current plane.
Huffman huffman; ///< Huffman codebook.
byte *data; ///< Buffer for decoded symbols.
byte *dataEnd; ///< Buffer end.
byte *curDec; ///< Pointer to the data that wasn't yet decoded.
byte *curPtr; ///< Pointer to the data that wasn't yet read.
};
int _curFrame;
int _frameCount;
Graphics::Surface _surface;
int _surfaceWidth; ///< The actual surface width
int _surfaceHeight; ///< The actual surface height
uint32 _id; ///< The BIK FourCC.
bool _hasAlpha; ///< Do video frames have alpha?
bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV?
Common::Rational _frameRate;
Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types.
Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding.
/** Huffman codebooks to use for decoding high nibbles in color data types. */
Huffman _colHighHuffman[16];
/** Value of the last decoded high nibble in color data types. */
int _colLastVal;
byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame.
byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame.
/** Initialize the bundles. */
void initBundles();
/** Deinitialize the bundles. */
void deinitBundles();
/** Initialize the Huffman decoders. */
void initHuffman();
/** Decode a plane. */
void decodePlane(VideoFrame &video, int planeIdx, bool isChroma);
/** Read/Initialize a bundle for decoding a plane. */
void readBundle(VideoFrame &video, Source source);
/** Read the symbols for a Huffman code. */
void readHuffman(VideoFrame &video, Huffman &huffman);
/** Merge two Huffman symbol lists. */
void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size);
/** Read and translate a symbol out of a Huffman code. */
byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman);
/** Get a direct value out of a bundle. */
int32 getBundleValue(Source source);
/** Read a count value out of a bundle. */
uint32 readBundleCount(VideoFrame &video, Bundle &bundle);
// Handle the block types
void blockSkip (DecodeContext &ctx);
void blockScaledSkip (DecodeContext &ctx);
void blockScaledRun (DecodeContext &ctx);
void blockScaledIntra (DecodeContext &ctx);
void blockScaledFill (DecodeContext &ctx);
void blockScaledPattern(DecodeContext &ctx);
void blockScaledRaw (DecodeContext &ctx);
void blockScaled (DecodeContext &ctx);
void blockMotion (DecodeContext &ctx);
void blockRun (DecodeContext &ctx);
void blockResidue (DecodeContext &ctx);
void blockIntra (DecodeContext &ctx);
void blockFill (DecodeContext &ctx);
void blockInter (DecodeContext &ctx);
void blockPattern (DecodeContext &ctx);
void blockRaw (DecodeContext &ctx);
// Read the bundles
void readRuns (VideoFrame &video, Bundle &bundle);
void readMotionValues(VideoFrame &video, Bundle &bundle);
void readBlockTypes (VideoFrame &video, Bundle &bundle);
void readPatterns (VideoFrame &video, Bundle &bundle);
void readColors (VideoFrame &video, Bundle &bundle);
void readDCS (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign);
void readDCTCoeffs (VideoFrame &video, int16 *block, bool isIntra);
void readResidue (VideoFrame &video, int16 *block, int masksCount);
// Bink video IDCT
void IDCT(int16 *block);
void IDCTPut(DecodeContext &ctx, int16 *block);
void IDCTAdd(DecodeContext &ctx, int16 *block);
};
class BinkAudioTrack : public AudioTrack {
public:
BinkAudioTrack(AudioInfo &audio);
~BinkAudioTrack();
/** Decode an audio packet. */
void decodePacket();
protected:
Audio::AudioStream *getAudioStream() const;
private:
AudioInfo *_audioInfo;
Audio::QueuingAudioStream *_audioStream;
float getFloat();
/** Decode an audio block. */
void audioBlock(int16 *out);
/** Decode a DCT'd audio block. */
void audioBlockDCT();
/** Decode a RDFT'd audio block. */
void audioBlockRDFT();
void readAudioCoeffs(float *coeffs);
static void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels);
};
Common::SeekableReadStream *_bink;
uint32 _id; ///< The BIK FourCC.
Common::Rational _frameRate;
Graphics::Surface _surface;
Audio::SoundHandle _audioHandle;
Audio::QueuingAudioStream *_audioStream;
int32 _audioStartOffset;
uint32 _videoFlags; ///< Video frame features.
bool _hasAlpha; ///< Do video frames have alpha?
bool _swapPlanes; ///< Are the planes ordered (A)YVU instead of (A)YUV?
Common::Array<AudioTrack> _audioTracks; ///< All audio tracks.
Common::Array<AudioInfo> _audioTracks; ///< All audio tracks.
Common::Array<VideoFrame> _frames; ///< All video frames.
uint32 _audioTrack; ///< Audio track to use.
Common::Huffman *_huffman[16]; ///< The 16 Huffman codebooks used in Bink decoding.
Bundle _bundles[kSourceMAX]; ///< Bundles for decoding all data types.
/** Huffman codebooks to use for decoding high nibbles in color data types. */
Huffman _colHighHuffman[16];
/** Value of the last decoded high nibble in color data types. */
int _colLastVal;
byte *_curPlanes[4]; ///< The 4 color planes, YUVA, current frame.
byte *_oldPlanes[4]; ///< The 4 color planes, YUVA, last frame.
/** Initialize the bundles. */
void initBundles();
/** Deinitialize the bundles. */
void deinitBundles();
/** Initialize the Huffman decoders. */
void initHuffman();
/** Decode an audio packet. */
void audioPacket(AudioTrack &audio);
/** Decode a video packet. */
virtual void videoPacket(VideoFrame &video);
/** Decode a plane. */
void decodePlane(VideoFrame &video, int planeIdx, bool isChroma);
/** Read/Initialize a bundle for decoding a plane. */
void readBundle(VideoFrame &video, Source source);
/** Read the symbols for a Huffman code. */
void readHuffman(VideoFrame &video, Huffman &huffman);
/** Merge two Huffman symbol lists. */
void mergeHuffmanSymbols(VideoFrame &video, byte *dst, const byte *src, int size);
/** Read and translate a symbol out of a Huffman code. */
byte getHuffmanSymbol(VideoFrame &video, Huffman &huffman);
/** Get a direct value out of a bundle. */
int32 getBundleValue(Source source);
/** Read a count value out of a bundle. */
uint32 readBundleCount(VideoFrame &video, Bundle &bundle);
// Handle the block types
void blockSkip (DecodeContext &ctx);
void blockScaledSkip (DecodeContext &ctx);
void blockScaledRun (DecodeContext &ctx);
void blockScaledIntra (DecodeContext &ctx);
void blockScaledFill (DecodeContext &ctx);
void blockScaledPattern(DecodeContext &ctx);
void blockScaledRaw (DecodeContext &ctx);
void blockScaled (DecodeContext &ctx);
void blockMotion (DecodeContext &ctx);
void blockRun (DecodeContext &ctx);
void blockResidue (DecodeContext &ctx);
void blockIntra (DecodeContext &ctx);
void blockFill (DecodeContext &ctx);
void blockInter (DecodeContext &ctx);
void blockPattern (DecodeContext &ctx);
void blockRaw (DecodeContext &ctx);
// Read the bundles
void readRuns (VideoFrame &video, Bundle &bundle);
void readMotionValues(VideoFrame &video, Bundle &bundle);
void readBlockTypes (VideoFrame &video, Bundle &bundle);
void readPatterns (VideoFrame &video, Bundle &bundle);
void readColors (VideoFrame &video, Bundle &bundle);
void readDCS (VideoFrame &video, Bundle &bundle, int startBits, bool hasSign);
void readDCTCoeffs (VideoFrame &video, int16 *block, bool isIntra);
void readResidue (VideoFrame &video, int16 *block, int masksCount);
void initAudioTrack(AudioTrack &audio);
float getFloat(AudioTrack &audio);
/** Decode an audio block. */
void audioBlock (AudioTrack &audio, int16 *out);
/** Decode a DCT'd audio block. */
void audioBlockDCT (AudioTrack &audio);
/** Decode a RDFT'd audio block. */
void audioBlockRDFT(AudioTrack &audio);
void readAudioCoeffs(AudioTrack &audio, float *coeffs);
void floatToInt16Interleave(int16 *dst, const float **src, uint32 length, uint8 channels);
// Bink video IDCT
void IDCT(int16 *block);
void IDCTPut(DecodeContext &ctx, int16 *block);
void IDCTAdd(DecodeContext &ctx, int16 *block);
/** Start playing the audio track */
void startAudio();
/** Stop playing the audio track */
void stopAudio();
void initAudioTrack(AudioInfo &audio);
};
} // End of namespace Video
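The restructuring above follows the new track model: loadStream() parses the BIK header and then registers one BinkVideoTrack plus one BinkAudioTrack per audio stream through the base class' addTrack(). A hedged sketch of that wiring; the header parsing is omitted, and the locals width, height, frameCount, frameRate, swapPlanes, hasAlpha and id stand in for values read from the header:

bool BinkDecoder::loadStream(Common::SeekableReadStream *stream) {
	close();

	// ... read the BIK header: dimensions, frame count, frame rate, video
	// flags and the audio descriptors stored into _audioTracks (omitted) ...

	addTrack(new BinkVideoTrack(width, height, getDefaultHighColorFormat(),
	                            frameCount, frameRate, swapPlanes, hasAlpha, id));

	for (uint32 i = 0; i < _audioTracks.size(); i++)
		addTrack(new BinkAudioTrack(_audioTracks[i]));

	_bink = stream;
	return true;
}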


@ -5,8 +5,7 @@ MODULE_OBJS := \
video_decoder.o
ifdef USE_BINK
MODULE_OBJS += \
bink_decoder.o \
bink_decoder_seek.o
bink_decoder.o
endif
# Include common rules


@ -22,6 +22,7 @@
#include "video/video_decoder.h"
#include "audio/audiostream.h"
#include "audio/mixer.h" // for kMaxChannelVolume
#include "common/rational.h"
@ -33,7 +34,43 @@
namespace Video {
VideoDecoder::VideoDecoder() {
reset();
_startTime = 0;
_dirtyPalette = false;
_palette = 0;
_playbackRate = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
_pauseLevel = 0;
_needsUpdate = false;
_lastTimeChange = 0;
_endTime = 0;
_endTimeSet = false;
// Find the best format for output
_defaultHighColorFormat = g_system->getScreenFormat();
if (_defaultHighColorFormat.bytesPerPixel == 1)
_defaultHighColorFormat = Graphics::PixelFormat(4, 8, 8, 8, 8, 8, 16, 24, 0);
}
void VideoDecoder::close() {
if (isPlaying())
stop();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
delete *it;
_tracks.clear();
_dirtyPalette = false;
_palette = 0;
_startTime = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
_pauseLevel = 0;
_needsUpdate = false;
_lastTimeChange = 0;
_endTime = 0;
_endTimeSet = false;
}
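The base close() now stops playback, deletes every track and resets the shared state, so (as the header notes) a subclass only calls it at the start of its own close() and then frees what it owns. A minimal sketch, assuming a hypothetical MyDecoder that keeps its input stream in a _fileStream member:

void MyDecoder::close() {
	VideoDecoder::close();   // stops playback, frees all tracks, resets shared state

	delete _fileStream;      // then release subclass-owned resources
	_fileStream = 0;
}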
bool VideoDecoder::loadFile(const Common::String &filename) {
@ -47,28 +84,8 @@ bool VideoDecoder::loadFile(const Common::String &filename) {
return loadStream(file);
}
uint32 VideoDecoder::getTime() const {
return g_system->getMillis() - _startTime;
}
void VideoDecoder::setSystemPalette() {
g_system->getPaletteManager()->setPalette(getPalette(), 0, 256);
}
bool VideoDecoder::needsUpdate() const {
return !endOfVideo() && getTimeToNextFrame() == 0;
}
void VideoDecoder::reset() {
_curFrame = -1;
_startTime = 0;
_pauseLevel = 0;
_audioVolume = Audio::Mixer::kMaxChannelVolume;
_audioBalance = 0;
}
bool VideoDecoder::endOfVideo() const {
return !isVideoLoaded() || (getCurFrame() >= (int32)getFrameCount() - 1);
return hasFramesLeft() && getTimeToNextFrame() == 0;
}
void VideoDecoder::pauseVideo(bool pause) {
@ -86,10 +103,14 @@ void VideoDecoder::pauseVideo(bool pause) {
if (_pauseLevel == 1 && pause) {
_pauseStartTime = g_system->getMillis(); // Store the starting time from pausing to keep it for later
pauseVideoIntern(true);
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(true);
} else if (_pauseLevel == 0) {
pauseVideoIntern(false);
addPauseTime(g_system->getMillis() - _pauseStartTime);
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(false);
_startTime += (g_system->getMillis() - _pauseStartTime);
}
}
@ -100,33 +121,651 @@ void VideoDecoder::resetPauseStartTime() {
void VideoDecoder::setVolume(byte volume) {
_audioVolume = volume;
updateVolume();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->setVolume(_audioVolume);
}
void VideoDecoder::setBalance(int8 balance) {
_audioBalance = balance;
updateBalance();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->setBalance(_audioBalance);
}
uint32 FixedRateVideoDecoder::getTimeToNextFrame() const {
if (endOfVideo() || _curFrame < 0)
bool VideoDecoder::isVideoLoaded() const {
return !_tracks.empty();
}
uint16 VideoDecoder::getWidth() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getWidth();
return 0;
}
uint16 VideoDecoder::getHeight() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getHeight();
return 0;
}
Graphics::PixelFormat VideoDecoder::getPixelFormat() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
return ((VideoTrack *)*it)->getPixelFormat();
return Graphics::PixelFormat();
}
const Graphics::Surface *VideoDecoder::decodeNextFrame() {
_needsUpdate = false;
readNextPacket();
VideoTrack *track = findNextVideoTrack();
if (!track)
return 0;
const Graphics::Surface *frame = track->decodeNextFrame();
if (track->hasDirtyPalette()) {
_palette = track->getPalette();
_dirtyPalette = true;
}
return frame;
}
const byte *VideoDecoder::getPalette() {
_dirtyPalette = false;
return _palette;
}
int VideoDecoder::getCurFrame() const {
int32 frame = -1;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
frame += ((VideoTrack *)*it)->getCurFrame() + 1;
return frame;
}
uint32 VideoDecoder::getFrameCount() const {
int count = 0;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo)
count += ((VideoTrack *)*it)->getFrameCount();
return count;
}
uint32 VideoDecoder::getTime() const {
if (!isPlaying())
return _lastTimeChange.msecs();
if (isPaused())
return (_playbackRate * (_pauseStartTime - _startTime)).toInt();
if (useAudioSync()) {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
if ((*it)->getTrackType() == Track::kTrackTypeAudio && !(*it)->endOfTrack()) {
uint32 time = ((const AudioTrack *)*it)->getRunningTime();
if (time != 0)
return time + _lastTimeChange.msecs();
}
}
}
return (_playbackRate * (g_system->getMillis() - _startTime)).toInt();
}
uint32 VideoDecoder::getTimeToNextFrame() const {
if (endOfVideo() || _needsUpdate)
return 0;
const VideoTrack *track = findNextVideoTrack();
if (!track)
return 0;
uint32 elapsedTime = getTime();
uint32 nextFrameStartTime = getFrameBeginTime(_curFrame + 1);
uint32 nextFrameStartTime = track->getNextFrameStartTime();
// If the time that the next frame should be shown has passed
// the frame should be shown ASAP.
if (nextFrameStartTime <= elapsedTime)
return 0;
return nextFrameStartTime - elapsedTime;
}
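A quick worked example of how this pacing behaves, assuming a single FixedRateVideoTrack running at 30 fps (all numbers are illustrative):

// Hypothetical timeline for one 30 fps video track:
//   current frame                   = 11
//   track->getNextFrameStartTime()  = 12 * 1000 / 30 = 400 ms
//   getTime()                       = 367 ms
//   getTimeToNextFrame()            = 400 - 367      = 33 ms
// Once the clock passes 400 ms this returns 0, and needsUpdate(), now defined
// as hasFramesLeft() && getTimeToNextFrame() == 0, tells the caller to fetch
// the next frame via decodeNextFrame().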
uint32 FixedRateVideoDecoder::getFrameBeginTime(uint32 frame) const {
Common::Rational beginTime = frame * 1000;
beginTime /= getFrameRate();
return beginTime.toInt();
bool VideoDecoder::endOfVideo() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->endOfTrack() && (!isPlaying() || (*it)->getTrackType() != Track::kTrackTypeVideo || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
return false;
return true;
}
bool VideoDecoder::isRewindable() const {
if (!isVideoLoaded())
return false;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->isRewindable())
return false;
return true;
}
bool VideoDecoder::rewind() {
if (!isRewindable())
return false;
// Stop all tracks so they can be rewound
if (isPlaying())
stopAudio();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->rewind())
return false;
// Now that we've rewound, start all tracks again
if (isPlaying())
startAudio();
_lastTimeChange = 0;
_startTime = g_system->getMillis();
resetPauseStartTime();
return true;
}
bool VideoDecoder::isSeekable() const {
if (!isVideoLoaded())
return false;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->isSeekable())
return false;
return true;
}
bool VideoDecoder::seek(const Audio::Timestamp &time) {
if (!isSeekable())
return false;
// Stop all tracks so they can be seeked
if (isPlaying())
stopAudio();
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if (!(*it)->seek(time))
return false;
_lastTimeChange = time;
// Now that we've seeked, start all tracks again
// Also reset our start time
if (isPlaying()) {
startAudio();
_startTime = g_system->getMillis() - time.msecs();
}
resetPauseStartTime();
_needsUpdate = true;
return true;
}
bool VideoDecoder::seekToFrame(uint frame) {
VideoTrack *track = 0;
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
if (!(*it)->isSeekable())
return false;
if ((*it)->getTrackType() == Track::kTrackTypeVideo) {
// We only allow seeking by frame when one video track
// is present
if (track)
return false;
track = (VideoTrack *)*it;
}
}
// If we didn't find a video track, we can't seek by frame (of course)
if (!track)
return false;
Audio::Timestamp time = track->getFrameTime(frame);
if (time < 0)
return false;
return seek(time);
}
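seekToFrame() only translates the frame number; seek() does the actual work. A small hedged example, where 'decoder' (illustrative) wraps a single seekable 25 fps video track:

// getFrameTime(250) == Audio::Timestamp(0, 250, 25), i.e. exactly 10 seconds,
// so this call resolves to a seek() to the same 10 s mark.
decoder->seekToFrame(250);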
void VideoDecoder::start() {
if (!isPlaying())
setRate(1);
}
void VideoDecoder::stop() {
if (!isPlaying())
return;
// Stop audio here so we don't have it affect getTime()
stopAudio();
// Keep the time marked down in case we start up again
// We do this before _playbackRate is set so we don't get
// _lastTimeChange returned, but before _pauseLevel is
// reset.
_lastTimeChange = getTime();
_playbackRate = 0;
_startTime = 0;
_palette = 0;
_dirtyPalette = false;
_needsUpdate = false;
// Also reset the pause state.
_pauseLevel = 0;
// Reset the pause state of the tracks too
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
(*it)->pause(false);
}
void VideoDecoder::setRate(const Common::Rational &rate) {
if (!isVideoLoaded() || _playbackRate == rate)
return;
if (rate == 0) {
stop();
return;
} else if (rate != 1 && hasAudio()) {
warning("Cannot set custom rate in videos with audio");
return;
}
Common::Rational targetRate = rate;
if (rate < 0) {
// TODO: Implement support for this
warning("Cannot set custom rate to backwards");
targetRate = 1;
if (_playbackRate == targetRate)
return;
}
if (_playbackRate != 0)
_lastTimeChange = getTime();
_playbackRate = targetRate;
_startTime = g_system->getMillis();
// Adjust start time if we've seeked to something besides zero time
if (_lastTimeChange.totalNumberOfFrames() != 0)
_startTime -= (_lastTimeChange.msecs() / _playbackRate).toInt();
startAudio();
}
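start() boils down to setRate(1) and setRate(0) falls through to stop(), so the rate is the single switch for playback state. A short hedged example of double-speed playback, which per the warning above only works on videos without audio tracks ('decoder' is illustrative):

decoder->start();      // same as setRate(1) when not already playing
decoder->setRate(2);   // 2x playback; warns and returns if the video has audio
// ... normal decode loop ...
decoder->setRate(0);   // falls through to stop()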
bool VideoDecoder::isPlaying() const {
return _playbackRate != 0;
}
Audio::Timestamp VideoDecoder::getDuration() const {
Audio::Timestamp maxDuration(0, 1000);
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
Audio::Timestamp duration = (*it)->getDuration();
if (duration > maxDuration)
maxDuration = duration;
}
return maxDuration;
}
VideoDecoder::Track::Track() {
_paused = false;
}
bool VideoDecoder::Track::isRewindable() const {
return isSeekable();
}
bool VideoDecoder::Track::rewind() {
return seek(Audio::Timestamp(0, 1000));
}
void VideoDecoder::Track::pause(bool shouldPause) {
_paused = shouldPause;
pauseIntern(shouldPause);
}
Audio::Timestamp VideoDecoder::Track::getDuration() const {
return Audio::Timestamp(0, 1000);
}
bool VideoDecoder::VideoTrack::endOfTrack() const {
return getCurFrame() >= (getFrameCount() - 1);
}
Audio::Timestamp VideoDecoder::VideoTrack::getFrameTime(uint frame) const {
// Default implementation: Return an invalid (negative) number
return Audio::Timestamp().addFrames(-1);
}
uint32 VideoDecoder::FixedRateVideoTrack::getNextFrameStartTime() const {
if (endOfTrack() || getCurFrame() < 0)
return 0;
return getFrameTime(getCurFrame() + 1).msecs();
}
Audio::Timestamp VideoDecoder::FixedRateVideoTrack::getFrameTime(uint frame) const {
// Try to get as accurate as possible, considering we have a fractional frame rate
// (which Audio::Timestamp doesn't support).
Common::Rational frameRate = getFrameRate();
if (frameRate == frameRate.toInt()) // The nice case (a whole number)
return Audio::Timestamp(0, frame, frameRate.toInt());
// Just convert to milliseconds.
Common::Rational time = frame * 1000;
time /= frameRate;
return Audio::Timestamp(time.toInt(), 1000);
}
uint VideoDecoder::FixedRateVideoTrack::getFrameAtTime(const Audio::Timestamp &time) const {
Common::Rational frameRate = getFrameRate();
// Easy conversion
if (frameRate == time.framerate())
return time.totalNumberOfFrames();
// Default case
return (time.totalNumberOfFrames() * frameRate / time.framerate()).toInt();
}
Audio::Timestamp VideoDecoder::FixedRateVideoTrack::getDuration() const {
return getFrameTime(getFrameCount());
}
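Two hedged worked cases for getFrameTime(), which both getNextFrameStartTime() and getDuration() build on (the rates are examples, not values from this commit):

// Whole-number rate, e.g. 25 fps:
//   getFrameTime(250) == Audio::Timestamp(0, 250, 25)          // exactly 10 s
// Fractional rate, e.g. 25/2 (12.5 fps), which takes the millisecond path:
//   getFrameTime(100) == Audio::Timestamp((100 * 1000 * 2) / 25, 1000)
//                     == Audio::Timestamp(8000, 1000)          // exactly 8 s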
bool VideoDecoder::AudioTrack::endOfTrack() const {
Audio::AudioStream *stream = getAudioStream();
return !stream || !g_system->getMixer()->isSoundHandleActive(_handle) || stream->endOfData();
}
void VideoDecoder::AudioTrack::setVolume(byte volume) {
_volume = volume;
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->setChannelVolume(_handle, _volume);
}
void VideoDecoder::AudioTrack::setBalance(int8 balance) {
_balance = balance;
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->setChannelBalance(_handle, _balance);
}
void VideoDecoder::AudioTrack::start() {
stop();
Audio::AudioStream *stream = getAudioStream();
assert(stream);
g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::NO);
// Pause the audio again if we're still paused
if (isPaused())
g_system->getMixer()->pauseHandle(_handle, true);
}
void VideoDecoder::AudioTrack::stop() {
g_system->getMixer()->stopHandle(_handle);
}
void VideoDecoder::AudioTrack::start(const Audio::Timestamp &limit) {
stop();
Audio::AudioStream *stream = getAudioStream();
assert(stream);
stream = Audio::makeLimitingAudioStream(stream, limit, DisposeAfterUse::NO);
g_system->getMixer()->playStream(getSoundType(), &_handle, stream, -1, getVolume(), getBalance(), DisposeAfterUse::YES);
// Pause the audio again if we're still paused
if (isPaused())
g_system->getMixer()->pauseHandle(_handle, true);
}
uint32 VideoDecoder::AudioTrack::getRunningTime() const {
if (g_system->getMixer()->isSoundHandleActive(_handle))
return g_system->getMixer()->getSoundElapsedTime(_handle);
return 0;
}
void VideoDecoder::AudioTrack::pauseIntern(bool shouldPause) {
if (g_system->getMixer()->isSoundHandleActive(_handle))
g_system->getMixer()->pauseHandle(_handle, shouldPause);
}
Audio::AudioStream *VideoDecoder::RewindableAudioTrack::getAudioStream() const {
return getRewindableAudioStream();
}
bool VideoDecoder::RewindableAudioTrack::rewind() {
Audio::RewindableAudioStream *stream = getRewindableAudioStream();
assert(stream);
return stream->rewind();
}
Audio::Timestamp VideoDecoder::SeekableAudioTrack::getDuration() const {
Audio::SeekableAudioStream *stream = getSeekableAudioStream();
assert(stream);
return stream->getLength();
}
Audio::AudioStream *VideoDecoder::SeekableAudioTrack::getAudioStream() const {
return getSeekableAudioStream();
}
bool VideoDecoder::SeekableAudioTrack::seek(const Audio::Timestamp &time) {
Audio::SeekableAudioStream *stream = getSeekableAudioStream();
assert(stream);
return stream->seek(time);
}
VideoDecoder::StreamFileAudioTrack::StreamFileAudioTrack() {
_stream = 0;
}
VideoDecoder::StreamFileAudioTrack::~StreamFileAudioTrack() {
delete _stream;
}
bool VideoDecoder::StreamFileAudioTrack::loadFromFile(const Common::String &baseName) {
// TODO: Make sure the stream isn't being played
delete _stream;
_stream = Audio::SeekableAudioStream::openStreamFile(baseName);
return _stream != 0;
}
void VideoDecoder::addTrack(Track *track) {
_tracks.push_back(track);
// Update volume settings if it's an audio track
if (track->getTrackType() == Track::kTrackTypeAudio) {
((AudioTrack *)track)->setVolume(_audioVolume);
((AudioTrack *)track)->setBalance(_audioBalance);
}
// Keep the track paused if we're paused
if (isPaused())
track->pause(true);
// Start the track if we're playing
if (isPlaying() && track->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)track)->start();
}
bool VideoDecoder::addStreamFileTrack(const Common::String &baseName) {
// Only allow adding external tracks if a video is already loaded
if (!isVideoLoaded())
return false;
StreamFileAudioTrack *track = new StreamFileAudioTrack();
bool result = track->loadFromFile(baseName);
if (result)
addTrack(track);
return result;
}
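addStreamFileTrack() is the hook for engines that ship a movie's audio in a separate file: StreamFileAudioTrack wraps Audio::SeekableAudioStream::openStreamFile(), which probes for a compressed audio file matching the base name (which extensions it tries depends on the codecs compiled in). A hedged usage sketch with made-up file names:

// Hypothetical: the video file only carries frames; the dialogue sits next to
// it as "intro_speech.ogg" (or .mp3/.flac, whatever openStreamFile finds).
if (decoder->loadFile("intro.bik"))
	decoder->addStreamFileTrack("intro_speech");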
void VideoDecoder::setEndTime(const Audio::Timestamp &endTime) {
Audio::Timestamp startTime = 0;
if (isPlaying()) {
startTime = getTime();
stopAudio();
}
_endTime = endTime;
_endTimeSet = true;
if (startTime > endTime)
return;
if (isPlaying()) {
// We'll assume the audio track is going to start up at the same time it just was
// and therefore not do any seeking.
// Might want to set it anyway if we're seekable.
startAudioLimit(_endTime.msecs() - startTime.msecs());
_lastTimeChange = startTime;
}
}
VideoDecoder::Track *VideoDecoder::getTrack(uint track) {
if (track > _tracks.size())
return 0;
return _tracks[track];
}
const VideoDecoder::Track *VideoDecoder::getTrack(uint track) const {
if (track > _tracks.size())
return 0;
return _tracks[track];
}
bool VideoDecoder::endOfVideoTracks() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack())
return false;
return true;
}
VideoDecoder::VideoTrack *VideoDecoder::findNextVideoTrack() {
VideoTrack *bestTrack = 0;
uint32 bestTime = 0xFFFFFFFF;
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++) {
if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack()) {
VideoTrack *track = (VideoTrack *)*it;
uint32 time = track->getNextFrameStartTime();
if (time < bestTime) {
bestTime = time;
bestTrack = track;
}
}
}
return bestTrack;
}
const VideoDecoder::VideoTrack *VideoDecoder::findNextVideoTrack() const {
const VideoTrack *bestTrack = 0;
uint32 bestTime = 0xFFFFFFFF;
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++) {
if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack()) {
const VideoTrack *track = (const VideoTrack *)*it;
uint32 time = track->getNextFrameStartTime();
if (time < bestTime) {
bestTime = time;
bestTrack = track;
}
}
}
return bestTrack;
}
void VideoDecoder::startAudio() {
if (_endTimeSet) {
// HACK: Timestamp's subtraction asserts out when subtracting two times
// with different rates.
startAudioLimit(_endTime - _lastTimeChange.convertToFramerate(_endTime.framerate()));
return;
}
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->start();
}
void VideoDecoder::stopAudio() {
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->stop();
}
void VideoDecoder::startAudioLimit(const Audio::Timestamp &limit) {
for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
((AudioTrack *)*it)->start(limit);
}
bool VideoDecoder::hasFramesLeft() const {
// This is similar to endOfVideo(), except it doesn't take Audio into account (and returns true if not the end of the video)
// This is only used for needsUpdate() atm so that setEndTime() works properly
// And unlike endOfVideoTracks(), this takes into account _endTime
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeVideo && !(*it)->endOfTrack() && (!isPlaying() || !_endTimeSet || ((VideoTrack *)*it)->getNextFrameStartTime() < (uint)_endTime.msecs()))
return true;
return false;
}
bool VideoDecoder::hasAudio() const {
for (TrackList::const_iterator it = _tracks.begin(); it != _tracks.end(); it++)
if ((*it)->getTrackType() == Track::kTrackTypeAudio)
return true;
return false;
}
} // End of namespace Video
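Taken together, the caller-side pattern for the new API is: load, start(), poll needsUpdate(), draw, repeat. A hedged sketch of how an engine might drive it; playVideo and the 10 ms sleep are illustrative, and the blit is left engine-specific:

#include "common/system.h"
#include "video/video_decoder.h"

static void playVideo(Video::VideoDecoder *decoder, const Common::String &filename) {
	if (!decoder->loadFile(filename))
		return;

	decoder->start();

	while (!decoder->endOfVideo()) {
		if (decoder->needsUpdate()) {
			const Graphics::Surface *frame = decoder->decodeNextFrame();
			if (frame) {
				// blit 'frame' to the screen here (engine-specific)
			}
		}
		// A real loop would also poll events here.
		g_system->delayMillis(10); // pacing comes from getTimeToNextFrame() via needsUpdate()
	}

	decoder->stop();
	decoder->close();
}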


@ -23,18 +23,24 @@
#ifndef VIDEO_DECODER_H
#define VIDEO_DECODER_H
#include "common/str.h"
#include "audio/mixer.h"
#include "audio/timestamp.h" // TODO: Move this to common/ ?
#include "common/array.h"
#include "common/rational.h"
#include "common/str.h"
#include "graphics/pixelformat.h"
namespace Audio {
class AudioStream;
class RewindableAudioStream;
class SeekableAudioStream;
}
namespace Common {
class Rational;
class SeekableReadStream;
}
namespace Graphics {
struct PixelFormat;
struct Surface;
}
@ -48,10 +54,14 @@ public:
VideoDecoder();
virtual ~VideoDecoder() {}
/////////////////////////////////////////
// Opening/Closing a Video
/////////////////////////////////////////
/**
* Load a video from a file with the given name.
*
* A default implementation using loadStream is provided.
* A default implementation using Common::File and loadStream is provided.
*
* @param filename the filename to load
* @return whether loading the file succeeded
@ -62,6 +72,10 @@ public:
* Load a video from a generic read stream. The ownership of the
* stream object transfers to this VideoDecoder instance, which is
* hence also responsible for eventually deleting it.
*
* Implementations of this function are required to call addTrack()
* for each track in the video upon success.
*
* @param stream the stream to load
* @return whether loading the stream succeeded
*/
@ -69,103 +83,107 @@ public:
/**
* Close the active video stream and free any associated resources.
*
* All subclasses that need to close their own resources should still
* call the base class' close() function at the start of their function.
*/
virtual void close() = 0;
virtual void close();
/**
* Returns if a video stream is currently loaded or not.
*/
virtual bool isVideoLoaded() const = 0;
bool isVideoLoaded() const;
/////////////////////////////////////////
// Playback Control
/////////////////////////////////////////
/**
* Returns the width of the video's frames.
* @return the width of the video's frames
*/
virtual uint16 getWidth() const = 0;
/**
* Returns the height of the video's frames.
* @return the height of the video's frames
*/
virtual uint16 getHeight() const = 0;
/**
* Get the pixel format of the currently loaded video.
*/
virtual Graphics::PixelFormat getPixelFormat() const = 0;
/**
* Get the palette for the video in RGB format (if 8bpp or less).
*/
virtual const byte *getPalette() { return 0; }
/**
* Returns if the palette is dirty or not.
*/
virtual bool hasDirtyPalette() const { return false; }
/**
* Set the system palette to the palette returned by getPalette.
* @see getPalette
*/
void setSystemPalette();
/**
* Returns the current frame number of the video.
* @return the last frame decoded by the video
*/
virtual int32 getCurFrame() const { return _curFrame; }
/**
* Returns the number of frames in the video.
* @return the number of frames in the video
*/
virtual uint32 getFrameCount() const = 0;
/**
* Returns the time position (in ms) of the current video.
* This can be based on the "wall clock" time as determined by
* OSystem::getMillis() or the current time of any audio track
* running in the video, and takes pausing the video into account.
* Begin playback of the video at normal speed.
*
* As such, it will differ from what multiplying getCurFrame() by
* some constant would yield, e.g. for a video with non-constant
* frame rate.
* @note This has no effect if the video is already playing.
*/
void start();
/**
* Stop playback of the video.
*
* Due to the nature of the timing, this value may not always be
* completely accurate (since our mixer does not have precise
* timing).
* @note This has no effect if the video is not playing.
*/
virtual uint32 getTime() const;
void stop();
/**
* Return the time (in ms) until the next frame should be displayed.
* Set the rate of playback.
*
* For instance, a rate of 0 would stop the video, while a rate of 1
* would play the video normally. Passing 2 to this function would
* play the video at twice the normal speed.
*
* @note This function does not work for non-0/1 rates on videos that
* have audio tracks.
*
* @todo This currently does not implement backwards playback, but will
* be implemented soon.
*/
virtual uint32 getTimeToNextFrame() const = 0;
void setRate(const Common::Rational &rate);
/**
* Check whether a new frame should be decoded, i.e. because enough
* time has elapsed since the last frame was decoded.
* @return whether a new frame should be decoded or not
* Returns the rate at which the video is being played.
*/
virtual bool needsUpdate() const;
Common::Rational getRate() const { return _playbackRate; }
/**
* Decode the next frame into a surface and return the latter.
* @return a surface containing the decoded frame, or 0
* @note Ownership of the returned surface stays with the VideoDecoder,
* hence the caller must *not* free it.
* @note this may return 0, in which case the last frame should be kept on screen
* Returns if the video is currently playing or not.
*
* This is not equivalent to the inverse of endOfVideo(). A video keeps
* its playing status even after reaching the end of the video. This will
* return true after calling start() and will continue to return true
* until stop() (or close()) is called.
*/
virtual const Graphics::Surface *decodeNextFrame() = 0;
bool isPlaying() const;
/**
* Returns if the video has finished playing or not.
* @return true if the video has finished playing or if none is loaded, false otherwise
* Returns if a video is rewindable or not. The default implementation
* polls each track for rewindability.
*/
virtual bool endOfVideo() const;
virtual bool isRewindable() const;
/**
* Rewind a video to its beginning.
*
* If the video is playing, it will continue to play. The default
* implementation will rewind each track.
*
* @return true on success, false otherwise
*/
virtual bool rewind();
/**
* Returns if a video is seekable or not. The default implementation
* polls each track for seekability.
*/
virtual bool isSeekable() const;
/**
* Seek to a given time in the video.
*
* If the video is playing, it will continue to play. The default
* implementation will seek each track and must still be called
* from any other implementation.
*
* @param time The time to seek to
* @return true on success, false otherwise
*/
virtual bool seek(const Audio::Timestamp &time);
/**
* Seek to a given frame.
*
* This only works when one video track is present, and that track
* supports getFrameTime(). This calls seek() internally.
*/
bool seekToFrame(uint frame);
/**
* Pause or resume the video. This should stop/resume any audio playback
@ -185,56 +203,518 @@ public:
*/
bool isPaused() const { return _pauseLevel != 0; }
/**
* Set the time for this video to end at. At this time in the video,
* all audio will stop and endOfVideo() will return true.
*
* While the setting is stored even if a video is not playing,
* endOfVideo() is only affected when the video is playing.
*/
void setEndTime(const Audio::Timestamp &endTime);
/**
* Get the stop time of the video (if not set, zero)
*/
Audio::Timestamp getEndTime() const { return _endTime; }
/////////////////////////////////////////
// Playback Status
/////////////////////////////////////////
/**
* Returns if the video has reached the end or not.
* @return true if the video has finished playing or if none is loaded, false otherwise
*/
bool endOfVideo() const;
/**
* Returns the current frame number of the video.
* @return the last frame decoded by the video
*/
int getCurFrame() const;
/**
* Returns the number of frames in the video.
* @return the number of frames in the video
*/
uint32 getFrameCount() const;
/**
* Returns the time position (in ms) of the current video.
* This can be based on the "wall clock" time as determined by
* OSystem::getMillis() or the current time of any audio track
* running in the video, and takes pausing the video into account.
*
* As such, it will differ from what multiplying getCurFrame() by
* some constant would yield, e.g. for a video with non-constant
* frame rate.
*
* Due to the nature of the timing, this value may not always be
* completely accurate (since our mixer does not have precise
* timing).
*/
uint32 getTime() const;
/////////////////////////////////////////
// Video Info
/////////////////////////////////////////
/**
* Returns the width of the video's frames.
*
* By default, this finds the largest width between all of the loaded
* tracks. However, a subclass may override this if it does any kind
* of post-processing on it.
*
* @return the width of the video's frames
*/
virtual uint16 getWidth() const;
/**
* Returns the height of the video's frames.
*
* By default, this finds the largest height between all of the loaded
* tracks. However, a subclass may override this if it does any kind
* of post-processing on it.
*
* @return the height of the video's frames
*/
virtual uint16 getHeight() const;
/**
* Get the pixel format of the currently loaded video.
*/
Graphics::PixelFormat getPixelFormat() const;
/**
* Get the duration of the video.
*
* If the duration is unknown, this will return 0. If this is not
* overriden, it will take the length of the longest track.
*/
virtual Audio::Timestamp getDuration() const;
/////////////////////////////////////////
// Frame Decoding
/////////////////////////////////////////
/**
* Get the palette for the video in RGB format (if 8bpp or less).
*
* The palette's format is the same as PaletteManager's palette
* (interleaved RGB values).
*/
const byte *getPalette();
/**
* Returns if the palette is dirty or not.
*/
bool hasDirtyPalette() const { return _dirtyPalette; }
/**
* Return the time (in ms) until the next frame should be displayed.
*/
uint32 getTimeToNextFrame() const;
/**
* Check whether a new frame should be decoded, i.e. because enough
* time has elapsed since the last frame was decoded.
* @return whether a new frame should be decoded or not
*/
bool needsUpdate() const;
/**
* Decode the next frame into a surface and return the latter.
*
* A subclass may override this, but must still call this function. As an
* example, a subclass may do this to apply some global video scale to an
* individual track's frames.
*
* Note that this will call readNextPacket() internally first before calling
* the next video track's decodeNextFrame() function.
*
* @return a surface containing the decoded frame, or 0
* @note Ownership of the returned surface stays with the VideoDecoder,
* hence the caller must *not* free it.
* @note this may return 0, in which case the last frame should be kept on screen
*/
virtual const Graphics::Surface *decodeNextFrame();
/**
* Set the default high color format for videos that convert from YUV.
*
* By default, VideoDecoder will attempt to use the screen format
* if it's >8bpp and use a 32bpp format when not.
*
* This must be set before calling loadStream().
*/
void setDefaultHighColorFormat(const Graphics::PixelFormat &format) { _defaultHighColorFormat = format; }
/////////////////////////////////////////
// Audio Control
/////////////////////////////////////////
/**
* Get the current volume at which the audio in the video is being played
* @return the current volume at which the audio in the video is being played
*/
virtual byte getVolume() const { return _audioVolume; }
byte getVolume() const { return _audioVolume; }
/**
* Set the volume at which the audio in the video should be played.
* This setting remains until reset() is called (which may be called
* from loadStream() or close()). The default volume is the maximum.
*
* @note This function calls updateVolume() by default.
* This setting remains until close() is called (which may be called
* from loadStream()). The default volume is the maximum.
*
* @param volume The volume at which to play the audio in the video
*/
virtual void setVolume(byte volume);
void setVolume(byte volume);
/**
* Get the current balance at which the audio in the video is being played
* @return the current balance at which the audio in the video is being played
*/
virtual int8 getBalance() const { return _audioBalance; }
int8 getBalance() const { return _audioBalance; }
/**
* Set the balance at which the audio in the video should be played.
* This setting remains until reset() is called (which may be called
* from loadStream() or close()). The default balance is 0.
*
* @note This function calls updateBalance() by default.
* This setting remains until close() is called (which may be called
* from loadStream()). The default balance is 0.
*
* @param balance The balance at which to play the audio in the video
*/
virtual void setBalance(int8 balance);
void setBalance(int8 balance);
/**
* Add an audio track from a stream file.
*
* This calls SeekableAudioStream::openStreamFile() internally
*/
bool addStreamFileTrack(const Common::String &baseName);
protected:
/**
* Resets _curFrame and _startTime. Should be called from every close() function.
* An abstract representation of a track in a movie. Since tracks here are designed
* to work independently, they should not reference any other track(s) in the video.
*/
void reset();
class Track {
public:
Track();
virtual ~Track() {}
/**
* The types of tracks this class can be.
*/
enum TrackType {
kTrackTypeNone,
kTrackTypeVideo,
kTrackTypeAudio
};
/**
* Get the type of track.
*/
virtual TrackType getTrackType() const = 0;
/**
* Return if the track has finished.
*/
virtual bool endOfTrack() const = 0;
/**
* Return if the track is rewindable.
*
* If a video is seekable, it does not need to implement this
* for it to also be rewindable.
*/
virtual bool isRewindable() const;
/**
* Rewind the video to the beginning.
*
* If a video is seekable, it does not need to implement this
* for it to also be rewindable.
*
* @return true on success, false otherwise.
*/
virtual bool rewind();
/**
* Return if the track is seekable.
*/
virtual bool isSeekable() const { return false; }
/**
* Seek to the given time.
* @param time The time to seek to, from the beginning of the video.
* @return true on success, false otherwise.
*/
virtual bool seek(const Audio::Timestamp &time) { return false; }
/**
* Set the pause status of the track.
*/
void pause(bool shouldPause);
/**
* Return if the track is paused.
*/
bool isPaused() const { return _paused; }
/**
* Get the duration of the track (starting from this track's start time).
*
* By default, this returns 0 for unknown.
*/
virtual Audio::Timestamp getDuration() const;
protected:
/**
* Function called by pause() for subclasses to implement.
*/
virtual void pauseIntern(bool shouldPause) {}
private:
bool _paused;
};
/**
* Actual implementation of pause by subclasses. See pause()
* for details.
* An abstract representation of a video track.
*/
virtual void pauseVideoIntern(bool pause) {}
class VideoTrack : public Track {
public:
VideoTrack() {}
virtual ~VideoTrack() {}
TrackType getTrackType() const { return kTrackTypeVideo; }
virtual bool endOfTrack() const;
/**
* Get the width of this track
*/
virtual uint16 getWidth() const = 0;
/**
* Get the height of this track
*/
virtual uint16 getHeight() const = 0;
/**
* Get the pixel format of this track
*/
virtual Graphics::PixelFormat getPixelFormat() const = 0;
/**
* Get the current frame of this track
*
* @see VideoDecoder::getCurFrame()
*/
virtual int getCurFrame() const = 0;
/**
* Get the frame count of this track
*
* @note If the frame count is unknown, return 0 (which is also
* the default implementation of the function). However, one must
* also implement endOfTrack() in that case.
*/
virtual int getFrameCount() const { return 0; }
/**
* Get the start time of the next frame in milliseconds since
* the start of the video
*/
virtual uint32 getNextFrameStartTime() const = 0;
/**
* Decode the next frame
*/
virtual const Graphics::Surface *decodeNextFrame() = 0;
/**
* Get the palette currently in use by this track
*/
virtual const byte *getPalette() const { return 0; }
/**
* Does the palette currently in use by this track need to be updated?
*/
virtual bool hasDirtyPalette() const { return false; }
/**
* Get the time the given frame should be shown.
*
* By default, this returns a negative (invalid) value. This function
* should only be used by VideoDecoder::seekToFrame().
*/
virtual Audio::Timestamp getFrameTime(uint frame) const;
};
/**
* Add the time the video has been paused to maintain sync
* A VideoTrack that is played at a constant rate.
*
* If the frame count is unknown, you must override endOfTrack().
*/
virtual void addPauseTime(uint32 ms) { _startTime += ms; }
class FixedRateVideoTrack : public VideoTrack {
public:
FixedRateVideoTrack() {}
virtual ~FixedRateVideoTrack() {}
uint32 getNextFrameStartTime() const;
virtual Audio::Timestamp getDuration() const;
Audio::Timestamp getFrameTime(uint frame) const;
protected:
/**
* Get the rate at which this track is played.
*/
virtual Common::Rational getFrameRate() const = 0;
/**
* Get the frame that should be displaying at the given time. This is
* helpful for someone implementing seek().
*/
uint getFrameAtTime(const Audio::Timestamp &time) const;
};
/**
* An abstract representation of an audio track.
*/
class AudioTrack : public Track {
public:
AudioTrack() {}
virtual ~AudioTrack() {}
TrackType getTrackType() const { return kTrackTypeAudio; }
virtual bool endOfTrack() const;
/**
* Start playing this track
*/
void start();
/**
* Stop playing this track
*/
void stop();
void start(const Audio::Timestamp &limit);
/**
* Get the volume for this track
*/
byte getVolume() const { return _volume; }
/**
* Set the volume for this track
*/
void setVolume(byte volume);
/**
* Get the balance for this track
*/
int8 getBalance() const { return _balance; }
/**
* Set the balance for this track
*/
void setBalance(int8 balance);
/**
* Get the time the AudioStream behind this track has been
* running
*/
uint32 getRunningTime() const;
/**
* Get the sound type to be used when playing this audio track
*/
virtual Audio::Mixer::SoundType getSoundType() const { return Audio::Mixer::kPlainSoundType; }
protected:
void pauseIntern(bool shouldPause);
/**
* Get the AudioStream that is the representation of this AudioTrack
*/
virtual Audio::AudioStream *getAudioStream() const = 0;
private:
Audio::SoundHandle _handle;
byte _volume;
int8 _balance;
};
/**
* An AudioTrack that implements isRewindable() and rewind() using
* RewindableAudioStream.
*/
class RewindableAudioTrack : public AudioTrack {
public:
RewindableAudioTrack() {}
virtual ~RewindableAudioTrack() {}
bool isRewindable() const { return true; }
bool rewind();
protected:
Audio::AudioStream *getAudioStream() const;
/**
* Get the RewindableAudioStream pointer to be used by this class
* for rewind() and getAudioStream()
*/
virtual Audio::RewindableAudioStream *getRewindableAudioStream() const = 0;
};
/**
* An AudioTrack that implements isSeekable() and seek() using
* SeekableAudioStream.
*/
class SeekableAudioTrack : public AudioTrack {
public:
SeekableAudioTrack() {}
virtual ~SeekableAudioTrack() {}
bool isSeekable() const { return true; }
bool seek(const Audio::Timestamp &time);
Audio::Timestamp getDuration() const;
protected:
Audio::AudioStream *getAudioStream() const;
/**
* Get the SeekableAudioStream pointer to be used by this class
* for seek(), getDuration(), and getAudioStream()
*/
virtual Audio::SeekableAudioStream *getSeekableAudioStream() const = 0;
};
/**
* A SeekableAudioTrack that constructs its SeekableAudioStream using
* SeekableAudioStream::openStreamFile()
*/
class StreamFileAudioTrack : public SeekableAudioTrack {
public:
StreamFileAudioTrack();
~StreamFileAudioTrack();
/**
* Load the track from a file with the given base name.
*
* @return true on success, false otherwise
*/
bool loadFromFile(const Common::String &baseName);
protected:
Audio::SeekableAudioStream *_stream;
Audio::SeekableAudioStream *getSeekableAudioStream() const { return _stream; }
};
/**
* Reset the pause start time (which should be called when seeking)
@ -242,81 +722,112 @@ protected:
void resetPauseStartTime();
/**
* Update currently playing audio tracks with the new volume setting
* Decode enough data for the next frame and enough audio to last that long.
*
* This function is used by this class' decodeNextFrame() function. A subclass
* of a Track may decide to just have its decodeNextFrame() function read
* and decode the frame, but only if it is the only track in the video.
*/
virtual void updateVolume() {}
virtual void readNextPacket() {}
/**
* Update currently playing audio tracks with the new balance setting
* Define a track to be used by this class.
*
* The pointer is then owned by this base class.
*/
virtual void updateBalance() {}
void addTrack(Track *track);
int32 _curFrame;
int32 _startTime;
/**
* Whether or not getTime() will sync with a playing audio track.
*
* A subclass can override this to disable this feature.
*/
virtual bool useAudioSync() const { return true; }
/**
* Get the given track based on its index.
*
* @return A valid track pointer on success, 0 otherwise
*/
Track *getTrack(uint track);
/**
* Get the given track based on its index
*
* @return A valid track pointer on success, 0 otherwise
*/
const Track *getTrack(uint track) const;
/**
* Find out if all video tracks have finished
*
* This is useful if one wants to figure out if they need to buffer all
* remaining audio in a file.
*/
bool endOfVideoTracks() const;
/**
* Get the default high color format
*/
Graphics::PixelFormat getDefaultHighColorFormat() const { return _defaultHighColorFormat; }
/**
* Find the video track with the lowest start time for the next frame
*/
VideoTrack *findNextVideoTrack();
/**
* Find the video track with the lowest start time for the next frame
*/
const VideoTrack *findNextVideoTrack() const;
/**
* Typedef helpers for accessing tracks
*/
typedef Common::Array<Track *> TrackList;
typedef TrackList::iterator TrackListIterator;
/**
* Get the begin iterator of the tracks
*/
TrackListIterator getTrackListBegin() { return _tracks.begin(); }
/**
* Get the end iterator of the tracks
*/
TrackListIterator getTrackListEnd() { return _tracks.end(); }
private:
// Tracks owned by this VideoDecoder
TrackList _tracks;
// Current playback status
bool _needsUpdate;
Audio::Timestamp _lastTimeChange, _endTime;
bool _endTimeSet;
Common::Rational _playbackRate;
// Palette settings from individual tracks
mutable bool _dirtyPalette;
const byte *_palette;
// Default PixelFormat settings
Graphics::PixelFormat _defaultHighColorFormat;
// Internal helper functions
void stopAudio();
void startAudio();
void startAudioLimit(const Audio::Timestamp &limit);
bool hasFramesLeft() const;
bool hasAudio() const;
int32 _startTime;
uint32 _pauseLevel;
uint32 _pauseStartTime;
byte _audioVolume;
int8 _audioBalance;
};
/**
* A VideoDecoder wrapper that implements getTimeToNextFrame() based on getFrameRate().
*/
class FixedRateVideoDecoder : public virtual VideoDecoder {
public:
uint32 getTimeToNextFrame() const;
protected:
/**
* Return the frame rate in frames per second.
* This returns a Rational because videos can have rates that are not integers and
* there are some videos with frame rates < 1.
*/
virtual Common::Rational getFrameRate() const = 0;
private:
uint32 getFrameBeginTime(uint32 frame) const;
};
/**
* A VideoDecoder that can be rewound back to the beginning.
*/
class RewindableVideoDecoder : public virtual VideoDecoder {
public:
/**
* Rewind to the beginning of the video.
*/
virtual void rewind() = 0;
};
/**
* A VideoDecoder that can seek to a frame or point in time.
*/
class SeekableVideoDecoder : public virtual RewindableVideoDecoder {
public:
/**
* Seek to the specified time.
*/
virtual void seekToTime(const Audio::Timestamp &time) = 0;
/**
* Seek to the specified time (in ms).
*/
void seekToTime(uint32 msecs) { seekToTime(Audio::Timestamp(msecs, 1000)); }
/**
* Implementation of RewindableVideoDecoder::rewind().
*/
virtual void rewind() { seekToTime(0); }
/**
* Get the total duration of the video (in ms).
*/
virtual uint32 getDuration() const = 0;
};
} // End of namespace Video
#endif
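Finally, a hedged sketch of what a minimal decoder looks like against this interface: a hypothetical RawDecoder with one fixed-rate, uncompressed video track. Every name, member and constant here is illustrative; only the virtuals being overridden come from the header above.

#include "common/rational.h"
#include "common/stream.h"
#include "graphics/pixelformat.h"
#include "graphics/surface.h"
#include "video/video_decoder.h"

class RawDecoder : public Video::VideoDecoder {
public:
	RawDecoder() : _stream(0) {}

	bool loadStream(Common::SeekableReadStream *stream) {
		close();
		// A real implementation would parse a header; these values are made up.
		addTrack(new RawVideoTrack(320, 200, 100));
		_stream = stream;
		return true;
	}

	void close() {
		Video::VideoDecoder::close(); // frees the tracks, resets playback state
		delete _stream;
		_stream = 0;
	}

private:
	class RawVideoTrack : public FixedRateVideoTrack {
	public:
		RawVideoTrack(uint16 width, uint16 height, int frameCount) : _curFrame(-1), _frameCount(frameCount) {
			_surface.create(width, height, Graphics::PixelFormat(2, 5, 6, 5, 0, 11, 5, 0, 0)); // RGB565
		}
		~RawVideoTrack() { _surface.free(); }

		uint16 getWidth() const { return _surface.w; }
		uint16 getHeight() const { return _surface.h; }
		Graphics::PixelFormat getPixelFormat() const { return _surface.format; }
		int getCurFrame() const { return _curFrame; }
		int getFrameCount() const { return _frameCount; }

		const Graphics::Surface *decodeNextFrame() {
			_curFrame++;
			// ... decode the next frame's pixels into _surface (format-specific) ...
			return &_surface;
		}

	protected:
		Common::Rational getFrameRate() const { return 15; } // constant 15 fps

	private:
		Graphics::Surface _surface;
		int _curFrame, _frameCount;
	};

	Common::SeekableReadStream *_stream;
};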