/* ScummVM - Graphic Adventure Engine
 *
 * ScummVM is the legal property of its developers, whose names
 * are too numerous to list here. Please refer to the COPYRIGHT
 * file distributed with this source distribution.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 */

#include "common/debug.h"
#include "common/file.h"
#include "common/mutex.h"
#include "common/textconsole.h"
#include "common/queue.h"
#include "common/util.h"

#include "audio/audiostream.h"
#include "audio/decoders/flac.h"
#include "audio/decoders/mp3.h"
#include "audio/decoders/quicktime.h"
#include "audio/decoders/raw.h"
#include "audio/decoders/vorbis.h"

namespace Audio {

struct StreamFileFormat {
	/** Decoder name */
	const char *decoderName;
	const char *fileExtension;
	/**
	 * Pointer to a function which tries to open a file of this format.
	 * Returns NULL in case of an error (invalid/nonexistent file).
	 */
	SeekableAudioStream *(*openStreamFile)(Common::SeekableReadStream *stream, DisposeAfterUse::Flag disposeAfterUse);
};

static const StreamFileFormat STREAM_FILEFORMATS[] = {
	/* decoderName,   fileExt, openStreamFunction */
#ifdef USE_FLAC
	{ "FLAC",         ".flac", makeFLACStream },
	{ "FLAC",         ".fla",  makeFLACStream },
#endif
#ifdef USE_VORBIS
	{ "Ogg Vorbis",   ".ogg",  makeVorbisStream },
#endif
#ifdef USE_MAD
	{ "MPEG Layer 3", ".mp3",  makeMP3Stream },
#endif
	{ "MPEG-4 Audio", ".m4a",  makeQuickTimeStream },

	{ NULL, NULL, NULL } // Terminator
};
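
/**
 * Probes each entry of STREAM_FILEFORMATS in turn ("basename" plus extension)
 * and returns the first stream that could be created, or NULL if none of the
 * candidate files could be opened.
 *
 * Minimal usage sketch (assuming the usual Mixer::playStream() API; the base
 * name "track1" and the caller-owned SoundHandle are illustrative only):
 *
 *   Audio::SeekableAudioStream *music = Audio::SeekableAudioStream::openStreamFile("track1");
 *   if (music)
 *       g_system->getMixer()->playStream(Audio::Mixer::kMusicSoundType, &handle, music);
 */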

SeekableAudioStream *SeekableAudioStream::openStreamFile(const Common::String &basename) {
	SeekableAudioStream *stream = NULL;
	Common::File *fileHandle = new Common::File();

	for (int i = 0; i < ARRAYSIZE(STREAM_FILEFORMATS)-1 && stream == NULL; ++i) {
		Common::String filename = basename + STREAM_FILEFORMATS[i].fileExtension;
		fileHandle->open(filename);
		if (fileHandle->isOpen()) {
			// Create the stream object
			stream = STREAM_FILEFORMATS[i].openStreamFile(fileHandle, DisposeAfterUse::YES);
			fileHandle = 0;
			break;
		}
	}

	delete fileHandle;

	if (stream == NULL)
		debug(1, "SeekableAudioStream::openStreamFile: Could not open compressed AudioFile %s", basename.c_str());

	return stream;
}

#pragma mark -
#pragma mark --- LoopingAudioStream ---
#pragma mark -
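
// LoopingAudioStream wraps a RewindableAudioStream and replays it the
// requested number of times; a loop count of 0 repeats the stream
// indefinitely. Whenever the wrapped stream reaches its end, it is rewound
// and reading continues transparently from the start.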

LoopingAudioStream::LoopingAudioStream(RewindableAudioStream *stream, uint loops, DisposeAfterUse::Flag disposeAfterUse)
	: _parent(stream), _disposeAfterUse(disposeAfterUse), _loops(loops), _completeIterations(0) {
	assert(stream);

	if (!stream->rewind()) {
		// TODO: Properly indicate error
		_loops = _completeIterations = 1;
	}
}

LoopingAudioStream::~LoopingAudioStream() {
	if (_disposeAfterUse == DisposeAfterUse::YES)
		delete _parent;
}
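
// readBuffer() pulls samples from the wrapped stream and, when that stream
// runs dry before the request is satisfied, counts a completed iteration,
// rewinds and recurses to fill the remainder of the buffer, so a single call
// can span a loop boundary.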

int LoopingAudioStream::readBuffer(int16 *buffer, const int numSamples) {
	if ((_loops && _completeIterations == _loops) || !numSamples)
		return 0;

	int samplesRead = _parent->readBuffer(buffer, numSamples);

	if (_parent->endOfStream()) {
		++_completeIterations;
		if (_completeIterations == _loops)
			return samplesRead;

		const int remainingSamples = numSamples - samplesRead;

		if (!_parent->rewind()) {
			// TODO: Properly indicate error
			_loops = _completeIterations = 1;
			return samplesRead;
		}

		return samplesRead + readBuffer(buffer + samplesRead, remainingSamples);
	}

	return samplesRead;
}

bool LoopingAudioStream::endOfData() const {
	return (_loops != 0 && (_completeIterations == _loops));
}

AudioStream *makeLoopingAudioStream(RewindableAudioStream *stream, uint loops) {
	if (loops != 1)
		return new LoopingAudioStream(stream, loops);
	else
		return stream;
}
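
// Minimal usage sketch (the "data"/"size" buffer of unsigned 8-bit mono
// samples at 11025 Hz is illustrative only):
//
//   Audio::RewindableAudioStream *raw =
//       Audio::makeRawStream(data, size, 11025, Audio::FLAG_UNSIGNED);
//   Audio::AudioStream *looped = Audio::makeLoopingAudioStream(raw, 0); // 0 = loop forever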

AudioStream *makeLoopingAudioStream(SeekableAudioStream *stream, Timestamp start, Timestamp end, uint loops) {
	if (!start.totalNumberOfFrames() && (!end.totalNumberOfFrames() || end == stream->getLength())) {
		return makeLoopingAudioStream(stream, loops);
	} else {
		if (!end.totalNumberOfFrames())
			end = stream->getLength();

		if (start >= end) {
			warning("makeLoopingAudioStream: start (%d) >= end (%d)", start.msecs(), end.msecs());
			delete stream;
			return 0;
		}

		return makeLoopingAudioStream(new SubSeekableAudioStream(stream, start, end), loops);
	}
}

#pragma mark -
#pragma mark --- SubLoopingAudioStream ---
#pragma mark -
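
// SubLoopingAudioStream plays the wrapped stream from its beginning up to
// loopEnd and from then on keeps looping only the [loopStart, loopEnd)
// portion: each time the playback position reaches loopEnd, the parent is
// seeked back to loopStart. A loop count of 0 loops that portion indefinitely.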

SubLoopingAudioStream::SubLoopingAudioStream(SeekableAudioStream *stream,
                                             uint loops,
                                             const Timestamp loopStart,
                                             const Timestamp loopEnd,
                                             DisposeAfterUse::Flag disposeAfterUse)
	: _parent(stream), _disposeAfterUse(disposeAfterUse), _loops(loops),
	  _pos(0, getRate() * (isStereo() ? 2 : 1)),
	  _loopStart(convertTimeToStreamPos(loopStart, getRate(), isStereo())),
	  _loopEnd(convertTimeToStreamPos(loopEnd, getRate(), isStereo())),
	  _done(false) {
	assert(loopStart < loopEnd);

	if (!_parent->rewind())
		_done = true;
}

SubLoopingAudioStream::~SubLoopingAudioStream() {
	if (_disposeAfterUse == DisposeAfterUse::YES)
		delete _parent;
}

int SubLoopingAudioStream::readBuffer(int16 *buffer, const int numSamples) {
	if (_done)
		return 0;

	int framesLeft = MIN(_loopEnd.frameDiff(_pos), numSamples);
	int framesRead = _parent->readBuffer(buffer, framesLeft);
	_pos = _pos.addFrames(framesRead);

	if (framesRead < framesLeft && _parent->endOfData()) {
		// TODO: Proper error indication.
		_done = true;
		return framesRead;
	} else if (_pos == _loopEnd) {
		if (_loops != 0) {
			--_loops;
			if (!_loops) {
				_done = true;
				return framesRead;
			}
		}

		if (!_parent->seek(_loopStart)) {
			// TODO: Proper error indication.
			_done = true;
			return framesRead;
		}

		_pos = _loopStart;
		framesLeft = numSamples - framesLeft;
		return framesRead + readBuffer(buffer + framesRead, framesLeft);
	} else {
		return framesRead;
	}
}

#pragma mark -
#pragma mark --- SubSeekableAudioStream ---
#pragma mark -
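
// SubSeekableAudioStream exposes only the [start, end) window of its parent
// stream: reads stop at the window's end and seek() positions are interpreted
// relative to the window's start.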

SubSeekableAudioStream::SubSeekableAudioStream(SeekableAudioStream *parent, const Timestamp start, const Timestamp end, DisposeAfterUse::Flag disposeAfterUse)
	: _parent(parent), _disposeAfterUse(disposeAfterUse),
	  _start(convertTimeToStreamPos(start, getRate(), isStereo())),
	  _pos(0, getRate() * (isStereo() ? 2 : 1)),
	  _length(convertTimeToStreamPos(end, getRate(), isStereo()) - _start) {

	assert(_length.totalNumberOfFrames() % (isStereo() ? 2 : 1) == 0);
	_parent->seek(_start);
}

SubSeekableAudioStream::~SubSeekableAudioStream() {
	if (_disposeAfterUse)
		delete _parent;
}

int SubSeekableAudioStream::readBuffer(int16 *buffer, const int numSamples) {
	int framesLeft = MIN(_length.frameDiff(_pos), numSamples);
	int framesRead = _parent->readBuffer(buffer, framesLeft);
	_pos = _pos.addFrames(framesRead);
	return framesRead;
}

bool SubSeekableAudioStream::seek(const Timestamp &where) {
	_pos = convertTimeToStreamPos(where, getRate(), isStereo());
	if (_pos > _length) {
		_pos = _length;
		return false;
	}

	if (_parent->seek(_pos + _start)) {
		return true;
	} else {
		_pos = _length;
		return false;
	}
}

#pragma mark -
#pragma mark --- Queueing audio stream ---
#pragma mark -
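
// queueBuffer() is a convenience wrapper: it turns a block of raw sample data
// into a raw audio stream at the queue's rate and appends it, handing
// ownership of that temporary stream over to the queue.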

void QueuingAudioStream::queueBuffer(byte *data, uint32 size, DisposeAfterUse::Flag disposeAfterUse, byte flags) {
	AudioStream *stream = makeRawStream(data, size, getRate(), flags, disposeAfterUse);
	queueAudioStream(stream, DisposeAfterUse::YES);
}

class QueuingAudioStreamImpl : public QueuingAudioStream {
private:
	/**
	 * We queue a number of (pointers to) audio stream objects.
	 * In addition, we need to remember for each stream whether
	 * to dispose it after all data has been read from it.
	 * Hence, we don't store pointers to stream objects directly,
	 * but rather StreamHolder structs.
	 */
	struct StreamHolder {
		AudioStream *_stream;
		DisposeAfterUse::Flag _disposeAfterUse;
		StreamHolder(AudioStream *stream, DisposeAfterUse::Flag disposeAfterUse)
			: _stream(stream),
			  _disposeAfterUse(disposeAfterUse) {}
	};

	/**
	 * The sampling rate of this audio stream.
	 */
	const int _rate;

	/**
	 * Whether this audio stream is mono (=false) or stereo (=true).
	 */
	const int _stereo;

	/**
	 * This flag is set by the finish() method only. See there for more details.
	 */
	bool _finished;

	/**
	 * A mutex to avoid access problems (causing e.g. corruption of
	 * the linked list) in thread-aware environments.
	 */
	Common::Mutex _mutex;

	/**
	 * The queue of audio streams.
	 */
	Common::Queue<StreamHolder> _queue;

public:
	QueuingAudioStreamImpl(int rate, bool stereo)
		: _rate(rate), _stereo(stereo), _finished(false) {}
	~QueuingAudioStreamImpl();

	// Implement the AudioStream API
	virtual int readBuffer(int16 *buffer, const int numSamples);
	virtual bool isStereo() const { return _stereo; }
	virtual int getRate() const { return _rate; }
	virtual bool endOfData() const {
		//Common::StackLock lock(_mutex);
		return _queue.empty();
	}
	virtual bool endOfStream() const { return _finished && _queue.empty(); }

	// Implement the QueuingAudioStream API
	virtual void queueAudioStream(AudioStream *stream, DisposeAfterUse::Flag disposeAfterUse);
	virtual void finish() { _finished = true; }

	uint32 numQueuedStreams() const {
		//Common::StackLock lock(_mutex);
		return _queue.size();
	}
};

QueuingAudioStreamImpl::~QueuingAudioStreamImpl() {
	while (!_queue.empty()) {
		StreamHolder tmp = _queue.pop();
		if (tmp._disposeAfterUse == DisposeAfterUse::YES)
			delete tmp._stream;
	}
}

void QueuingAudioStreamImpl::queueAudioStream(AudioStream *stream, DisposeAfterUse::Flag disposeAfterUse) {
	assert(!_finished);
	if ((stream->getRate() != getRate()) || (stream->isStereo() != isStereo()))
		error("QueuingAudioStreamImpl::queueAudioStream: stream has mismatched parameters");

	Common::StackLock lock(_mutex);
	_queue.push(StreamHolder(stream, disposeAfterUse));
}
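
// readBuffer() drains the queued streams in FIFO order, popping (and, when
// requested, deleting) each stream once it has no more data, until either the
// output buffer is full or the queue is empty.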

int QueuingAudioStreamImpl::readBuffer(int16 *buffer, const int numSamples) {
	Common::StackLock lock(_mutex);
	int samplesDecoded = 0;

	while (samplesDecoded < numSamples && !_queue.empty()) {
		AudioStream *stream = _queue.front()._stream;
		samplesDecoded += stream->readBuffer(buffer + samplesDecoded, numSamples - samplesDecoded);

		if (stream->endOfData()) {
			StreamHolder tmp = _queue.pop();
			if (tmp._disposeAfterUse == DisposeAfterUse::YES)
				delete stream;
		}
	}

	return samplesDecoded;
}

QueuingAudioStream *makeQueuingAudioStream(int rate, bool stereo) {
	return new QueuingAudioStreamImpl(rate, stereo);
}
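
// Minimal usage sketch (assuming the usual Mixer::playStream() API; the
// 22050 Hz rate, the chunk buffer and the caller-owned SoundHandle are
// illustrative only):
//
//   Audio::QueuingAudioStream *queue = Audio::makeQueuingAudioStream(22050, false);
//   g_system->getMixer()->playStream(Audio::Mixer::kSFXSoundType, &handle, queue);
//   queue->queueBuffer(chunk, chunkSize, DisposeAfterUse::YES, Audio::FLAG_UNSIGNED);
//   queue->finish(); // tell the stream that no more data will follow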

Timestamp convertTimeToStreamPos(const Timestamp &where, int rate, bool isStereo) {
	Timestamp result(where.convertToFramerate(rate * (isStereo ? 2 : 1)));

	// When the stream is a stereo stream, we have to ensure
	// that the sample position is an even number.
	if (isStereo && (result.totalNumberOfFrames() & 1))
		result = result.addFrames(-1); // We cut off one sample here.

	// Since Timestamp allows sub-frame precision, simply returning "result"
	// here might lead to odd behavior.
	//
	// An example is converting the timestamp 500ms to an 11025 Hz based
	// stream. It would have an internal frame counter of 5512.5. Now when
	// doing calculations at frame precision, this might lead to unexpected
	// results: the frame difference between a timestamp of 1000ms and the
	// above-mentioned timestamp (both with 11025 as framerate) would be 5512,
	// instead of the 5513 that frame-precision based code would expect.
	//
	// By creating a new Timestamp with the given parameters, we create a
	// Timestamp with frame precision, which simply drops the sub-frame
	// precision information (i.e. rounds down).
	return Timestamp(result.secs(), result.numberOfFrames(), result.framerate());
}
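
// A concrete example of the conversion above: for a stereo 11025 Hz stream, a
// position of 500ms is first converted to 500/1000 * 22050 = 11025 samples;
// since that count is odd, one sample is cut off, giving a final stream
// position of 11024 samples.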

} // End of namespace Audio