Merge commit '1647da89dd8ac09a55c111589f7a30d7e6b87d90'

* commit '1647da89dd8ac09a55c111589f7a30d7e6b87d90':
  lavr: make sure that the mix function is reset even if no mixing will be done
  lavr: print out the mix matrix in ff_audio_mix_set_matrix()
  ws-snd1: decode directly to the user-provided AVFrame
  wmavoice: decode directly to the user-provided AVFrame

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2013-02-13 12:54:03 +01:00
commit 91043de825
3 changed files with 72 additions and 76 deletions
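
Note on the two decoder changes ("ws-snd1" and "wmavoice" below): both drop the AVFrame that used to live in the codec private context (and the avctx->coded_frame pointer to it) and instead fill the AVFrame the caller passes in through the decode() callback's data argument. A minimal sketch of the resulting decoder shape, with a hypothetical codec and an arbitrary sample count, not code from this commit:

/* Sketch only: mirrors the pattern in the diffs below; "sketch_decode_frame"
 * and the fixed sample count are illustrative, not part of the commit. */
#include "avcodec.h"
#include "internal.h"    /* ff_get_buffer() */

static int sketch_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;        /* frame owned by the caller */
    int ret;

    frame->nb_samples = 480;      /* whatever this packet will decode to */
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode the packet into frame->data[0] ... */

    *got_frame_ptr = 1;           /* no context-owned frame to copy out, */
    return avpkt->size;           /* no avctx->coded_frame bookkeeping   */
}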

libavcodec/wmavoice.c

@@ -134,7 +134,6 @@ typedef struct {
      * @name Global values specified in the stream header / extradata or used all over.
      * @{
      */
-    AVFrame frame;
     GetBitContext gb;             ///< packet bitreader. During decoder init,
                                   ///< it contains the extradata from the
                                   ///< demuxer. During decoding, it contains
@@ -443,9 +442,6 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
     ctx->channel_layout = AV_CH_LAYOUT_MONO;
     ctx->sample_fmt = AV_SAMPLE_FMT_FLT;

-    avcodec_get_frame_defaults(&s->frame);
-    ctx->coded_frame = &s->frame;
-
     return 0;
 }
@@ -1733,7 +1729,8 @@ static int check_bits_for_superframe(GetBitContext *orig_gb,
  * @return 0 on success, <0 on error or 1 if there was not enough data to
  *         fully parse the superframe
  */
-static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
+static int synth_superframe(AVCodecContext *ctx, AVFrame *frame,
+                            int *got_frame_ptr)
 {
     WMAVoiceContext *s = ctx->priv_data;
     GetBitContext *gb = &s->gb, s_gb;
@@ -1801,13 +1798,13 @@ static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
     }

     /* get output buffer */
-    s->frame.nb_samples = 480;
-    if ((res = ff_get_buffer(ctx, &s->frame)) < 0) {
+    frame->nb_samples = 480;
+    if ((res = ff_get_buffer(ctx, frame)) < 0) {
         av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }
-    s->frame.nb_samples = n_samples;
-    samples = (float *)s->frame.data[0];
+    frame->nb_samples = n_samples;
+    samples = (float *)frame->data[0];

     /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
     for (n = 0; n < 3; n++) {
@@ -1964,11 +1961,10 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
         copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
         flush_put_bits(&s->pb);
         s->sframe_cache_size += s->spillover_nbits;
-        if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 &&
+        if ((res = synth_superframe(ctx, data, got_frame_ptr)) == 0 &&
             *got_frame_ptr) {
             cnt += s->spillover_nbits;
             s->skip_bits_next = cnt & 7;
-            *(AVFrame *)data = s->frame;
             return cnt >> 3;
         } else
             skip_bits_long (gb, s->spillover_nbits - cnt +
@@ -1983,12 +1979,11 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
     s->sframe_cache_size = 0;
     s->skip_bits_next = 0;
     pos = get_bits_left(gb);
-    if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) {
+    if ((res = synth_superframe(ctx, data, got_frame_ptr)) < 0) {
         return res;
     } else if (*got_frame_ptr) {
         int cnt = get_bits_count(gb);
         s->skip_bits_next = cnt & 7;
-        *(AVFrame *)data = s->frame;
         return cnt >> 3;
     } else if ((s->sframe_cache_size = pos) > 0) {
         /* rewind bit reader to start of last (incomplete) superframe... */

libavcodec/ws-snd1.c

@@ -41,28 +41,19 @@ static const int8_t ws_adpcm_4bit[] = {
     0, 1, 2, 3, 4, 5, 6, 8
 };

-typedef struct WSSndContext {
-    AVFrame frame;
-} WSSndContext;
-
 static av_cold int ws_snd_decode_init(AVCodecContext *avctx)
 {
-    WSSndContext *s = avctx->priv_data;
-
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     avctx->sample_fmt = AV_SAMPLE_FMT_U8;

-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }

 static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
 {
-    WSSndContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -89,18 +80,17 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
     }

     /* get output buffer */
-    s->frame.nb_samples = out_size;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = out_size;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = s->frame.data[0];
+    samples = frame->data[0];
     samples_end = samples + out_size;

     if (in_size == out_size) {
         memcpy(samples, buf, out_size);
         *got_frame_ptr = 1;
-        *(AVFrame *)data = s->frame;
         return buf_size;
     }
@@ -176,9 +166,8 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
         }
     }

-    s->frame.nb_samples = samples - s->frame.data[0];
+    frame->nb_samples = samples - frame->data[0];

     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;

     return buf_size;
 }
@@ -187,7 +176,6 @@ AVCodec ff_ws_snd1_decoder = {
     .name           = "ws_snd1",
     .type           = AVMEDIA_TYPE_AUDIO,
     .id             = AV_CODEC_ID_WESTWOOD_SND1,
-    .priv_data_size = sizeof(WSSndContext),
     .init           = ws_snd_decode_init,
     .decode         = ws_snd_decode_frame,
     .capabilities   = CODEC_CAP_DR1,
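
For reference, the caller side is untouched by this conversion: avcodec_decode_audio4() still fills the frame the caller supplies, only now the decoder obtains its output buffer directly on that frame instead of copying a context-owned AVFrame over it. A caller-side sketch against the lavc API of this period (names and error handling are illustrative):

#include <libavcodec/avcodec.h>

/* Sketch only: decode one packet into a caller-owned frame. */
static int sketch_decode_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame frame;
    int got_frame = 0, len;

    avcodec_get_frame_defaults(&frame);
    len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
    if (len < 0)
        return len;                /* decoding error */
    if (got_frame) {
        /* frame.data[0] holds frame.nb_samples decoded samples in the buffer
         * the decoder requested via ff_get_buffer() for this frame */
    }
    return len;                    /* number of bytes consumed from pkt */
}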

libavresample/audio_mix.c

@@ -370,9 +370,6 @@ AudioMix *ff_audio_mix_alloc(AVAudioResampleContext *avr)
             goto error;
         av_freep(&avr->mix_matrix);
     } else {
-        int i, j;
-        char in_layout_name[128];
-        char out_layout_name[128];
         double *matrix_dbl = av_mallocz(avr->out_channels * avr->in_channels *
                                         sizeof(*matrix_dbl));
         if (!matrix_dbl)
@@ -399,27 +396,6 @@ AudioMix *ff_audio_mix_alloc(AVAudioResampleContext *avr)
             goto error;
         }

-        av_get_channel_layout_string(in_layout_name, sizeof(in_layout_name),
-                                     avr->in_channels, avr->in_channel_layout);
-        av_get_channel_layout_string(out_layout_name, sizeof(out_layout_name),
-                                     avr->out_channels, avr->out_channel_layout);
-        av_log(avr, AV_LOG_DEBUG, "audio_mix: %s to %s\n",
-               in_layout_name, out_layout_name);
-        av_log(avr, AV_LOG_DEBUG, "matrix size: %d x %d\n",
-               am->in_matrix_channels, am->out_matrix_channels);
-        for (i = 0; i < avr->out_channels; i++) {
-            for (j = 0; j < avr->in_channels; j++) {
-                if (am->output_zero[i])
-                    av_log(avr, AV_LOG_DEBUG, " (ZERO)");
-                else if (am->input_skip[j] || am->output_skip[i])
-                    av_log(avr, AV_LOG_DEBUG, " (SKIP)");
-                else
-                    av_log(avr, AV_LOG_DEBUG, " %0.3f ",
-                           matrix_dbl[i * avr->in_channels + j]);
-            }
-            av_log(avr, AV_LOG_DEBUG, "\n");
-        }
-
         av_free(matrix_dbl);
     }
@@ -551,26 +527,13 @@ int ff_audio_mix_get_matrix(AudioMix *am, double *matrix, int stride)
     return 0;
 }

-int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
+static void reduce_matrix(AudioMix *am, const double *matrix, int stride)
 {
-    int i, o, i0, o0;
-
-    if ( am->in_channels <= 0 || am->in_channels > AVRESAMPLE_MAX_CHANNELS ||
-        am->out_channels <= 0 || am->out_channels > AVRESAMPLE_MAX_CHANNELS) {
-        av_log(am->avr, AV_LOG_ERROR, "Invalid channel counts\n");
-        return AVERROR(EINVAL);
-    }
-
-    if (am->matrix) {
-        av_free(am->matrix[0]);
-        am->matrix = NULL;
-    }
+    int i, o;

     memset(am->output_zero, 0, sizeof(am->output_zero));
     memset(am->input_skip, 0, sizeof(am->input_skip));
-    memset(am->output_skip, 0, sizeof(am->output_zero));
-
-    am->in_matrix_channels = am->in_channels;
-    am->out_matrix_channels = am->out_channels;
+    memset(am->output_skip, 0, sizeof(am->output_skip));

     /* exclude output channels if they can be zeroed instead of mixed */
     for (o = 0; o < am->out_channels; o++) {
@@ -600,7 +563,7 @@ int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
     }

     if (am->out_matrix_channels == 0) {
         am->in_matrix_channels = 0;
-        return 0;
+        return;
     }

     /* skip input channels that contribute fully only to the corresponding
@@ -637,7 +600,7 @@ int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
     }

     if (am->in_matrix_channels == 0) {
         am->out_matrix_channels = 0;
-        return 0;
+        return;
     }

     /* skip output channels that only get full contribution from the
@@ -659,8 +622,31 @@ int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
     }

     if (am->out_matrix_channels == 0) {
         am->in_matrix_channels = 0;
-        return 0;
+        return;
     }
+}
+
+int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
+{
+    int i, o, i0, o0, ret;
+    char in_layout_name[128];
+    char out_layout_name[128];
+
+    if ( am->in_channels <= 0 || am->in_channels > AVRESAMPLE_MAX_CHANNELS ||
+        am->out_channels <= 0 || am->out_channels > AVRESAMPLE_MAX_CHANNELS) {
+        av_log(am->avr, AV_LOG_ERROR, "Invalid channel counts\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (am->matrix) {
+        av_free(am->matrix[0]);
+        am->matrix = NULL;
+    }
+
+    am->in_matrix_channels = am->in_channels;
+    am->out_matrix_channels = am->out_channels;
+
+    reduce_matrix(am, matrix, stride);

 #define CONVERT_MATRIX(type, expr) \
     am->matrix_## type[0] = av_mallocz(am->out_matrix_channels * \
@@ -686,6 +672,7 @@ int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
     } \
     am->matrix = (void **)am->matrix_## type;

+    if (am->in_matrix_channels && am->out_matrix_channels) {
     switch (am->coeff_type) {
     case AV_MIX_COEFF_TYPE_Q8:
         CONVERT_MATRIX(q8, av_clip_int16(lrint(256.0 * v)))
@@ -700,6 +687,32 @@ int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride)
         av_log(am->avr, AV_LOG_ERROR, "Invalid mix coeff type\n");
         return AVERROR(EINVAL);
     }
+    }

-    return mix_function_init(am);
+    ret = mix_function_init(am);
+    if (ret < 0)
+        return ret;
+
+    av_get_channel_layout_string(in_layout_name, sizeof(in_layout_name),
+                                 am->in_channels, am->in_layout);
+    av_get_channel_layout_string(out_layout_name, sizeof(out_layout_name),
+                                 am->out_channels, am->out_layout);
+    av_log(am->avr, AV_LOG_DEBUG, "audio_mix: %s to %s\n",
+           in_layout_name, out_layout_name);
+    av_log(am->avr, AV_LOG_DEBUG, "matrix size: %d x %d\n",
+           am->in_matrix_channels, am->out_matrix_channels);
+    for (o = 0; o < am->out_channels; o++) {
+        for (i = 0; i < am->in_channels; i++) {
+            if (am->output_zero[o])
+                av_log(am->avr, AV_LOG_DEBUG, " (ZERO)");
+            else if (am->input_skip[i] || am->output_skip[o])
+                av_log(am->avr, AV_LOG_DEBUG, " (SKIP)");
+            else
+                av_log(am->avr, AV_LOG_DEBUG, " %0.3f ",
+                       matrix[o * am->in_channels + i]);
+        }
+        av_log(am->avr, AV_LOG_DEBUG, "\n");
+    }
+
+    return 0;
 }
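
The two lavr changes above move the mix-matrix debug dump from ff_audio_mix_alloc() into ff_audio_mix_set_matrix() and split the zero/skip reduction out into reduce_matrix(), so mix_function_init() now runs even when every coefficient reduces to a (ZERO) or (SKIP) entry. A sketch of driving that path through the public libavresample API of this period (layouts, rates and mix levels are arbitrary; error checks trimmed):

#include <libavresample/avresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/log.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

/* Sketch only: install a custom 5.1 -> stereo matrix; with the log level at
 * AV_LOG_DEBUG, the matrix is printed when it reaches ff_audio_mix_set_matrix(). */
int sketch_set_custom_matrix(void)
{
    AVAudioResampleContext *avr = avresample_alloc_context();
    double matrix[2 * 6];                  /* out_channels rows, stride = 6 inputs */

    av_log_set_level(AV_LOG_DEBUG);

    av_opt_set_int(avr, "in_channel_layout",  AV_CH_LAYOUT_5POINT1, 0);
    av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO,  0);
    av_opt_set_int(avr, "in_sample_rate",     48000,                0);
    av_opt_set_int(avr, "out_sample_rate",    48000,                0);
    av_opt_set_int(avr, "in_sample_fmt",      AV_SAMPLE_FMT_FLTP,   0);
    av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_FLT,    0);

    avresample_build_matrix(AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,
                            0.707, 0.707, 0.0, 1, matrix, 6,
                            AV_MATRIX_ENCODING_NONE);

    avresample_open(avr);
    avresample_set_matrix(avr, matrix, 6); /* applied via ff_audio_mix_set_matrix() */

    avresample_free(&avr);
    return 0;
}

Setting the matrix on an already-open context, as sketched here, applies it immediately through the mix stage, which is where the new debug output and the unconditional mix_function_init() call take effect; setting it before avresample_open() stores it and applies it when the mix stage is created.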