Merge remote-tracking branch 'qatar/master'

* qatar/master:
  adpcmenc: cosmetics: pretty-printing
  ac3dec: cosmetics: pretty-printing
  yuv4mpeg: cosmetics: pretty-printing
  shorten: remove dead initialization
  roqvideodec: set AVFrame reference before reget_buffer.
  bmp: fix some 1bit samples.
  latmdec: add fate test for audio config change
  oma: PCM support
  oma: better format detection with small probe buffer
  oma: clearify ambiguous if condition
  wavpack: Properly clip samples during lossy decode
  Code clean-up for crc.c, lfg.c, log.c, random_see.d, rational.c and tree.c.
  Cleaned pixdesc.c file in libavutil
  zmbv.c: coding style clean-up.
  xan.c: coding style clean-up.
  mpegvideo.c: code cleanup - first 500 lines.

Conflicts:
	Changelog
	libavcodec/adpcmenc.c
	libavcodec/bmp.c
	libavcodec/zmbv.c
	libavutil/log.c
	libavutil/pixdesc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2011-12-05 00:11:57 +01:00
commit 707138593a
19 changed files with 1779 additions and 1554 deletions

Changelog
@@ -129,6 +129,7 @@ easier to use. The changes are:
- Playstation Portable PMP format demuxer
- Microsoft Windows ICO demuxer
- life source
- PCM format support in OMA demuxer
version 0.8:

libavcodec/ac3dec.c
@ -44,7 +44,6 @@
*/ */
static uint8_t ungroup_3_in_7_bits_tab[128][3]; static uint8_t ungroup_3_in_7_bits_tab[128][3];
/** tables for ungrouping mantissas */ /** tables for ungrouping mantissas */
static int b1_mantissas[32][3]; static int b1_mantissas[32][3];
static int b2_mantissas[128][3]; static int b2_mantissas[128][3];
@ -124,7 +123,7 @@ static av_cold void ac3_tables_init(void)
/* generate table for ungrouping 3 values in 7 bits /* generate table for ungrouping 3 values in 7 bits
reference: Section 7.1.3 Exponent Decoding */ reference: Section 7.1.3 Exponent Decoding */
for(i=0; i<128; i++) { for (i = 0; i < 128; i++) {
ungroup_3_in_7_bits_tab[i][0] = i / 25; ungroup_3_in_7_bits_tab[i][0] = i / 25;
ungroup_3_in_7_bits_tab[i][1] = (i % 25) / 5; ungroup_3_in_7_bits_tab[i][1] = (i % 25) / 5;
ungroup_3_in_7_bits_tab[i][2] = (i % 25) % 5; ungroup_3_in_7_bits_tab[i][2] = (i % 25) % 5;
@ -132,13 +131,13 @@ static av_cold void ac3_tables_init(void)
/* generate grouped mantissa tables /* generate grouped mantissa tables
reference: Section 7.3.5 Ungrouping of Mantissas */ reference: Section 7.3.5 Ungrouping of Mantissas */
for(i=0; i<32; i++) { for (i = 0; i < 32; i++) {
/* bap=1 mantissas */ /* bap=1 mantissas */
b1_mantissas[i][0] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][0], 3); b1_mantissas[i][0] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][0], 3);
b1_mantissas[i][1] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][1], 3); b1_mantissas[i][1] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][1], 3);
b1_mantissas[i][2] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][2], 3); b1_mantissas[i][2] = symmetric_dequant(ff_ac3_ungroup_3_in_5_bits_tab[i][2], 3);
} }
for(i=0; i<128; i++) { for (i = 0; i < 128; i++) {
/* bap=2 mantissas */ /* bap=2 mantissas */
b2_mantissas[i][0] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][0], 5); b2_mantissas[i][0] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][0], 5);
b2_mantissas[i][1] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][1], 5); b2_mantissas[i][1] = symmetric_dequant(ungroup_3_in_7_bits_tab[i][1], 5);
@ -150,24 +149,23 @@ static av_cold void ac3_tables_init(void)
} }
/* generate ungrouped mantissa tables /* generate ungrouped mantissa tables
reference: Tables 7.21 and 7.23 */ reference: Tables 7.21 and 7.23 */
for(i=0; i<7; i++) { for (i = 0; i < 7; i++) {
/* bap=3 mantissas */ /* bap=3 mantissas */
b3_mantissas[i] = symmetric_dequant(i, 7); b3_mantissas[i] = symmetric_dequant(i, 7);
} }
for(i=0; i<15; i++) { for (i = 0; i < 15; i++) {
/* bap=5 mantissas */ /* bap=5 mantissas */
b5_mantissas[i] = symmetric_dequant(i, 15); b5_mantissas[i] = symmetric_dequant(i, 15);
} }
/* generate dynamic range table /* generate dynamic range table
reference: Section 7.7.1 Dynamic Range Control */ reference: Section 7.7.1 Dynamic Range Control */
for(i=0; i<256; i++) { for (i = 0; i < 256; i++) {
int v = (i >> 5) - ((i >> 7) << 3) - 5; int v = (i >> 5) - ((i >> 7) << 3) - 5;
dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0x1F) | 0x20); dynamic_range_tab[i] = powf(2.0f, v) * ((i & 0x1F) | 0x20);
} }
} }
/** /**
* AVCodec initialization * AVCodec initialization
*/ */
@ -250,7 +248,7 @@ static int ac3_parse_header(AC3DecodeContext *s)
i = get_bits(gbc, 6); i = get_bits(gbc, 6);
do { do {
skip_bits(gbc, 8); skip_bits(gbc, 8);
} while(i--); } while (i--);
} }
return 0; return 0;
@ -265,7 +263,7 @@ static int parse_frame_header(AC3DecodeContext *s)
int err; int err;
err = avpriv_ac3_parse_header(&s->gbc, &hdr); err = avpriv_ac3_parse_header(&s->gbc, &hdr);
if(err) if (err)
return err; return err;
/* get decoding parameters from header info */ /* get decoding parameters from header info */
@ -287,9 +285,9 @@ static int parse_frame_header(AC3DecodeContext *s)
s->frame_type = hdr.frame_type; s->frame_type = hdr.frame_type;
s->substreamid = hdr.substreamid; s->substreamid = hdr.substreamid;
if(s->lfe_on) { if (s->lfe_on) {
s->start_freq[s->lfe_ch] = 0; s->start_freq[s->lfe_ch] = 0;
s->end_freq[s->lfe_ch] = 7; s->end_freq[s->lfe_ch] = 7;
s->num_exp_groups[s->lfe_ch] = 2; s->num_exp_groups[s->lfe_ch] = 2;
s->channel_in_cpl[s->lfe_ch] = 0; s->channel_in_cpl[s->lfe_ch] = 0;
} }
@ -326,38 +324,39 @@ static void set_downmix_coeffs(AC3DecodeContext *s)
float smix = gain_levels[surround_levels[s->surround_mix_level]]; float smix = gain_levels[surround_levels[s->surround_mix_level]];
float norm0, norm1; float norm0, norm1;
for(i=0; i<s->fbw_channels; i++) { for (i = 0; i < s->fbw_channels; i++) {
s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]]; s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]];
s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]]; s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]];
} }
if(s->channel_mode > 1 && s->channel_mode & 1) { if (s->channel_mode > 1 && s->channel_mode & 1) {
s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix; s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix;
} }
if(s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) { if (s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) {
int nf = s->channel_mode - 2; int nf = s->channel_mode - 2;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB;
} }
if(s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) { if (s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) {
int nf = s->channel_mode - 4; int nf = s->channel_mode - 4;
s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix;
} }
/* renormalize */ /* renormalize */
norm0 = norm1 = 0.0; norm0 = norm1 = 0.0;
for(i=0; i<s->fbw_channels; i++) { for (i = 0; i < s->fbw_channels; i++) {
norm0 += s->downmix_coeffs[i][0]; norm0 += s->downmix_coeffs[i][0];
norm1 += s->downmix_coeffs[i][1]; norm1 += s->downmix_coeffs[i][1];
} }
norm0 = 1.0f / norm0; norm0 = 1.0f / norm0;
norm1 = 1.0f / norm1; norm1 = 1.0f / norm1;
for(i=0; i<s->fbw_channels; i++) { for (i = 0; i < s->fbw_channels; i++) {
s->downmix_coeffs[i][0] *= norm0; s->downmix_coeffs[i][0] *= norm0;
s->downmix_coeffs[i][1] *= norm1; s->downmix_coeffs[i][1] *= norm1;
} }
if(s->output_mode == AC3_CHMODE_MONO) { if (s->output_mode == AC3_CHMODE_MONO) {
for(i=0; i<s->fbw_channels; i++) for (i = 0; i < s->fbw_channels; i++)
s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] + s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB; s->downmix_coeffs[i][0] = (s->downmix_coeffs[i][0] +
s->downmix_coeffs[i][1]) * LEVEL_MINUS_3DB;
} }
} }
@ -374,7 +373,7 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps,
/* unpack groups */ /* unpack groups */
group_size = exp_strategy + (exp_strategy == EXP_D45); group_size = exp_strategy + (exp_strategy == EXP_D45);
for(grp=0,i=0; grp<ngrps; grp++) { for (grp = 0, i = 0; grp < ngrps; grp++) {
expacc = get_bits(gbc, 7); expacc = get_bits(gbc, 7);
dexp[i++] = ungroup_3_in_7_bits_tab[expacc][0]; dexp[i++] = ungroup_3_in_7_bits_tab[expacc][0];
dexp[i++] = ungroup_3_in_7_bits_tab[expacc][1]; dexp[i++] = ungroup_3_in_7_bits_tab[expacc][1];
@ -383,15 +382,15 @@ static int decode_exponents(GetBitContext *gbc, int exp_strategy, int ngrps,
/* convert to absolute exps and expand groups */ /* convert to absolute exps and expand groups */
prevexp = absexp; prevexp = absexp;
for(i=0,j=0; i<ngrps*3; i++) { for (i = 0, j = 0; i < ngrps * 3; i++) {
prevexp += dexp[i] - 2; prevexp += dexp[i] - 2;
if (prevexp > 24U) if (prevexp > 24U)
return -1; return -1;
switch (group_size) { switch (group_size) {
case 4: dexps[j++] = prevexp; case 4: dexps[j++] = prevexp;
dexps[j++] = prevexp; dexps[j++] = prevexp;
case 2: dexps[j++] = prevexp; case 2: dexps[j++] = prevexp;
case 1: dexps[j++] = prevexp; case 1: dexps[j++] = prevexp;
} }
} }
return 0; return 0;
@ -414,7 +413,8 @@ static void calc_transform_coeffs_cpl(AC3DecodeContext *s)
if (s->channel_in_cpl[ch]) { if (s->channel_in_cpl[ch]) {
int cpl_coord = s->cpl_coords[ch][band] << 5; int cpl_coord = s->cpl_coords[ch][band] << 5;
for (bin = band_start; bin < band_end; bin++) { for (bin = band_start; bin < band_end; bin++) {
s->fixed_coeffs[ch][bin] = MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord); s->fixed_coeffs[ch][bin] =
MULH(s->fixed_coeffs[CPL_CH][bin] << 4, cpl_coord);
} }
if (ch == 2 && s->phase_flags[band]) { if (ch == 2 && s->phase_flags[band]) {
for (bin = band_start; bin < band_end; bin++) for (bin = band_start; bin < band_end; bin++)
@ -445,73 +445,70 @@ typedef struct {
static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m) static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m)
{ {
int start_freq = s->start_freq[ch_index]; int start_freq = s->start_freq[ch_index];
int end_freq = s->end_freq[ch_index]; int end_freq = s->end_freq[ch_index];
uint8_t *baps = s->bap[ch_index]; uint8_t *baps = s->bap[ch_index];
int8_t *exps = s->dexps[ch_index]; int8_t *exps = s->dexps[ch_index];
int *coeffs = s->fixed_coeffs[ch_index]; int *coeffs = s->fixed_coeffs[ch_index];
int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index]; int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index];
GetBitContext *gbc = &s->gbc; GetBitContext *gbc = &s->gbc;
int freq; int freq;
for(freq = start_freq; freq < end_freq; freq++){ for (freq = start_freq; freq < end_freq; freq++) {
int bap = baps[freq]; int bap = baps[freq];
int mantissa; int mantissa;
switch(bap){ switch (bap) {
case 0: case 0:
if (dither) if (dither)
mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000; mantissa = (av_lfg_get(&s->dith_state) & 0x7FFFFF) - 0x400000;
else else
mantissa = 0; mantissa = 0;
break; break;
case 1: case 1:
if(m->b1){ if (m->b1) {
m->b1--; m->b1--;
mantissa = m->b1_mant[m->b1]; mantissa = m->b1_mant[m->b1];
} } else {
else{ int bits = get_bits(gbc, 5);
int bits = get_bits(gbc, 5); mantissa = b1_mantissas[bits][0];
mantissa = b1_mantissas[bits][0]; m->b1_mant[1] = b1_mantissas[bits][1];
m->b1_mant[1] = b1_mantissas[bits][1]; m->b1_mant[0] = b1_mantissas[bits][2];
m->b1_mant[0] = b1_mantissas[bits][2]; m->b1 = 2;
m->b1 = 2; }
} break;
break; case 2:
case 2: if (m->b2) {
if(m->b2){ m->b2--;
m->b2--; mantissa = m->b2_mant[m->b2];
mantissa = m->b2_mant[m->b2]; } else {
} int bits = get_bits(gbc, 7);
else{ mantissa = b2_mantissas[bits][0];
int bits = get_bits(gbc, 7); m->b2_mant[1] = b2_mantissas[bits][1];
mantissa = b2_mantissas[bits][0]; m->b2_mant[0] = b2_mantissas[bits][2];
m->b2_mant[1] = b2_mantissas[bits][1]; m->b2 = 2;
m->b2_mant[0] = b2_mantissas[bits][2]; }
m->b2 = 2; break;
} case 3:
break; mantissa = b3_mantissas[get_bits(gbc, 3)];
case 3: break;
mantissa = b3_mantissas[get_bits(gbc, 3)]; case 4:
break; if (m->b4) {
case 4: m->b4 = 0;
if(m->b4){ mantissa = m->b4_mant;
m->b4 = 0; } else {
mantissa = m->b4_mant; int bits = get_bits(gbc, 7);
} mantissa = b4_mantissas[bits][0];
else{ m->b4_mant = b4_mantissas[bits][1];
int bits = get_bits(gbc, 7); m->b4 = 1;
mantissa = b4_mantissas[bits][0]; }
m->b4_mant = b4_mantissas[bits][1]; break;
m->b4 = 1; case 5:
} mantissa = b5_mantissas[get_bits(gbc, 4)];
break; break;
case 5: default: /* 6 to 15 */
mantissa = b5_mantissas[get_bits(gbc, 4)]; /* Shift mantissa and sign-extend it. */
break; mantissa = get_sbits(gbc, quantization_tab[bap]);
default: /* 6 to 15 */ mantissa <<= 24 - quantization_tab[bap];
/* Shift mantissa and sign-extend it. */ break;
mantissa = get_sbits(gbc, quantization_tab[bap]);
mantissa <<= 24 - quantization_tab[bap];
break;
} }
coeffs[freq] = mantissa >> exps[freq]; coeffs[freq] = mantissa >> exps[freq];
} }
@ -525,10 +522,10 @@ static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, ma
static void remove_dithering(AC3DecodeContext *s) { static void remove_dithering(AC3DecodeContext *s) {
int ch, i; int ch, i;
for(ch=1; ch<=s->fbw_channels; ch++) { for (ch = 1; ch <= s->fbw_channels; ch++) {
if(!s->dither_flag[ch] && s->channel_in_cpl[ch]) { if (!s->dither_flag[ch] && s->channel_in_cpl[ch]) {
for(i = s->start_freq[CPL_CH]; i<s->end_freq[CPL_CH]; i++) { for (i = s->start_freq[CPL_CH]; i < s->end_freq[CPL_CH]; i++) {
if(!s->bap[CPL_CH][i]) if (!s->bap[CPL_CH][i])
s->fixed_coeffs[ch][i] = 0; s->fixed_coeffs[ch][i] = 0;
} }
} }
@ -536,7 +533,7 @@ static void remove_dithering(AC3DecodeContext *s) {
} }
static void decode_transform_coeffs_ch(AC3DecodeContext *s, int blk, int ch, static void decode_transform_coeffs_ch(AC3DecodeContext *s, int blk, int ch,
mant_groups *m) mant_groups *m)
{ {
if (!s->channel_uses_aht[ch]) { if (!s->channel_uses_aht[ch]) {
ac3_decode_transform_coeffs_ch(s, ch, m); ac3_decode_transform_coeffs_ch(s, ch, m);
@ -580,7 +577,7 @@ static void decode_transform_coeffs(AC3DecodeContext *s, int blk)
} }
do do
s->fixed_coeffs[ch][end] = 0; s->fixed_coeffs[ch][end] = 0;
while(++end < 256); while (++end < 256);
} }
/* zero the dithered coefficients for appropriate channels */ /* zero the dithered coefficients for appropriate channels */
@ -598,10 +595,10 @@ static void do_rematrixing(AC3DecodeContext *s)
end = FFMIN(s->end_freq[1], s->end_freq[2]); end = FFMIN(s->end_freq[1], s->end_freq[2]);
for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) { for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) {
if(s->rematrixing_flags[bnd]) { if (s->rematrixing_flags[bnd]) {
bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd+1]); bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd + 1]);
for(i=ff_ac3_rematrix_band_tab[bnd]; i<bndend; i++) { for (i = ff_ac3_rematrix_band_tab[bnd]; i < bndend; i++) {
int tmp0 = s->fixed_coeffs[1][i]; int tmp0 = s->fixed_coeffs[1][i];
s->fixed_coeffs[1][i] += s->fixed_coeffs[2][i]; s->fixed_coeffs[1][i] += s->fixed_coeffs[2][i];
s->fixed_coeffs[2][i] = tmp0 - s->fixed_coeffs[2][i]; s->fixed_coeffs[2][i] = tmp0 - s->fixed_coeffs[2][i];
@ -619,21 +616,23 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
{ {
int ch; int ch;
for (ch=1; ch<=channels; ch++) { for (ch = 1; ch <= channels; ch++) {
if (s->block_switch[ch]) { if (s->block_switch[ch]) {
int i; int i;
float *x = s->tmp_output+128; float *x = s->tmp_output + 128;
for(i=0; i<128; i++) for (i = 0; i < 128; i++)
x[i] = s->transform_coeffs[ch][2*i]; x[i] = s->transform_coeffs[ch][2 * i];
s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x); s->imdct_256.imdct_half(&s->imdct_256, s->tmp_output, x);
s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128); s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1],
for(i=0; i<128; i++) s->tmp_output, s->window, 128);
x[i] = s->transform_coeffs[ch][2*i+1]; for (i = 0; i < 128; i++)
s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch-1], x); x[i] = s->transform_coeffs[ch][2 * i + 1];
s->imdct_256.imdct_half(&s->imdct_256, s->delay[ch - 1], x);
} else { } else {
s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]); s->imdct_512.imdct_half(&s->imdct_512, s->tmp_output, s->transform_coeffs[ch]);
s->dsp.vector_fmul_window(s->output[ch-1], s->delay[ch-1], s->tmp_output, s->window, 128); s->dsp.vector_fmul_window(s->output[ch - 1], s->delay[ch - 1],
memcpy(s->delay[ch-1], s->tmp_output+128, 128*sizeof(float)); s->tmp_output, s->window, 128);
memcpy(s->delay[ch - 1], s->tmp_output + 128, 128 * sizeof(float));
} }
} }
} }
@ -641,24 +640,25 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
/** /**
* Downmix the output to mono or stereo. * Downmix the output to mono or stereo.
*/ */
void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
int out_ch, int in_ch, int len)
{ {
int i, j; int i, j;
float v0, v1; float v0, v1;
if(out_ch == 2) { if (out_ch == 2) {
for(i=0; i<len; i++) { for (i = 0; i < len; i++) {
v0 = v1 = 0.0f; v0 = v1 = 0.0f;
for(j=0; j<in_ch; j++) { for (j = 0; j < in_ch; j++) {
v0 += samples[j][i] * matrix[j][0]; v0 += samples[j][i] * matrix[j][0];
v1 += samples[j][i] * matrix[j][1]; v1 += samples[j][i] * matrix[j][1];
} }
samples[0][i] = v0; samples[0][i] = v0;
samples[1][i] = v1; samples[1][i] = v1;
} }
} else if(out_ch == 1) { } else if (out_ch == 1) {
for(i=0; i<len; i++) { for (i = 0; i < len; i++) {
v0 = 0.0f; v0 = 0.0f;
for(j=0; j<in_ch; j++) for (j = 0; j < in_ch; j++)
v0 += samples[j][i] * matrix[j][0]; v0 += samples[j][i] * matrix[j][0];
samples[0][i] = v0; samples[0][i] = v0;
} }
@ -671,25 +671,25 @@ void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2], int out_ch, int
static void ac3_upmix_delay(AC3DecodeContext *s) static void ac3_upmix_delay(AC3DecodeContext *s)
{ {
int channel_data_size = sizeof(s->delay[0]); int channel_data_size = sizeof(s->delay[0]);
switch(s->channel_mode) { switch (s->channel_mode) {
case AC3_CHMODE_DUALMONO: case AC3_CHMODE_DUALMONO:
case AC3_CHMODE_STEREO: case AC3_CHMODE_STEREO:
/* upmix mono to stereo */ /* upmix mono to stereo */
memcpy(s->delay[1], s->delay[0], channel_data_size); memcpy(s->delay[1], s->delay[0], channel_data_size);
break; break;
case AC3_CHMODE_2F2R: case AC3_CHMODE_2F2R:
memset(s->delay[3], 0, channel_data_size); memset(s->delay[3], 0, channel_data_size);
case AC3_CHMODE_2F1R: case AC3_CHMODE_2F1R:
memset(s->delay[2], 0, channel_data_size); memset(s->delay[2], 0, channel_data_size);
break; break;
case AC3_CHMODE_3F2R: case AC3_CHMODE_3F2R:
memset(s->delay[4], 0, channel_data_size); memset(s->delay[4], 0, channel_data_size);
case AC3_CHMODE_3F1R: case AC3_CHMODE_3F1R:
memset(s->delay[3], 0, channel_data_size); memset(s->delay[3], 0, channel_data_size);
case AC3_CHMODE_3F: case AC3_CHMODE_3F:
memcpy(s->delay[2], s->delay[1], channel_data_size); memcpy(s->delay[2], s->delay[1], channel_data_size);
memset(s->delay[1], 0, channel_data_size); memset(s->delay[1], 0, channel_data_size);
break; break;
} }
} }
@ -742,7 +742,7 @@ static void decode_band_structure(GetBitContext *gbc, int blk, int eac3,
bnd_sz[0] = ecpl ? 6 : 12; bnd_sz[0] = ecpl ? 6 : 12;
for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) { for (bnd = 0, subbnd = 1; subbnd < n_subbands; subbnd++) {
int subbnd_size = (ecpl && subbnd < 4) ? 6 : 12; int subbnd_size = (ecpl && subbnd < 4) ? 6 : 12;
if (band_struct[subbnd-1]) { if (band_struct[subbnd - 1]) {
n_bands--; n_bands--;
bnd_sz[bnd] += subbnd_size; bnd_sz[bnd] += subbnd_size;
} else { } else {
@ -779,7 +779,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (s->block_switch_syntax) { if (s->block_switch_syntax) {
for (ch = 1; ch <= fbw_channels; ch++) { for (ch = 1; ch <= fbw_channels; ch++) {
s->block_switch[ch] = get_bits1(gbc); s->block_switch[ch] = get_bits1(gbc);
if(ch > 1 && s->block_switch[ch] != s->block_switch[1]) if (ch > 1 && s->block_switch[ch] != s->block_switch[1])
different_transforms = 1; different_transforms = 1;
} }
} }
@ -794,13 +794,13 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* dynamic range */ /* dynamic range */
i = !(s->channel_mode); i = !(s->channel_mode);
do { do {
if(get_bits1(gbc)) { if (get_bits1(gbc)) {
s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)]-1.0) * s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)] - 1.0) *
s->drc_scale)+1.0; s->drc_scale) + 1.0;
} else if(blk == 0) { } else if (blk == 0) {
s->dynamic_range[i] = 1.0f; s->dynamic_range[i] = 1.0f;
} }
} while(i--); } while (i--);
/* spectral extension strategy */ /* spectral extension strategy */
if (s->eac3 && (!blk || get_bits1(gbc))) { if (s->eac3 && (!blk || get_bits1(gbc))) {
@ -881,7 +881,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
bandsize = s->spx_band_sizes[bnd]; bandsize = s->spx_band_sizes[bnd];
nratio = ((float)((bin + (bandsize >> 1))) / s->spx_dst_end_freq) - spx_blend; nratio = ((float)((bin + (bandsize >> 1))) / s->spx_dst_end_freq) - spx_blend;
nratio = av_clipf(nratio, 0.0f, 1.0f); nratio = av_clipf(nratio, 0.0f, 1.0f);
nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3) to give unity variance nblend = sqrtf(3.0f * nratio); // noise is scaled by sqrt(3)
// to give unity variance
sblend = sqrtf(1.0f - nratio); sblend = sqrtf(1.0f - nratio);
bin += bandsize; bin += bandsize;
@ -891,7 +892,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (spx_coord_exp == 15) spx_coord_mant <<= 1; if (spx_coord_exp == 15) spx_coord_mant <<= 1;
else spx_coord_mant += 4; else spx_coord_mant += 4;
spx_coord_mant <<= (25 - spx_coord_exp - master_spx_coord); spx_coord_mant <<= (25 - spx_coord_exp - master_spx_coord);
spx_coord = spx_coord_mant * (1.0f/(1<<23)); spx_coord = spx_coord_mant * (1.0f / (1 << 23));
/* multiply noise and signal blending factors by spx coordinate */ /* multiply noise and signal blending factors by spx coordinate */
s->spx_noise_blend [ch][bnd] = nblend * spx_coord; s->spx_noise_blend [ch][bnd] = nblend * spx_coord;
@ -964,8 +965,9 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->phase_flags_in_use = 0; s->phase_flags_in_use = 0;
} }
} else if (!s->eac3) { } else if (!s->eac3) {
if(!blk) { if (!blk) {
av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must be present in block 0\n"); av_log(s->avctx, AV_LOG_ERROR, "new coupling strategy must "
"be present in block 0\n");
return -1; return -1;
} else { } else {
s->cpl_in_use[blk] = s->cpl_in_use[blk-1]; s->cpl_in_use[blk] = s->cpl_in_use[blk-1];
@ -994,7 +996,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord); s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord);
} }
} else if (!blk) { } else if (!blk) {
av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must be present in block 0\n"); av_log(s->avctx, AV_LOG_ERROR, "new coupling coordinates must "
"be present in block 0\n");
return -1; return -1;
} }
} else { } else {
@ -1019,10 +1022,11 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
} else if (s->spx_in_use && s->spx_src_start_freq <= 61) { } else if (s->spx_in_use && s->spx_src_start_freq <= 61) {
s->num_rematrixing_bands--; s->num_rematrixing_bands--;
} }
for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++)
s->rematrixing_flags[bnd] = get_bits1(gbc); s->rematrixing_flags[bnd] = get_bits1(gbc);
} else if (!blk) { } else if (!blk) {
av_log(s->avctx, AV_LOG_WARNING, "Warning: new rematrixing strategy not present in block 0\n"); av_log(s->avctx, AV_LOG_WARNING, "Warning: "
"new rematrixing strategy not present in block 0\n");
s->num_rematrixing_bands = 0; s->num_rematrixing_bands = 0;
} }
} }
@ -1031,7 +1035,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
for (ch = !cpl_in_use; ch <= s->channels; ch++) { for (ch = !cpl_in_use; ch <= s->channels; ch++) {
if (!s->eac3) if (!s->eac3)
s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch)); s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch));
if(s->exp_strategy[blk][ch] != EXP_REUSE) if (s->exp_strategy[blk][ch] != EXP_REUSE)
bit_alloc_stages[ch] = 3; bit_alloc_stages[ch] = 3;
} }
@ -1054,8 +1058,8 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->end_freq[ch] = bandwidth_code * 3 + 73; s->end_freq[ch] = bandwidth_code * 3 + 73;
} }
group_size = 3 << (s->exp_strategy[blk][ch] - 1); group_size = 3 << (s->exp_strategy[blk][ch] - 1);
s->num_exp_groups[ch] = (s->end_freq[ch]+group_size-4) / group_size; s->num_exp_groups[ch] = (s->end_freq[ch] + group_size-4) / group_size;
if(blk > 0 && s->end_freq[ch] != prev) if (blk > 0 && s->end_freq[ch] != prev)
memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS); memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
} }
} }
@ -1074,7 +1078,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n"); av_log(s->avctx, AV_LOG_ERROR, "exponent out-of-range\n");
return -1; return -1;
} }
if(ch != CPL_CH && ch != s->lfe_ch) if (ch != CPL_CH && ch != s->lfe_ch)
skip_bits(gbc, 2); /* skip gainrng */ skip_bits(gbc, 2); /* skip gainrng */
} }
} }
@ -1087,17 +1091,18 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)]; s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)];
s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)]; s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)];
s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)]; s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)];
for(ch=!cpl_in_use; ch<=s->channels; ch++) for (ch = !cpl_in_use; ch <= s->channels; ch++)
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
} else if (!blk) { } else if (!blk) {
av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must be present in block 0\n"); av_log(s->avctx, AV_LOG_ERROR, "new bit allocation info must "
"be present in block 0\n");
return -1; return -1;
} }
} }
/* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */ /* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */
if(!s->eac3 || !blk){ if (!s->eac3 || !blk) {
if(s->snr_offset_strategy && get_bits1(gbc)) { if (s->snr_offset_strategy && get_bits1(gbc)) {
int snr = 0; int snr = 0;
int csnr; int csnr;
csnr = (get_bits(gbc, 6) - 15) << 4; csnr = (get_bits(gbc, 6) - 15) << 4;
@ -1106,7 +1111,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
if (ch == i || s->snr_offset_strategy == 2) if (ch == i || s->snr_offset_strategy == 2)
snr = (csnr + get_bits(gbc, 4)) << 2; snr = (csnr + get_bits(gbc, 4)) << 2;
/* run at least last bit allocation stage if snr offset changes */ /* run at least last bit allocation stage if snr offset changes */
if(blk && s->snr_offset[ch] != snr) { if (blk && s->snr_offset[ch] != snr) {
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1); bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1);
} }
s->snr_offset[ch] = snr; s->snr_offset[ch] = snr;
@ -1116,7 +1121,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int prev = s->fast_gain[ch]; int prev = s->fast_gain[ch];
s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
/* run last 2 bit allocation stages if fast gain changes */ /* run last 2 bit allocation stages if fast gain changes */
if(blk && prev != s->fast_gain[ch]) if (blk && prev != s->fast_gain[ch])
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
} }
} }
@ -1132,7 +1137,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int prev = s->fast_gain[ch]; int prev = s->fast_gain[ch];
s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)];
/* run last 2 bit allocation stages if fast gain changes */ /* run last 2 bit allocation stages if fast gain changes */
if(blk && prev != s->fast_gain[ch]) if (blk && prev != s->fast_gain[ch])
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
} }
} else if (s->eac3 && !blk) { } else if (s->eac3 && !blk) {
@ -1152,14 +1157,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
int sl = get_bits(gbc, 3); int sl = get_bits(gbc, 3);
/* run last 2 bit allocation stages for coupling channel if /* run last 2 bit allocation stages for coupling channel if
coupling leak changes */ coupling leak changes */
if(blk && (fl != s->bit_alloc_params.cpl_fast_leak || if (blk && (fl != s->bit_alloc_params.cpl_fast_leak ||
sl != s->bit_alloc_params.cpl_slow_leak)) { sl != s->bit_alloc_params.cpl_slow_leak)) {
bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2); bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2);
} }
s->bit_alloc_params.cpl_fast_leak = fl; s->bit_alloc_params.cpl_fast_leak = fl;
s->bit_alloc_params.cpl_slow_leak = sl; s->bit_alloc_params.cpl_slow_leak = sl;
} else if (!s->eac3 && !blk) { } else if (!s->eac3 && !blk) {
av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must be present in block 0\n"); av_log(s->avctx, AV_LOG_ERROR, "new coupling leak info must "
"be present in block 0\n");
return -1; return -1;
} }
s->first_cpl_leak = 0; s->first_cpl_leak = 0;
@ -1183,40 +1189,40 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
for (seg = 0; seg < s->dba_nsegs[ch]; seg++) { for (seg = 0; seg < s->dba_nsegs[ch]; seg++) {
s->dba_offsets[ch][seg] = get_bits(gbc, 5); s->dba_offsets[ch][seg] = get_bits(gbc, 5);
s->dba_lengths[ch][seg] = get_bits(gbc, 4); s->dba_lengths[ch][seg] = get_bits(gbc, 4);
s->dba_values[ch][seg] = get_bits(gbc, 3); s->dba_values[ch][seg] = get_bits(gbc, 3);
} }
/* run last 2 bit allocation stages if new dba values */ /* run last 2 bit allocation stages if new dba values */
bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2);
} }
} }
} else if(blk == 0) { } else if (blk == 0) {
for(ch=0; ch<=s->channels; ch++) { for (ch = 0; ch <= s->channels; ch++) {
s->dba_mode[ch] = DBA_NONE; s->dba_mode[ch] = DBA_NONE;
} }
} }
/* Bit allocation */ /* Bit allocation */
for(ch=!cpl_in_use; ch<=s->channels; ch++) { for (ch = !cpl_in_use; ch <= s->channels; ch++) {
if(bit_alloc_stages[ch] > 2) { if (bit_alloc_stages[ch] > 2) {
/* Exponent mapping into PSD and PSD integration */ /* Exponent mapping into PSD and PSD integration */
ff_ac3_bit_alloc_calc_psd(s->dexps[ch], ff_ac3_bit_alloc_calc_psd(s->dexps[ch],
s->start_freq[ch], s->end_freq[ch], s->start_freq[ch], s->end_freq[ch],
s->psd[ch], s->band_psd[ch]); s->psd[ch], s->band_psd[ch]);
} }
if(bit_alloc_stages[ch] > 1) { if (bit_alloc_stages[ch] > 1) {
/* Compute excitation function, Compute masking curve, and /* Compute excitation function, Compute masking curve, and
Apply delta bit allocation */ Apply delta bit allocation */
if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch], if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch],
s->start_freq[ch], s->end_freq[ch], s->start_freq[ch], s->end_freq[ch],
s->fast_gain[ch], (ch == s->lfe_ch), s->fast_gain[ch], (ch == s->lfe_ch),
s->dba_mode[ch], s->dba_nsegs[ch], s->dba_mode[ch], s->dba_nsegs[ch],
s->dba_offsets[ch], s->dba_lengths[ch], s->dba_offsets[ch], s->dba_lengths[ch],
s->dba_values[ch], s->mask[ch])) { s->dba_values[ch], s->mask[ch])) {
av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n"); av_log(s->avctx, AV_LOG_ERROR, "error in bit allocation\n");
return -1; return -1;
} }
} }
if(bit_alloc_stages[ch] > 0) { if (bit_alloc_stages[ch] > 0) {
/* Compute bit allocation */ /* Compute bit allocation */
const uint8_t *bap_tab = s->channel_uses_aht[ch] ? const uint8_t *bap_tab = s->channel_uses_aht[ch] ?
ff_eac3_hebap_tab : ff_ac3_bap_tab; ff_eac3_hebap_tab : ff_ac3_bap_tab;
@ -1231,7 +1237,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* unused dummy data */ /* unused dummy data */
if (s->skip_syntax && get_bits1(gbc)) { if (s->skip_syntax && get_bits1(gbc)) {
int skipl = get_bits(gbc, 9); int skipl = get_bits(gbc, 9);
while(skipl--) while (skipl--)
skip_bits(gbc, 8); skip_bits(gbc, 8);
} }
@ -1242,18 +1248,19 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
/* TODO: generate enhanced coupling coordinates and uncouple */ /* TODO: generate enhanced coupling coordinates and uncouple */
/* recover coefficients if rematrixing is in use */ /* recover coefficients if rematrixing is in use */
if(s->channel_mode == AC3_CHMODE_STEREO) if (s->channel_mode == AC3_CHMODE_STEREO)
do_rematrixing(s); do_rematrixing(s);
/* apply scaling to coefficients (headroom, dynrng) */ /* apply scaling to coefficients (headroom, dynrng) */
for(ch=1; ch<=s->channels; ch++) { for (ch = 1; ch <= s->channels; ch++) {
float gain = s->mul_bias / 4194304.0f; float gain = s->mul_bias / 4194304.0f;
if(s->channel_mode == AC3_CHMODE_DUALMONO) { if (s->channel_mode == AC3_CHMODE_DUALMONO) {
gain *= s->dynamic_range[2-ch]; gain *= s->dynamic_range[2 - ch];
} else { } else {
gain *= s->dynamic_range[0]; gain *= s->dynamic_range[0];
} }
s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256); s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch],
s->fixed_coeffs[ch], gain, 256);
} }
/* apply spectral extension to high frequency bins */ /* apply spectral extension to high frequency bins */
@ -1267,27 +1274,30 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
downmix_output = s->channels != s->out_channels && downmix_output = s->channels != s->out_channels &&
!((s->output_mode & AC3_OUTPUT_LFEON) && !((s->output_mode & AC3_OUTPUT_LFEON) &&
s->fbw_channels == s->out_channels); s->fbw_channels == s->out_channels);
if(different_transforms) { if (different_transforms) {
/* the delay samples have already been downmixed, so we upmix the delay /* the delay samples have already been downmixed, so we upmix the delay
samples in order to reconstruct all channels before downmixing. */ samples in order to reconstruct all channels before downmixing. */
if(s->downmixed) { if (s->downmixed) {
s->downmixed = 0; s->downmixed = 0;
ac3_upmix_delay(s); ac3_upmix_delay(s);
} }
do_imdct(s, s->channels); do_imdct(s, s->channels);
if(downmix_output) { if (downmix_output) {
s->dsp.ac3_downmix(s->output, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); s->dsp.ac3_downmix(s->output, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
} }
} else { } else {
if(downmix_output) { if (downmix_output) {
s->dsp.ac3_downmix(s->transform_coeffs+1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); s->dsp.ac3_downmix(s->transform_coeffs + 1, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
} }
if(downmix_output && !s->downmixed) { if (downmix_output && !s->downmixed) {
s->downmixed = 1; s->downmixed = 1;
s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128); s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels,
s->fbw_channels, 128);
} }
do_imdct(s, s->out_channels); do_imdct(s, s->out_channels);
@ -1327,33 +1337,34 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
err = parse_frame_header(s); err = parse_frame_header(s);
if (err) { if (err) {
switch(err) { switch (err) {
case AAC_AC3_PARSE_ERROR_SYNC: case AAC_AC3_PARSE_ERROR_SYNC:
av_log(avctx, AV_LOG_ERROR, "frame sync error\n"); av_log(avctx, AV_LOG_ERROR, "frame sync error\n");
return -1; return -1;
case AAC_AC3_PARSE_ERROR_BSID: case AAC_AC3_PARSE_ERROR_BSID:
av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n"); av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n");
break; break;
case AAC_AC3_PARSE_ERROR_SAMPLE_RATE: case AAC_AC3_PARSE_ERROR_SAMPLE_RATE:
av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n"); av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
break; break;
case AAC_AC3_PARSE_ERROR_FRAME_SIZE: case AAC_AC3_PARSE_ERROR_FRAME_SIZE:
av_log(avctx, AV_LOG_ERROR, "invalid frame size\n"); av_log(avctx, AV_LOG_ERROR, "invalid frame size\n");
break; break;
case AAC_AC3_PARSE_ERROR_FRAME_TYPE: case AAC_AC3_PARSE_ERROR_FRAME_TYPE:
/* skip frame if CRC is ok. otherwise use error concealment. */ /* skip frame if CRC is ok. otherwise use error concealment. */
/* TODO: add support for substreams and dependent frames */ /* TODO: add support for substreams and dependent frames */
if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) { if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n"); av_log(avctx, AV_LOG_ERROR, "unsupported frame type : "
*got_frame_ptr = 0; "skipping frame\n");
return s->frame_size; *got_frame_ptr = 0;
} else { return s->frame_size;
av_log(avctx, AV_LOG_ERROR, "invalid frame type\n"); } else {
} av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
break; }
default: break;
av_log(avctx, AV_LOG_ERROR, "invalid header\n"); default:
break; av_log(avctx, AV_LOG_ERROR, "invalid header\n");
break;
} }
} else { } else {
/* check that reported frame size fits in input buffer */ /* check that reported frame size fits in input buffer */
@ -1362,7 +1373,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
err = AAC_AC3_PARSE_ERROR_FRAME_SIZE; err = AAC_AC3_PARSE_ERROR_FRAME_SIZE;
} else if (avctx->err_recognition & AV_EF_CRCCHECK) { } else if (avctx->err_recognition & AV_EF_CRCCHECK) {
/* check for crc mismatch */ /* check for crc mismatch */
if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) { if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2],
s->frame_size - 2)) {
av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n"); av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n");
err = AAC_AC3_PARSE_ERROR_CRC; err = AAC_AC3_PARSE_ERROR_CRC;
} }
@ -1372,12 +1384,12 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
/* if frame is ok, set audio parameters */ /* if frame is ok, set audio parameters */
if (!err) { if (!err) {
avctx->sample_rate = s->sample_rate; avctx->sample_rate = s->sample_rate;
avctx->bit_rate = s->bit_rate; avctx->bit_rate = s->bit_rate;
/* channel config */ /* channel config */
s->out_channels = s->channels; s->out_channels = s->channels;
s->output_mode = s->channel_mode; s->output_mode = s->channel_mode;
if(s->lfe_on) if (s->lfe_on)
s->output_mode |= AC3_OUTPUT_LFEON; s->output_mode |= AC3_OUTPUT_LFEON;
if (avctx->request_channels > 0 && avctx->request_channels <= 2 && if (avctx->request_channels > 0 && avctx->request_channels <= 2 &&
avctx->request_channels < s->channels) { avctx->request_channels < s->channels) {
@ -1385,7 +1397,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode]; s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode];
} }
avctx->channels = s->out_channels; avctx->channels = s->out_channels;
avctx->channel_layout = s->channel_layout; avctx->channel_layout = s->channel_layout;
s->loro_center_mix_level = gain_levels[ center_levels[s-> center_mix_level]]; s->loro_center_mix_level = gain_levels[ center_levels[s-> center_mix_level]];
@ -1393,13 +1405,13 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
s->ltrt_center_mix_level = LEVEL_MINUS_3DB; s->ltrt_center_mix_level = LEVEL_MINUS_3DB;
s->ltrt_surround_mix_level = LEVEL_MINUS_3DB; s->ltrt_surround_mix_level = LEVEL_MINUS_3DB;
/* set downmixing coefficients if needed */ /* set downmixing coefficients if needed */
if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && if (s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) &&
s->fbw_channels == s->out_channels)) { s->fbw_channels == s->out_channels)) {
set_downmix_coeffs(s); set_downmix_coeffs(s);
} }
} else if (!s->out_channels) { } else if (!s->out_channels) {
s->out_channels = avctx->channels; s->out_channels = avctx->channels;
if(s->out_channels < s->channels) if (s->out_channels < s->channels)
s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO;
} }
/* set audio service type based on bitstream mode for AC-3 */ /* set audio service type based on bitstream mode for AC-3 */
@ -1476,19 +1488,19 @@ static const AVClass ac3_decoder_class = {
}; };
AVCodec ff_ac3_decoder = { AVCodec ff_ac3_decoder = {
.name = "ac3", .name = "ac3",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_AC3, .id = CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext), .priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init, .init = ac3_decode_init,
.close = ac3_decode_end, .close = ac3_decode_end,
.decode = ac3_decode_frame, .decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE AV_SAMPLE_FMT_S16,
}, AV_SAMPLE_FMT_NONE },
.priv_class = &ac3_decoder_class, .priv_class = &ac3_decoder_class,
}; };
#if CONFIG_EAC3_DECODER #if CONFIG_EAC3_DECODER
@ -1498,19 +1510,20 @@ static const AVClass eac3_decoder_class = {
.option = options, .option = options,
.version = LIBAVUTIL_VERSION_INT, .version = LIBAVUTIL_VERSION_INT,
}; };
AVCodec ff_eac3_decoder = { AVCodec ff_eac3_decoder = {
.name = "eac3", .name = "eac3",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_EAC3, .id = CODEC_ID_EAC3,
.priv_data_size = sizeof (AC3DecodeContext), .priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init, .init = ac3_decode_init,
.close = ac3_decode_end, .close = ac3_decode_end,
.decode = ac3_decode_frame, .decode = ac3_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"), .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.sample_fmts = (const enum AVSampleFormat[]) { .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE AV_SAMPLE_FMT_S16,
}, AV_SAMPLE_FMT_NONE },
.priv_class = &eac3_decoder_class, .priv_class = &eac3_decoder_class,
}; };
#endif #endif

libavcodec/adpcmenc.c
@ -66,37 +66,45 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
if (avctx->channels > 2) if (avctx->channels > 2)
return -1; /* only stereo or mono =) */ return -1; /* only stereo or mono =) */
if(avctx->trellis && (unsigned)avctx->trellis > 16U){ if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n"); av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
return -1; return -1;
} }
if (avctx->trellis) { if (avctx->trellis) {
int frontier = 1 << avctx->trellis; int frontier = 1 << avctx->trellis;
int max_paths = frontier * FREEZE_INTERVAL; int max_paths = frontier * FREEZE_INTERVAL;
FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error); FF_ALLOC_OR_GOTO(avctx, s->paths,
FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error); max_paths * sizeof(*s->paths), error);
FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error); FF_ALLOC_OR_GOTO(avctx, s->node_buf,
FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error); 2 * frontier * sizeof(*s->node_buf), error);
FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
2 * frontier * sizeof(*s->nodep_buf), error);
FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
65536 * sizeof(*s->trellis_hash), error);
} }
avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id); avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
switch(avctx->codec->id) { switch (avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_WAV: case CODEC_ID_ADPCM_IMA_WAV:
avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */ /* each 16 bits sample gives one nibble
/* and we have 4 bytes per channel overhead */ and we have 4 bytes per channel overhead */
avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
(4 * avctx->channels) + 1;
/* seems frame_size isn't taken into account...
have to buffer the samples :-( */
avctx->block_align = BLKSIZE; avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4; avctx->bits_per_coded_sample = 4;
/* seems frame_size isn't taken into account... have to buffer the samples :-( */
break; break;
case CODEC_ID_ADPCM_IMA_QT: case CODEC_ID_ADPCM_IMA_QT:
avctx->frame_size = 64; avctx->frame_size = 64;
avctx->block_align = 34 * avctx->channels; avctx->block_align = 34 * avctx->channels;
break; break;
case CODEC_ID_ADPCM_MS: case CODEC_ID_ADPCM_MS:
avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */ /* each 16 bits sample gives one nibble
/* and we have 7 bytes per channel overhead */ and we have 7 bytes per channel overhead */
avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
avctx->block_align = BLKSIZE; avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4; avctx->bits_per_coded_sample = 4;
avctx->extradata_size = 32; avctx->extradata_size = 32;
@ -111,14 +119,15 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
} }
break; break;
case CODEC_ID_ADPCM_YAMAHA: case CODEC_ID_ADPCM_YAMAHA:
avctx->frame_size = BLKSIZE * avctx->channels; avctx->frame_size = BLKSIZE * avctx->channels;
avctx->block_align = BLKSIZE; avctx->block_align = BLKSIZE;
break; break;
case CODEC_ID_ADPCM_SWF: case CODEC_ID_ADPCM_SWF:
if (avctx->sample_rate != 11025 && if (avctx->sample_rate != 11025 &&
avctx->sample_rate != 22050 && avctx->sample_rate != 22050 &&
avctx->sample_rate != 44100) { avctx->sample_rate != 44100) {
av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n"); av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
"22050 or 44100\n");
goto error; goto error;
} }
avctx->frame_size = 512 * (avctx->sample_rate / 11025); avctx->frame_size = 512 * (avctx->sample_rate / 11025);
@ -127,7 +136,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
goto error; goto error;
} }
avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame = avcodec_alloc_frame();
avctx->coded_frame->key_frame= 1; avctx->coded_frame->key_frame= 1;
return 0; return 0;
@ -152,19 +161,23 @@ static av_cold int adpcm_encode_close(AVCodecContext *avctx)
} }
static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample) static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c,
short sample)
{ {
int delta = sample - c->prev_sample; int delta = sample - c->prev_sample;
int nibble = FFMIN(7, abs(delta)*4/ff_adpcm_step_table[c->step_index]) + (delta<0)*8; int nibble = FFMIN(7, abs(delta) * 4 /
c->prev_sample += ((ff_adpcm_step_table[c->step_index] * ff_adpcm_yamaha_difflookup[nibble]) / 8); ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
ff_adpcm_yamaha_difflookup[nibble]) / 8);
c->prev_sample = av_clip_int16(c->prev_sample); c->prev_sample = av_clip_int16(c->prev_sample);
c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
return nibble; return nibble;
} }
static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c, short sample) static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
short sample)
{ {
int delta = sample - c->prev_sample; int delta = sample - c->prev_sample;
int diff, step = ff_adpcm_step_table[c->step_index]; int diff, step = ff_adpcm_step_table[c->step_index];
int nibble = 8*(delta < 0); int nibble = 8*(delta < 0);
@ -173,17 +186,17 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
if (delta >= step) { if (delta >= step) {
nibble |= 4; nibble |= 4;
delta -= step; delta -= step;
} }
step >>= 1; step >>= 1;
if (delta >= step) { if (delta >= step) {
nibble |= 2; nibble |= 2;
delta -= step; delta -= step;
} }
step >>= 1; step >>= 1;
if (delta >= step) { if (delta >= step) {
nibble |= 1; nibble |= 1;
delta -= step; delta -= step;
} }
diff -= delta; diff -= delta;
@ -193,47 +206,53 @@ static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
c->prev_sample += diff; c->prev_sample += diff;
c->prev_sample = av_clip_int16(c->prev_sample); c->prev_sample = av_clip_int16(c->prev_sample);
c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88); c->step_index = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
return nibble; return nibble;
} }
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c,
short sample)
{ {
int predictor, nibble, bias; int predictor, nibble, bias;
predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; predictor = (((c->sample1) * (c->coeff1)) +
(( c->sample2) * (c->coeff2))) / 64;
nibble= sample - predictor; nibble = sample - predictor;
if(nibble>=0) bias= c->idelta/2; if (nibble >= 0)
else bias=-c->idelta/2; bias = c->idelta / 2;
else
bias = -c->idelta / 2;
nibble= (nibble + bias) / c->idelta; nibble = (nibble + bias) / c->idelta;
nibble= av_clip(nibble, -8, 7)&0x0F; nibble = av_clip(nibble, -8, 7) & 0x0F;
predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; predictor += (signed)((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
c->sample2 = c->sample1; c->sample2 = c->sample1;
c->sample1 = av_clip_int16(predictor); c->sample1 = av_clip_int16(predictor);
c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
if (c->idelta < 16) c->idelta = 16; if (c->idelta < 16)
c->idelta = 16;
return nibble; return nibble;
} }
static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
short sample)
{ {
int nibble, delta; int nibble, delta;
if(!c->step) { if (!c->step) {
c->predictor = 0; c->predictor = 0;
c->step = 127; c->step = 127;
} }
delta = sample - c->predictor; delta = sample - c->predictor;
nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8); c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
c->predictor = av_clip_int16(c->predictor); c->predictor = av_clip_int16(c->predictor);
@ -249,57 +268,61 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
//FIXME 6% faster if frontier is a compile-time constant //FIXME 6% faster if frontier is a compile-time constant
ADPCMEncodeContext *s = avctx->priv_data; ADPCMEncodeContext *s = avctx->priv_data;
const int frontier = 1 << avctx->trellis; const int frontier = 1 << avctx->trellis;
const int stride = avctx->channels; const int stride = avctx->channels;
const int version = avctx->codec->id; const int version = avctx->codec->id;
TrellisPath *paths = s->paths, *p; TrellisPath *paths = s->paths, *p;
TrellisNode *node_buf = s->node_buf; TrellisNode *node_buf = s->node_buf;
TrellisNode **nodep_buf = s->nodep_buf; TrellisNode **nodep_buf = s->nodep_buf;
TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
TrellisNode **nodes_next = nodep_buf + frontier; TrellisNode **nodes_next = nodep_buf + frontier;
int pathn = 0, froze = -1, i, j, k, generation = 0; int pathn = 0, froze = -1, i, j, k, generation = 0;
uint8_t *hash = s->trellis_hash; uint8_t *hash = s->trellis_hash;
memset(hash, 0xff, 65536 * sizeof(*hash)); memset(hash, 0xff, 65536 * sizeof(*hash));
memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf)); memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
nodes[0] = node_buf + frontier; nodes[0] = node_buf + frontier;
nodes[0]->ssd = 0; nodes[0]->ssd = 0;
nodes[0]->path = 0; nodes[0]->path = 0;
nodes[0]->step = c->step_index; nodes[0]->step = c->step_index;
nodes[0]->sample1 = c->sample1; nodes[0]->sample1 = c->sample1;
nodes[0]->sample2 = c->sample2; nodes[0]->sample2 = c->sample2;
if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF)) if (version == CODEC_ID_ADPCM_IMA_WAV ||
version == CODEC_ID_ADPCM_IMA_QT ||
version == CODEC_ID_ADPCM_SWF)
nodes[0]->sample1 = c->prev_sample; nodes[0]->sample1 = c->prev_sample;
if(version == CODEC_ID_ADPCM_MS) if (version == CODEC_ID_ADPCM_MS)
nodes[0]->step = c->idelta; nodes[0]->step = c->idelta;
if(version == CODEC_ID_ADPCM_YAMAHA) { if (version == CODEC_ID_ADPCM_YAMAHA) {
if(c->step == 0) { if (c->step == 0) {
nodes[0]->step = 127; nodes[0]->step = 127;
nodes[0]->sample1 = 0; nodes[0]->sample1 = 0;
} else { } else {
nodes[0]->step = c->step; nodes[0]->step = c->step;
nodes[0]->sample1 = c->predictor; nodes[0]->sample1 = c->predictor;
} }
} }
for(i=0; i<n; i++) { for (i = 0; i < n; i++) {
TrellisNode *t = node_buf + frontier*(i&1); TrellisNode *t = node_buf + frontier*(i&1);
TrellisNode **u; TrellisNode **u;
int sample = samples[i*stride]; int sample = samples[i * stride];
int heap_pos = 0; int heap_pos = 0;
memset(nodes_next, 0, frontier*sizeof(TrellisNode*)); memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
for(j=0; j<frontier && nodes[j]; j++) { for (j = 0; j < frontier && nodes[j]; j++) {
// higher j have higher ssd already, so they're likely to yield a suboptimal next sample too // higher j have higher ssd already, so they're likely
const int range = (j < frontier/2) ? 1 : 0; // to yield a suboptimal next sample too
const int step = nodes[j]->step; const int range = (j < frontier / 2) ? 1 : 0;
const int step = nodes[j]->step;
int nidx; int nidx;
if(version == CODEC_ID_ADPCM_MS) { if (version == CODEC_ID_ADPCM_MS) {
const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64; const int predictor = ((nodes[j]->sample1 * c->coeff1) +
const int div = (sample - predictor) / step; (nodes[j]->sample2 * c->coeff2)) / 64;
const int div = (sample - predictor) / step;
const int nmin = av_clip(div-range, -8, 6); const int nmin = av_clip(div-range, -8, 6);
const int nmax = av_clip(div+range, -7, 7); const int nmax = av_clip(div+range, -7, 7);
for(nidx=nmin; nidx<=nmax; nidx++) { for (nidx = nmin; nidx <= nmax; nidx++) {
const int nibble = nidx & 0xf; const int nibble = nidx & 0xf;
int dec_sample = predictor + nidx * step; int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\ #define STORE_NODE(NAME, STEP_INDEX)\
int d;\ int d;\
uint32_t ssd;\ uint32_t ssd;\
@ -334,25 +357,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
} else {\ } else {\
/* Try to replace one of the leaf nodes with the new \ /* Try to replace one of the leaf nodes with the new \
* one, but try a different slot each time. */\ * one, but try a different slot each time. */\
pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\ pos = (frontier >> 1) +\
(heap_pos & ((frontier >> 1) - 1));\
if (ssd > nodes_next[pos]->ssd)\ if (ssd > nodes_next[pos]->ssd)\
goto next_##NAME;\ goto next_##NAME;\
heap_pos++;\ heap_pos++;\
}\ }\
*h = generation;\ *h = generation;\
u = nodes_next[pos];\ u = nodes_next[pos];\
if(!u) {\ if (!u) {\
assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\ assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
u = t++;\ u = t++;\
nodes_next[pos] = u;\ nodes_next[pos] = u;\
u->path = pathn++;\ u->path = pathn++;\
}\ }\
u->ssd = ssd;\ u->ssd = ssd;\
u->step = STEP_INDEX;\ u->step = STEP_INDEX;\
u->sample2 = nodes[j]->sample1;\ u->sample2 = nodes[j]->sample1;\
u->sample1 = dec_sample;\ u->sample1 = dec_sample;\
paths[u->path].nibble = nibble;\ paths[u->path].nibble = nibble;\
paths[u->path].prev = nodes[j]->path;\ paths[u->path].prev = nodes[j]->path;\
/* Sift the newly inserted node up in the heap to \ /* Sift the newly inserted node up in the heap to \
* restore the heap property. */\ * restore the heap property. */\
while (pos > 0) {\ while (pos > 0) {\
@ -363,24 +387,34 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
pos = parent;\ pos = parent;\
}\ }\
next_##NAME:; next_##NAME:;
STORE_NODE(ms, FFMAX(16, (ff_adpcm_AdaptationTable[nibble] * step) >> 8)); STORE_NODE(ms, FFMAX(16,
(ff_adpcm_AdaptationTable[nibble] * step) >> 8));
} }
} else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) { } else if (version == CODEC_ID_ADPCM_IMA_WAV ||
version == CODEC_ID_ADPCM_IMA_QT ||
version == CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\ #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
const int predictor = nodes[j]->sample1;\ const int predictor = nodes[j]->sample1;\
const int div = (sample - predictor) * 4 / STEP_TABLE;\ const int div = (sample - predictor) * 4 / STEP_TABLE;\
int nmin = av_clip(div-range, -7, 6);\ int nmin = av_clip(div - range, -7, 6);\
int nmax = av_clip(div+range, -6, 7);\ int nmax = av_clip(div + range, -6, 7);\
if(nmin<=0) nmin--; /* distinguish -0 from +0 */\ if (nmin <= 0)\
if(nmax<0) nmax--;\ nmin--; /* distinguish -0 from +0 */\
for(nidx=nmin; nidx<=nmax; nidx++) {\ if (nmax < 0)\
const int nibble = nidx<0 ? 7-nidx : nidx;\ nmax--;\
int dec_sample = predictor + (STEP_TABLE * ff_adpcm_yamaha_difflookup[nibble]) / 8;\ for (nidx = nmin; nidx <= nmax; nidx++) {\
const int nibble = nidx < 0 ? 7 - nidx : nidx;\
int dec_sample = predictor +\
(STEP_TABLE *\
ff_adpcm_yamaha_difflookup[nibble]) / 8;\
STORE_NODE(NAME, STEP_INDEX);\ STORE_NODE(NAME, STEP_INDEX);\
} }
LOOP_NODES(ima, ff_adpcm_step_table[step], av_clip(step + ff_adpcm_index_table[nibble], 0, 88)); LOOP_NODES(ima, ff_adpcm_step_table[step],
av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
} else { //CODEC_ID_ADPCM_YAMAHA } else { //CODEC_ID_ADPCM_YAMAHA
LOOP_NODES(yamaha, step, av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8, 127, 24567)); LOOP_NODES(yamaha, step,
av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
127, 24567));
#undef LOOP_NODES #undef LOOP_NODES
#undef STORE_NODE #undef STORE_NODE
} }
@ -397,16 +431,16 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
} }
// prevent overflow // prevent overflow
if(nodes[0]->ssd > (1<<28)) { if (nodes[0]->ssd > (1 << 28)) {
for(j=1; j<frontier && nodes[j]; j++) for (j = 1; j < frontier && nodes[j]; j++)
nodes[j]->ssd -= nodes[0]->ssd; nodes[j]->ssd -= nodes[0]->ssd;
nodes[0]->ssd = 0; nodes[0]->ssd = 0;
} }
// merge old paths to save memory // merge old paths to save memory
if(i == froze + FREEZE_INTERVAL) { if (i == froze + FREEZE_INTERVAL) {
p = &paths[nodes[0]->path]; p = &paths[nodes[0]->path];
for(k=i; k>froze; k--) { for (k = i; k > froze; k--) {
dst[k] = p->nibble; dst[k] = p->nibble;
p = &paths[p->prev]; p = &paths[p->prev];
} }
@ -415,26 +449,26 @@ static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
// other nodes might use paths that don't coincide with the frozen one. // other nodes might use paths that don't coincide with the frozen one.
// checking which nodes do so is too slow, so just kill them all. // checking which nodes do so is too slow, so just kill them all.
// this also slightly improves quality, but I don't know why. // this also slightly improves quality, but I don't know why.
memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*)); memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
} }
} }
p = &paths[nodes[0]->path]; p = &paths[nodes[0]->path];
for(i=n-1; i>froze; i--) { for (i = n - 1; i > froze; i--) {
dst[i] = p->nibble; dst[i] = p->nibble;
p = &paths[p->prev]; p = &paths[p->prev];
} }
c->predictor = nodes[0]->sample1; c->predictor = nodes[0]->sample1;
c->sample1 = nodes[0]->sample1; c->sample1 = nodes[0]->sample1;
c->sample2 = nodes[0]->sample2; c->sample2 = nodes[0]->sample2;
c->step_index = nodes[0]->step; c->step_index = nodes[0]->step;
c->step = nodes[0]->step; c->step = nodes[0]->step;
c->idelta = nodes[0]->step; c->idelta = nodes[0]->step;
} }
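The overflow guard near the end of the loop above relies on the fact that only relative costs matter when comparing trellis candidates: once the best node's accumulated squared error passes 2^28, subtracting that minimum from every surviving node keeps the 32-bit ssd counters small without changing which path wins. A standalone sketch of the rebasing, with TrellisNode cut down to its ssd field and a made-up main() to show the effect (both are illustrative, not encoder code):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t ssd; } TrellisNode;

/* nodes[0] holds the smallest accumulated error, so the subtraction can
 * never underflow for the remaining candidates. */
static void rebase_ssd(TrellisNode **nodes, int frontier)
{
    if (nodes[0]->ssd > (1u << 28)) {
        for (int j = 1; j < frontier && nodes[j]; j++)
            nodes[j]->ssd -= nodes[0]->ssd;
        nodes[0]->ssd = 0;
    }
}

int main(void)
{
    TrellisNode a = { (1u << 28) + 100 }, b = { (1u << 28) + 250 };
    TrellisNode *nodes[2] = { &a, &b };
    rebase_ssd(nodes, 2);
    printf("%u %u\n", (unsigned)a.ssd, (unsigned)b.ssd); /* "0 150": the gap survives */
    return 0;
}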
static int adpcm_encode_frame(AVCodecContext *avctx, static int adpcm_encode_frame(AVCodecContext *avctx,
unsigned char *frame, int buf_size, void *data) unsigned char *frame, int buf_size, void *data)
{ {
int n, i, st; int n, i, st;
short *samples; short *samples;
@ -444,98 +478,96 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
dst = frame; dst = frame;
samples = (short *)data; samples = (short *)data;
st= avctx->channels == 2; st = avctx->channels == 2;
/* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */ /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */
switch(avctx->codec->id) { switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_WAV: case CODEC_ID_ADPCM_IMA_WAV:
n = avctx->frame_size / 8; n = avctx->frame_size / 8;
c->status[0].prev_sample = (signed short)samples[0]; /* XXX */ c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
/* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */ /* c->status[0].step_index = 0;
bytestream_put_le16(&dst, c->status[0].prev_sample); XXX: not sure how to init the state machine */
*dst++ = (unsigned char)c->status[0].step_index; bytestream_put_le16(&dst, c->status[0].prev_sample);
*dst++ = 0; /* unknown */ *dst++ = (unsigned char)c->status[0].step_index;
*dst++ = 0; /* unknown */
samples++;
if (avctx->channels == 2) {
c->status[1].prev_sample = (signed short)samples[0];
/* c->status[1].step_index = 0; */
bytestream_put_le16(&dst, c->status[1].prev_sample);
*dst++ = (unsigned char)c->status[1].step_index;
*dst++ = 0;
samples++; samples++;
if (avctx->channels == 2) { }
c->status[1].prev_sample = (signed short)samples[0];
/* c->status[1].step_index = 0; */
bytestream_put_le16(&dst, c->status[1].prev_sample);
*dst++ = (unsigned char)c->status[1].step_index;
*dst++ = 0;
samples++;
}
/* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */ /* stereo: 4 bytes (8 samples) for left,
if(avctx->trellis > 0) { 4 bytes for right, 4 bytes left, ... */
FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error); if (avctx->trellis > 0) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8); FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 8, error);
if(avctx->channels == 2) adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n * 8);
adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8); if (avctx->channels == 2)
for(i=0; i<n; i++) { adpcm_compress_trellis(avctx, samples + 1, buf + n * 8,
*dst++ = buf[8*i+0] | (buf[8*i+1] << 4); &c->status[1], n * 8);
*dst++ = buf[8*i+2] | (buf[8*i+3] << 4); for (i = 0; i < n; i++) {
*dst++ = buf[8*i+4] | (buf[8*i+5] << 4); *dst++ = buf[8 * i + 0] | (buf[8 * i + 1] << 4);
*dst++ = buf[8*i+6] | (buf[8*i+7] << 4); *dst++ = buf[8 * i + 2] | (buf[8 * i + 3] << 4);
if (avctx->channels == 2) { *dst++ = buf[8 * i + 4] | (buf[8 * i + 5] << 4);
uint8_t *buf1 = buf + n*8; *dst++ = buf[8 * i + 6] | (buf[8 * i + 7] << 4);
*dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4); if (avctx->channels == 2) {
*dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4); uint8_t *buf1 = buf + n * 8;
*dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4); *dst++ = buf1[8 * i + 0] | (buf1[8 * i + 1] << 4);
*dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4); *dst++ = buf1[8 * i + 2] | (buf1[8 * i + 3] << 4);
} *dst++ = buf1[8 * i + 4] | (buf1[8 * i + 5] << 4);
*dst++ = buf1[8 * i + 6] | (buf1[8 * i + 7] << 4);
} }
av_free(buf); }
} else av_free(buf);
for (; n>0; n--) { } else {
*dst = adpcm_ima_compress_sample(&c->status[0], samples[0]); for (; n > 0; n--) {
*dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4; *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
dst++; *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels ]) << 4;
*dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]); *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
*dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4; *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
dst++; *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
*dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]); *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
*dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4; *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
dst++; *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
*dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
*dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
dst++;
/* right channel */ /* right channel */
if (avctx->channels == 2) { if (avctx->channels == 2) {
*dst = adpcm_ima_compress_sample(&c->status[1], samples[1]); *dst = adpcm_ima_compress_sample(&c->status[1], samples[1 ]);
*dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4; *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[3 ]) << 4;
dst++; *dst = adpcm_ima_compress_sample(&c->status[1], samples[5 ]);
*dst = adpcm_ima_compress_sample(&c->status[1], samples[5]); *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[7 ]) << 4;
*dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4; *dst = adpcm_ima_compress_sample(&c->status[1], samples[9 ]);
dst++; *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
*dst = adpcm_ima_compress_sample(&c->status[1], samples[9]); *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
*dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4; *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
dst++;
*dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
*dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
dst++;
} }
samples += 8 * avctx->channels; samples += 8 * avctx->channels;
} }
}
break; break;
case CODEC_ID_ADPCM_IMA_QT: case CODEC_ID_ADPCM_IMA_QT:
{ {
int ch, i; int ch, i;
PutBitContext pb; PutBitContext pb;
init_put_bits(&pb, dst, buf_size*8); init_put_bits(&pb, dst, buf_size * 8);
for(ch=0; ch<avctx->channels; ch++){ for (ch = 0; ch < avctx->channels; ch++) {
put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7); put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
put_bits(&pb, 7, c->status[ch].step_index); put_bits(&pb, 7, c->status[ch].step_index);
if(avctx->trellis > 0) { if (avctx->trellis > 0) {
uint8_t buf[64]; uint8_t buf[64];
adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64); adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
for(i=0; i<64; i++) for (i = 0; i < 64; i++)
put_bits(&pb, 4, buf[i^1]); put_bits(&pb, 4, buf[i ^ 1]);
} else { } else {
for (i=0; i<64; i+=2){ for (i = 0; i < 64; i += 2) {
int t1, t2; int t1, t2;
t1 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]); t1 = adpcm_ima_qt_compress_sample(&c->status[ch],
t2 = adpcm_ima_qt_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]); samples[avctx->channels * (i + 0) + ch]);
t2 = adpcm_ima_qt_compress_sample(&c->status[ch],
samples[avctx->channels * (i + 1) + ch]);
put_bits(&pb, 4, t2); put_bits(&pb, 4, t2);
put_bits(&pb, 4, t1); put_bits(&pb, 4, t1);
} }
@ -543,119 +575,120 @@ static int adpcm_encode_frame(AVCodecContext *avctx,
} }
flush_put_bits(&pb); flush_put_bits(&pb);
dst += put_bits_count(&pb)>>3; dst += put_bits_count(&pb) >> 3;
break; break;
} }
case CODEC_ID_ADPCM_SWF: case CODEC_ID_ADPCM_SWF:
{ {
int i; int i;
PutBitContext pb; PutBitContext pb;
init_put_bits(&pb, dst, buf_size*8); init_put_bits(&pb, dst, buf_size * 8);
n = avctx->frame_size-1; n = avctx->frame_size - 1;
//Store AdpcmCodeSize // store AdpcmCodeSize
put_bits(&pb, 2, 2); //Set 4bits flash adpcm format put_bits(&pb, 2, 2); // set 4-bit flash adpcm format
//Init the encoder state // init the encoder state
for(i=0; i<avctx->channels; i++){ for (i = 0; i < avctx->channels; i++) {
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits // clip step so it fits 6 bits
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
put_sbits(&pb, 16, samples[i]); put_sbits(&pb, 16, samples[i]);
put_bits(&pb, 6, c->status[i].step_index); put_bits(&pb, 6, c->status[i].step_index);
c->status[i].prev_sample = (signed short)samples[i]; c->status[i].prev_sample = (signed short)samples[i];
} }
if(avctx->trellis > 0) { if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n); adpcm_compress_trellis(avctx, samples + 2, buf, &c->status[0], n);
if (avctx->channels == 2) if (avctx->channels == 2)
adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n); adpcm_compress_trellis(avctx, samples + 3, buf + n,
for(i=0; i<n; i++) { &c->status[1], n);
for (i = 0; i < n; i++) {
put_bits(&pb, 4, buf[i]); put_bits(&pb, 4, buf[i]);
if (avctx->channels == 2) if (avctx->channels == 2)
put_bits(&pb, 4, buf[n+i]); put_bits(&pb, 4, buf[n + i]);
} }
av_free(buf); av_free(buf);
} else { } else {
for (i=1; i<avctx->frame_size; i++) { for (i = 1; i < avctx->frame_size; i++) {
put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i])); put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
samples[avctx->channels * i]));
if (avctx->channels == 2) if (avctx->channels == 2)
put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1])); put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
samples[2 * i + 1]));
} }
} }
flush_put_bits(&pb); flush_put_bits(&pb);
dst += put_bits_count(&pb)>>3; dst += put_bits_count(&pb) >> 3;
break; break;
} }
case CODEC_ID_ADPCM_MS: case CODEC_ID_ADPCM_MS:
for(i=0; i<avctx->channels; i++){ for (i = 0; i < avctx->channels; i++) {
int predictor=0; int predictor = 0;
*dst++ = predictor; *dst++ = predictor;
c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor]; c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor]; c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
} }
for(i=0; i<avctx->channels; i++){ for (i = 0; i < avctx->channels; i++) {
if (c->status[i].idelta < 16) if (c->status[i].idelta < 16)
c->status[i].idelta = 16; c->status[i].idelta = 16;
bytestream_put_le16(&dst, c->status[i].idelta); bytestream_put_le16(&dst, c->status[i].idelta);
} }
for(i=0; i<avctx->channels; i++){ for (i = 0; i < avctx->channels; i++)
c->status[i].sample2= *samples++; c->status[i].sample2= *samples++;
} for (i = 0; i < avctx->channels; i++) {
for(i=0; i<avctx->channels; i++){ c->status[i].sample1 = *samples++;
c->status[i].sample1= *samples++;
bytestream_put_le16(&dst, c->status[i].sample1); bytestream_put_le16(&dst, c->status[i].sample1);
} }
for(i=0; i<avctx->channels; i++) for (i = 0; i < avctx->channels; i++)
bytestream_put_le16(&dst, c->status[i].sample2); bytestream_put_le16(&dst, c->status[i].sample2);
if(avctx->trellis > 0) { if (avctx->trellis > 0) {
int n = avctx->block_align - 7*avctx->channels; int n = avctx->block_align - 7 * avctx->channels;
FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error); FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
if(avctx->channels == 1) { if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
for(i=0; i<n; i+=2) for (i = 0; i < n; i += 2)
*dst++ = (buf[i] << 4) | buf[i+1]; *dst++ = (buf[i] << 4) | buf[i + 1];
} else { } else {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
for(i=0; i<n; i++) for (i = 0; i < n; i++)
*dst++ = (buf[i] << 4) | buf[n+i]; *dst++ = (buf[i] << 4) | buf[n + i];
} }
av_free(buf); av_free(buf);
} else } else {
for(i=7*avctx->channels; i<avctx->block_align; i++) { for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
int nibble; int nibble;
nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4; nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++); nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
*dst++ = nibble; *dst++ = nibble;
}
} }
break; break;
case CODEC_ID_ADPCM_YAMAHA: case CODEC_ID_ADPCM_YAMAHA:
n = avctx->frame_size / 2; n = avctx->frame_size / 2;
if(avctx->trellis > 0) { if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error); FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
n *= 2; n *= 2;
if(avctx->channels == 1) { if (avctx->channels == 1) {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
for(i=0; i<n; i+=2) for (i = 0; i < n; i += 2)
*dst++ = buf[i] | (buf[i+1] << 4); *dst++ = buf[i] | (buf[i + 1] << 4);
} else { } else {
adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n); adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n); adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
for(i=0; i<n; i++) for (i = 0; i < n; i++)
*dst++ = buf[i] | (buf[n+i] << 4); *dst++ = buf[i] | (buf[n + i] << 4);
} }
av_free(buf); av_free(buf);
} else } else
for (n *= avctx->channels; n>0; n--) { for (n *= avctx->channels; n > 0; n--) {
int nibble; int nibble;
nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++); nibble = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4; nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
*dst++ = nibble; *dst++ = nibble;
} }
break; break;
default: default:
@ -675,12 +708,13 @@ AVCodec ff_ ## name_ ## _encoder = { \
.init = adpcm_encode_init, \ .init = adpcm_encode_init, \
.encode = adpcm_encode_frame, \ .encode = adpcm_encode_frame, \
.close = adpcm_encode_close, \ .close = adpcm_encode_close, \
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
AV_SAMPLE_FMT_NONE}, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
} }
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
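In the IMA WAV case above, each output byte packs two 4-bit codes with the earlier sample in the low nibble, and a stereo block interleaves four bytes (eight samples) of the left channel with four bytes of the right. A hypothetical helper that reproduces just that packing from per-channel nibble buffers (pack_ima_wav_nibbles and its parameters are illustrative, not part of the encoder):

#include <stddef.h>
#include <stdint.h>

/* dst receives 4 bytes of left-channel codes, then (for stereo) 4 bytes of
 * right-channel codes, per group of 8 samples; the earlier sample of each
 * pair goes into the low nibble.  Returns the number of bytes written. */
static size_t pack_ima_wav_nibbles(uint8_t *dst, const uint8_t *left,
                                   const uint8_t *right, size_t groups)
{
    size_t n = 0;
    for (size_t g = 0; g < groups; g++) {
        for (int i = 0; i < 8; i += 2)
            dst[n++] = left[8 * g + i] | (left[8 * g + i + 1] << 4);
        if (right)
            for (int i = 0; i < 8; i += 2)
                dst[n++] = right[8 * g + i] | (right[8 * g + i + 1] << 4);
    }
    return n;
}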

View File: libavcodec/bmp.c

@ -263,9 +263,9 @@ static int bmp_decode_frame(AVCodecContext *avctx,
}else{ }else{
switch(depth){ switch(depth){
case 1: case 1:
for(i = 0; i < avctx->height; i++){ for (i = 0; i < avctx->height; i++) {
int j; int j;
for(j = 0; j < n; j++){ for (j = 0; j < n; j++) {
ptr[j*8+0] = buf[j] >> 7; ptr[j*8+0] = buf[j] >> 7;
ptr[j*8+1] = (buf[j] >> 6) & 1; ptr[j*8+1] = (buf[j] >> 6) & 1;
ptr[j*8+2] = (buf[j] >> 5) & 1; ptr[j*8+2] = (buf[j] >> 5) & 1;
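The 1bpp branch above expands every source byte into eight palette indices, most significant bit first. A compact sketch of the same unpacking for one row (unpack_1bpp_row is an illustrative helper, not code from the decoder):

#include <stdint.h>

static void unpack_1bpp_row(uint8_t *ptr, const uint8_t *buf, int n)
{
    for (int j = 0; j < n; j++)
        for (int b = 0; b < 8; b++)
            ptr[j * 8 + b] = (buf[j] >> (7 - b)) & 1; /* MSB first */
}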

View File: libavcodec/mpegvideo.c

@ -66,44 +66,61 @@ static void dct_unquantize_h263_inter_c(MpegEncContext *s,
//#define DEBUG //#define DEBUG
static const uint8_t ff_default_chroma_qscale_table[32]={ static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
}; };
const uint8_t ff_mpeg1_dc_scale_table[128]={ const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
}; };
static const uint8_t mpeg2_dc_scale_table1[128]={ static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
}; };
static const uint8_t mpeg2_dc_scale_table2[128]={ static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
}; };
static const uint8_t mpeg2_dc_scale_table3[128]={ static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
}; };
const uint8_t * const ff_mpeg2_dc_scale_table[4]={ const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
ff_mpeg1_dc_scale_table, ff_mpeg1_dc_scale_table,
mpeg2_dc_scale_table1, mpeg2_dc_scale_table1,
mpeg2_dc_scale_table2, mpeg2_dc_scale_table2,
@ -123,34 +140,37 @@ const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
PIX_FMT_NONE PIX_FMT_NONE
}; };
const uint8_t *avpriv_mpv_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){ const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
const uint8_t *end,
uint32_t * restrict state)
{
int i; int i;
assert(p<=end); assert(p <= end);
if(p>=end) if (p >= end)
return end; return end;
for(i=0; i<3; i++){ for (i = 0; i < 3; i++) {
uint32_t tmp= *state << 8; uint32_t tmp = *state << 8;
*state= tmp + *(p++); *state = tmp + *(p++);
if(tmp == 0x100 || p==end) if (tmp == 0x100 || p == end)
return p; return p;
} }
while(p<end){ while (p < end) {
if (p[-1] > 1 ) p+= 3; if (p[-1] > 1 ) p += 3;
else if(p[-2] ) p+= 2; else if (p[-2] ) p += 2;
else if(p[-3]|(p[-1]-1)) p++; else if (p[-3]|(p[-1]-1)) p++;
else{ else {
p++; p++;
break; break;
} }
} }
p= FFMIN(p, end)-4; p = FFMIN(p, end) - 4;
*state= AV_RB32(p); *state = AV_RB32(p);
return p+4; return p + 4;
} }
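A typical way to drive the scanner above is to call it repeatedly over one buffer, carrying the 32-bit state across calls; each call returns a pointer just past the next 00 00 01 xx start code, or the end of the buffer. A hedged usage sketch (find_start_code stands in for avpriv_mpv_find_start_code, and the demo loop and printing are assumptions, not decoder code):

#include <stdint.h>
#include <stdio.h>

const uint8_t *find_start_code(const uint8_t *p, const uint8_t *end,
                               uint32_t *state); /* same contract as above */

static void list_start_codes(const uint8_t *buf, size_t size)
{
    uint32_t state = ~0u;                  /* no partial code carried over */
    const uint8_t *p = buf, *end = buf + size;
    while (p < end) {
        p = find_start_code(p, end, &state);
        if ((state & 0xffffff00) == 0x100) /* last 4 bytes were 00 00 01 xx */
            printf("start code 0x%02x, payload begins at offset %td\n",
                   (unsigned)(state & 0xff), p - buf);
    }
}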
/* init common dct for both encoder and decoder */ /* init common dct for both encoder and decoder */
@ -163,11 +183,11 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
if(s->flags & CODEC_FLAG_BITEXACT) if (s->flags & CODEC_FLAG_BITEXACT)
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
#if HAVE_MMX #if HAVE_MMX
MPV_common_init_mmx(s); MPV_common_init_mmx(s);
#elif ARCH_ALPHA #elif ARCH_ALPHA
MPV_common_init_axp(s); MPV_common_init_axp(s);
@ -184,12 +204,12 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
#endif #endif
/* load & permutate scantables /* load & permutate scantables
note: only wmv uses different ones * note: only wmv uses different ones
*/ */
if(s->alternate_scan){ if (s->alternate_scan) {
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
}else{ } else {
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
} }
@ -199,9 +219,10 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
return 0; return 0;
} }
void ff_copy_picture(Picture *dst, Picture *src){ void ff_copy_picture(Picture *dst, Picture *src)
{
*dst = *src; *dst = *src;
dst->f.type= FF_BUFFER_TYPE_COPY; dst->f.type = FF_BUFFER_TYPE_COPY;
} }
/** /**
@ -210,11 +231,12 @@ void ff_copy_picture(Picture *dst, Picture *src){
static void free_frame_buffer(MpegEncContext *s, Picture *pic) static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{ {
/* Windows Media Image codecs allocate internal buffers with different /* Windows Media Image codecs allocate internal buffers with different
dimensions; ignore user defined callbacks for these */ * dimensions; ignore user defined callbacks for these
*/
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
ff_thread_release_buffer(s->avctx, (AVFrame*)pic); ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
else else
avcodec_default_release_buffer(s->avctx, (AVFrame*)pic); avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
av_freep(&pic->f.hwaccel_picture_private); av_freep(&pic->f.hwaccel_picture_private);
} }
@ -237,9 +259,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
} }
if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE) if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic); r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
else else
r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic); r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) { if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
@ -248,14 +270,17 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
return -1; return -1;
} }
if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) { if (s->linesize && (s->linesize != pic->f.linesize[0] ||
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); s->uvlinesize != pic->f.linesize[1])) {
av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed (stride changed)\n");
free_frame_buffer(s, pic); free_frame_buffer(s, pic);
return -1; return -1;
} }
if (pic->f.linesize[1] != pic->f.linesize[2]) { if (pic->f.linesize[1] != pic->f.linesize[2]) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n"); av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed (uv stride mismatch)\n");
free_frame_buffer(s, pic); free_frame_buffer(s, pic);
return -1; return -1;
} }
@ -265,21 +290,25 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/** /**
* allocates a Picture * allocates a Picture
* The pixels are allocated/set by calling get_buffer() if shared=0 * The pixels are allocated/set by calling get_buffer() if shared = 0
*/ */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11 {
const int mb_array_size= s->mb_stride*s->mb_height; const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
const int b8_array_size= s->b8_stride*s->mb_height*2;
const int b4_array_size= s->b4_stride*s->mb_height*4;
int i;
int r= -1;
if(shared){ // the + 1 is needed so memset(,,stride*height) does not sig11
const int mb_array_size = s->mb_stride * s->mb_height;
const int b8_array_size = s->b8_stride * s->mb_height * 2;
const int b4_array_size = s->b4_stride * s->mb_height * 4;
int i;
int r = -1;
if (shared) {
assert(pic->f.data[0]); assert(pic->f.data[0]);
assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED); assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
pic->f.type = FF_BUFFER_TYPE_SHARED; pic->f.type = FF_BUFFER_TYPE_SHARED;
}else{ } else {
assert(!pic->f.data[0]); assert(!pic->f.data[0]);
if (alloc_frame_buffer(s, pic) < 0) if (alloc_frame_buffer(s, pic) < 0)
@ -291,49 +320,69 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
if (pic->f.qscale_table == NULL) { if (pic->f.qscale_table == NULL) {
if (s->encoding) { if (s->encoding) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail) mb_array_size * sizeof(int16_t), fail)
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
mb_array_size * sizeof(int16_t), fail)
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
mb_array_size * sizeof(int8_t ), fail)
} }
FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail) mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1; (big_mb_num + s->mb_stride) * sizeof(uint8_t),
pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1; fail)
if(s->out_format == FMT_H264){ FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
for(i=0; i<2; i++){ (big_mb_num + s->mb_stride) * sizeof(uint32_t),
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail) fail)
pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
if (s->out_format == FMT_H264) {
for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
2 * (b4_array_size + 4) * sizeof(int16_t),
fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4; pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
4 * mb_array_size * sizeof(uint8_t), fail)
} }
pic->f.motion_subsample_log2 = 2; pic->f.motion_subsample_log2 = 2;
}else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ } else if (s->out_format == FMT_H263 || s->encoding ||
for(i=0; i<2; i++){ (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail) for (i = 0; i < 2; i++) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
2 * (b8_array_size + 4) * sizeof(int16_t),
fail)
pic->f.motion_val[i] = pic->motion_val_base[i] + 4; pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
4 * mb_array_size * sizeof(uint8_t), fail)
} }
pic->f.motion_subsample_log2 = 3; pic->f.motion_subsample_log2 = 3;
} }
if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
} }
pic->f.qstride = s->mb_stride; pic->f.qstride = s->mb_stride;
FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail) FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
1 * sizeof(AVPanScan), fail)
} }
/* It might be nicer if the application would keep track of these /* It might be nicer if the application would keep track of these
* but it would require an API change. */ * but it would require an API change. */
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); memmove(s->prev_pict_types + 1, s->prev_pict_types,
s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type; PREV_PICT_TYPES_BUFFER_SIZE-1);
if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B) s->prev_pict_types[0] = s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway. if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2
// and it is a bit tricky to skip them anyway.
pic->owner2 = s; pic->owner2 = s;
return 0; return 0;
fail: //for the FF_ALLOCZ_OR_GOTO macro fail: // for the FF_ALLOCZ_OR_GOTO macro
if(r>=0) if (r >= 0)
free_frame_buffer(s, pic); free_frame_buffer(s, pic);
return -1; return -1;
} }
@ -341,7 +390,8 @@ fail: //for the FF_ALLOCZ_OR_GOTO macro
/** /**
* deallocates a picture * deallocates a picture
*/ */
static void free_picture(MpegEncContext *s, Picture *pic){ static void free_picture(MpegEncContext *s, Picture *pic)
{
int i; int i;
if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) { if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
@ -357,13 +407,13 @@ static void free_picture(MpegEncContext *s, Picture *pic){
av_freep(&pic->f.dct_coeff); av_freep(&pic->f.dct_coeff);
av_freep(&pic->f.pan_scan); av_freep(&pic->f.pan_scan);
pic->f.mb_type = NULL; pic->f.mb_type = NULL;
for(i=0; i<2; i++){ for (i = 0; i < 2; i++) {
av_freep(&pic->motion_val_base[i]); av_freep(&pic->motion_val_base[i]);
av_freep(&pic->f.ref_index[i]); av_freep(&pic->f.ref_index[i]);
} }
if (pic->f.type == FF_BUFFER_TYPE_SHARED) { if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
for(i=0; i<4; i++){ for (i = 0; i < 4; i++) {
pic->f.base[i] = pic->f.base[i] =
pic->f.data[i] = NULL; pic->f.data[i] = NULL;
} }
@ -371,38 +421,47 @@ static void free_picture(MpegEncContext *s, Picture *pic){
} }
} }
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
{
int y_size = s->b8_stride * (2 * s->mb_height + 1); int y_size = s->b8_stride * (2 * s->mb_height + 1);
int c_size = s->mb_stride * (s->mb_height + 1); int c_size = s->mb_stride * (s->mb_height + 1);
int yc_size = y_size + 2 * c_size; int yc_size = y_size + 2 * c_size;
int i; int i;
// edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264) // edge emu needs blocksize + filter length - 1
FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance // (= 17x17 for halfpel / 21x21 for h264)
FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
(s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
//FIXME should be linesize instead of s->width*2 but that is not known before get_buffer() // FIXME should be linesize instead of s->width * 2
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail) // but that is not known before get_buffer()
s->me.temp= s->me.scratchpad; FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
s->rd_scratchpad= s->me.scratchpad; (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
s->b_scratchpad= s->me.scratchpad; s->me.temp = s->me.scratchpad;
s->obmc_scratchpad= s->me.scratchpad + 16; s->rd_scratchpad = s->me.scratchpad;
s->b_scratchpad = s->me.scratchpad;
s->obmc_scratchpad = s->me.scratchpad + 16;
if (s->encoding) { if (s->encoding) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail) ME_MAP_SIZE * sizeof(uint32_t), fail)
if(s->avctx->noise_reduction){ FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail) ME_MAP_SIZE * sizeof(uint32_t), fail)
if (s->avctx->noise_reduction) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
2 * 64 * sizeof(int), fail)
} }
} }
FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail) FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
s->block= s->blocks[0]; s->block = s->blocks[0];
for(i=0;i<12;i++){ for (i = 0; i < 12; i++) {
s->pblocks[i] = &s->block[i]; s->pblocks[i] = &s->block[i];
} }
if (s->out_format == FMT_H263) { if (s->out_format == FMT_H263) {
/* ac values */ /* ac values */
FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
yc_size * sizeof(int16_t) * 16, fail);
s->ac_val[0] = s->ac_val_base + s->b8_stride + 1; s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1; s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
s->ac_val[2] = s->ac_val[1] + c_size; s->ac_val[2] = s->ac_val[1] + c_size;
@ -410,29 +469,32 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
return 0; return 0;
fail: fail:
return -1; //free() through MPV_common_end() return -1; // free() through MPV_common_end()
} }
static void free_duplicate_context(MpegEncContext *s){ static void free_duplicate_context(MpegEncContext *s)
if(s==NULL) return; {
if (s == NULL)
return;
av_freep(&s->edge_emu_buffer); av_freep(&s->edge_emu_buffer);
av_freep(&s->me.scratchpad); av_freep(&s->me.scratchpad);
s->me.temp= s->me.temp =
s->rd_scratchpad= s->rd_scratchpad =
s->b_scratchpad= s->b_scratchpad =
s->obmc_scratchpad= NULL; s->obmc_scratchpad = NULL;
av_freep(&s->dct_error_sum); av_freep(&s->dct_error_sum);
av_freep(&s->me.map); av_freep(&s->me.map);
av_freep(&s->me.score_map); av_freep(&s->me.score_map);
av_freep(&s->blocks); av_freep(&s->blocks);
av_freep(&s->ac_val_base); av_freep(&s->ac_val_base);
s->block= NULL; s->block = NULL;
} }
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
#define COPY(a) bak->a= src->a {
#define COPY(a) bak->a = src->a
COPY(edge_emu_buffer); COPY(edge_emu_buffer);
COPY(me.scratchpad); COPY(me.scratchpad);
COPY(me.temp); COPY(me.temp);
@ -457,28 +519,33 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#undef COPY #undef COPY
} }
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){ void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
MpegEncContext bak; MpegEncContext bak;
int i; int i;
//FIXME copy only needed parts // FIXME copy only needed parts
//START_TIMER // START_TIMER
backup_duplicate_context(&bak, dst); backup_duplicate_context(&bak, dst);
memcpy(dst, src, sizeof(MpegEncContext)); memcpy(dst, src, sizeof(MpegEncContext));
backup_duplicate_context(dst, &bak); backup_duplicate_context(dst, &bak);
for(i=0;i<12;i++){ for (i = 0; i < 12; i++) {
dst->pblocks[i] = &dst->block[i]; dst->pblocks[i] = &dst->block[i];
} }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads // STOP_TIMER("update_duplicate_context")
// about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
} }
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) int ff_mpeg_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src)
{ {
MpegEncContext *s = dst->priv_data, *s1 = src->priv_data; MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
if(dst == src || !s1->context_initialized) return 0; if (dst == src || !s1->context_initialized)
return 0;
//FIXME can parameters change on I-frames? in that case dst may need a reinit // FIXME can parameters change on I-frames?
if(!s->context_initialized){ // in that case dst may need a reinit
if (!s->context_initialized) {
memcpy(s, s1, sizeof(MpegEncContext)); memcpy(s, s1, sizeof(MpegEncContext));
s->avctx = dst; s->avctx = dst;

View File: libavcodec/shorten.c

@ -331,7 +331,6 @@ static int read_header(ShortenContext *s)
s->lpcqoffset = 0; s->lpcqoffset = 0;
s->blocksize = DEFAULT_BLOCK_SIZE; s->blocksize = DEFAULT_BLOCK_SIZE;
s->channels = 1;
s->nmean = -1; s->nmean = -1;
s->version = get_bits(&s->gb, 8); s->version = get_bits(&s->gb, 8);
s->internal_ftype = get_uint(s, TYPESIZE); s->internal_ftype = get_uint(s, TYPESIZE);

View File: libavcodec/wavpack.c

@ -110,7 +110,7 @@ typedef struct WavpackFrameContext {
int extra_bits; int extra_bits;
int and, or, shift; int and, or, shift;
int post_shift; int post_shift;
int hybrid, hybrid_bitrate; int hybrid, hybrid_bitrate, hybrid_maxclip;
int float_flag; int float_flag;
int float_shift; int float_shift;
int float_max_exp; int float_max_exp;
@ -403,8 +403,14 @@ static inline int wv_get_value_integer(WavpackFrameContext *s, uint32_t *crc, in
*crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16); *crc = *crc * 9 + (S&0xffff) * 3 + ((unsigned)S>>16);
} }
} }
bit = (S & s->and) | s->or; bit = (S & s->and) | s->or;
return (((S + bit) << s->shift) - bit) << s->post_shift; bit = (((S + bit) << s->shift) - bit);
if(s->hybrid)
bit = av_clip(bit, -s->hybrid_maxclip, s->hybrid_maxclip - 1);
return bit << s->post_shift;
} }
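The clipping added above keeps lossy (hybrid) streams inside the output sample range: hybrid_maxclip, derived in wavpack_decode_block below from the bottom two bits of the frame flags (bytes per sample minus one), is the signed full-scale value for the stream's sample size, and the decoded value is clamped to [-maxclip, maxclip - 1] before the final post shift. A small illustrative helper with the same arithmetic (clip_hybrid_sample is hypothetical, not a decoder function):

#include <stdint.h>

static int32_t clip_hybrid_sample(int32_t value, uint32_t frame_flags)
{
    int bytes_per_sample = (frame_flags & 0x03) + 1;            /* 1..4 */
    int64_t maxclip = (int64_t)1 << (8 * bytes_per_sample - 1); /* 32768 for 16 bit */
    if (value < -maxclip)
        return (int32_t)-maxclip;
    if (value > maxclip - 1)
        return (int32_t)(maxclip - 1);
    return value;
}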
static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S) static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
@ -792,6 +798,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->joint = s->frame_flags & WV_JOINT_STEREO; s->joint = s->frame_flags & WV_JOINT_STEREO;
s->hybrid = s->frame_flags & WV_HYBRID_MODE; s->hybrid = s->frame_flags & WV_HYBRID_MODE;
s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE; s->hybrid_bitrate = s->frame_flags & WV_HYBRID_BITRATE;
s->hybrid_maxclip = 1 << ((((s->frame_flags & 0x03) + 1) << 3) - 1);
s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f); s->post_shift = 8 * (bpp-1-(s->frame_flags&0x03)) + ((s->frame_flags >> 13) & 0x1f);
s->CRC = AV_RL32(buf); buf += 4; s->CRC = AV_RL32(buf); buf += 4;
if(wc->mkv_mode) if(wc->mkv_mode)

View File: libavcodec/xan.c

@ -113,13 +113,13 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
init_get_bits(&gb, ptr, ptr_len * 8); init_get_bits(&gb, ptr, ptr_len * 8);
while ( val != 0x16 ) { while (val != 0x16) {
unsigned idx = val - 0x17 + get_bits1(&gb) * byte; unsigned idx = val - 0x17 + get_bits1(&gb) * byte;
if (idx >= 2 * byte) if (idx >= 2 * byte)
return -1; return -1;
val = src[idx]; val = src[idx];
if ( val < 0x16 ) { if (val < 0x16) {
if (dest >= dest_end) if (dest >= dest_end)
return 0; return 0;
*dest++ = val; *dest++ = val;
@ -149,27 +149,23 @@ static void xan_unpack(unsigned char *dest, int dest_len,
if (opcode < 0xe0) { if (opcode < 0xe0) {
int size2, back; int size2, back;
if ( (opcode & 0x80) == 0 ) { if ((opcode & 0x80) == 0) {
size = opcode & 3; size = opcode & 3;
back = ((opcode & 0x60) << 3) + *src++ + 1; back = ((opcode & 0x60) << 3) + *src++ + 1;
size2 = ((opcode & 0x1c) >> 2) + 3; size2 = ((opcode & 0x1c) >> 2) + 3;
} else if ((opcode & 0x40) == 0) {
} else if ( (opcode & 0x40) == 0 ) {
size = *src >> 6; size = *src >> 6;
back = (bytestream_get_be16(&src) & 0x3fff) + 1; back = (bytestream_get_be16(&src) & 0x3fff) + 1;
size2 = (opcode & 0x3f) + 4; size2 = (opcode & 0x3f) + 4;
} else { } else {
size = opcode & 3; size = opcode & 3;
back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1; back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1;
size2 = ((opcode & 0x0c) << 6) + *src++ + 5; size2 = ((opcode & 0x0c) << 6) + *src++ + 5;
} }
if (dest_end - dest < size + size2 || if (dest_end - dest < size + size2 ||
dest + size - dest_org < back || dest + size - dest_org < back ||
src_end - src < size) src_end - src < size)
@ -205,7 +201,7 @@ static inline void xan_wc3_output_pixel_run(XanContext *s,
line_inc = stride - width; line_inc = stride - width;
index = y * stride + x; index = y * stride + x;
current_x = x; current_x = x;
while(pixel_count && (index < s->frame_size)) { while (pixel_count && index < s->frame_size) {
int count = FFMIN(pixel_count, width - current_x); int count = FFMIN(pixel_count, width - current_x);
memcpy(palette_plane + index, pixel_buffer, count); memcpy(palette_plane + index, pixel_buffer, count);
pixel_count -= count; pixel_count -= count;
@ -220,8 +216,9 @@ static inline void xan_wc3_output_pixel_run(XanContext *s,
} }
} }
static inline void xan_wc3_copy_pixel_run(XanContext *s, static inline void xan_wc3_copy_pixel_run(XanContext *s, int x, int y,
int x, int y, int pixel_count, int motion_x, int motion_y) int pixel_count, int motion_x,
int motion_y)
{ {
int stride; int stride;
int line_inc; int line_inc;
@ -230,8 +227,8 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
int width = s->avctx->width; int width = s->avctx->width;
unsigned char *palette_plane, *prev_palette_plane; unsigned char *palette_plane, *prev_palette_plane;
if ( y + motion_y < 0 || y + motion_y >= s->avctx->height || if (y + motion_y < 0 || y + motion_y >= s->avctx->height ||
x + motion_x < 0 || x + motion_x >= s->avctx->width) x + motion_x < 0 || x + motion_x >= s->avctx->width)
return; return;
palette_plane = s->current_frame.data[0]; palette_plane = s->current_frame.data[0];
@ -244,12 +241,14 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
curframe_x = x; curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x; prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x; prevframe_x = x + motion_x;
while(pixel_count && while (pixel_count &&
curframe_index < s->frame_size && curframe_index < s->frame_size &&
prevframe_index < s->frame_size) { prevframe_index < s->frame_size) {
int count = FFMIN3(pixel_count, width - curframe_x, width - prevframe_x); int count = FFMIN3(pixel_count, width - curframe_x,
width - prevframe_x);
memcpy(palette_plane + curframe_index, prev_palette_plane + prevframe_index, count); memcpy(palette_plane + curframe_index,
prev_palette_plane + prevframe_index, count);
pixel_count -= count; pixel_count -= count;
curframe_index += count; curframe_index += count;
prevframe_index += count; prevframe_index += count;
@ -270,7 +269,7 @@ static inline void xan_wc3_copy_pixel_run(XanContext *s,
static int xan_wc3_decode_frame(XanContext *s) { static int xan_wc3_decode_frame(XanContext *s) {
int width = s->avctx->width; int width = s->avctx->width;
int height = s->avctx->height; int height = s->avctx->height;
int total_pixels = width * height; int total_pixels = width * height;
unsigned char opcode; unsigned char opcode;
@ -289,7 +288,8 @@ static int xan_wc3_decode_frame(XanContext *s) {
const unsigned char *size_segment; const unsigned char *size_segment;
const unsigned char *vector_segment; const unsigned char *vector_segment;
const unsigned char *imagedata_segment; const unsigned char *imagedata_segment;
int huffman_offset, size_offset, vector_offset, imagedata_offset, imagedata_size; int huffman_offset, size_offset, vector_offset, imagedata_offset,
imagedata_size;
if (s->size < 8) if (s->size < 8)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -374,6 +374,7 @@ static int xan_wc3_decode_frame(XanContext *s) {
size_segment += 3; size_segment += 3;
break; break;
} }
if (size > total_pixels) if (size > total_pixels)
break; break;
@ -518,7 +519,8 @@ static int xan_decode_frame(AVCodecContext *avctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
if (s->palettes_count >= PALETTES_MAX) if (s->palettes_count >= PALETTES_MAX)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
tmpptr = av_realloc(s->palettes, (s->palettes_count + 1) * AVPALETTE_SIZE); tmpptr = av_realloc(s->palettes,
(s->palettes_count + 1) * AVPALETTE_SIZE);
if (!tmpptr) if (!tmpptr)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
s->palettes = tmpptr; s->palettes = tmpptr;
@ -569,7 +571,8 @@ static int xan_decode_frame(AVCodecContext *avctx,
if (!s->frame_size) if (!s->frame_size)
s->frame_size = s->current_frame.linesize[0] * s->avctx->height; s->frame_size = s->current_frame.linesize[0] * s->avctx->height;
memcpy(s->current_frame.data[1], s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE); memcpy(s->current_frame.data[1],
s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);
s->buf = buf; s->buf = buf;
s->size = buf_size; s->size = buf_size;
@ -617,5 +620,5 @@ AVCodec ff_xan_wc3_decoder = {
.close = xan_decode_end, .close = xan_decode_end,
.decode = xan_decode_frame, .decode = xan_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"), .long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
}; };

View File: libavcodec/zmbv.c

@ -88,8 +88,8 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
output = c->cur; output = c->cur;
prev = c->prev; prev = c->prev;
if(c->flags & ZMBV_DELTAPAL){ if (c->flags & ZMBV_DELTAPAL) {
for(i = 0; i < 768; i++) for (i = 0; i < 768; i++)
c->pal[i] ^= *src++; c->pal[i] ^= *src++;
} }
@ -97,9 +97,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3); src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0; block = 0;
for(y = 0; y < c->height; y += c->bh) { for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
for(x = 0; x < c->width; x += c->bw) { for (x = 0; x < c->width; x += c->bw) {
uint8_t *out, *tprev; uint8_t *out, *tprev;
d = mvec[block] & 1; d = mvec[block] & 1;
@ -114,12 +114,12 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width; tprev = prev + x + dx + dy * c->width;
mx = x + dx; mx = x + dx;
my = y + dy; my = y + dy;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
if((my + j < 0) || (my + j >= c->height)) { if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2); memset(out, 0, bw2);
} else { } else {
for(i = 0; i < bw2; i++){ for (i = 0; i < bw2; i++) {
if((mx + i < 0) || (mx + i >= c->width)) if (mx + i < 0 || mx + i >= c->width)
out[i] = 0; out[i] = 0;
else else
out[i] = tprev[i]; out[i] = tprev[i];
@ -129,10 +129,10 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
tprev += c->width; tprev += c->width;
} }
if(d) { /* apply XOR'ed difference */ if (d) { /* apply XOR'ed difference */
out = output + x; out = output + x;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
for(i = 0; i < bw2; i++) for (i = 0; i < bw2; i++)
out[i] ^= *src++; out[i] ^= *src++;
out += c->width; out += c->width;
} }
@ -141,8 +141,9 @@ static int zmbv_decode_xor_8(ZmbvContext *c)
output += c->width * c->bh; output += c->width * c->bh;
prev += c->width * c->bh; prev += c->width * c->bh;
} }
if(src - c->decomp_buf != c->decomp_len) if (src - c->decomp_buf != c->decomp_len)
av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
src-c->decomp_buf, c->decomp_len);
return 0; return 0;
} }
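Each block in the 8-bit path above is rebuilt in two steps: a motion-compensated copy from the previous frame with out-of-frame pixels forced to zero, then an optional XOR of the transmitted difference when the low bit of the block's motion-vector byte is set. A condensed sketch of those two steps for a single block, assuming a packed width*height frame layout (the helper names are illustrative):

#include <stdint.h>
#include <string.h>

/* Copy one bw2 x bh2 block from the previous frame displaced by (dx, dy),
 * writing zero for any source pixel that falls outside the frame.
 * dst points at the block's top-left pixel in the current frame. */
static void zmbv_copy_block_8(uint8_t *dst, const uint8_t *prev,
                              int width, int height,
                              int x, int y, int dx, int dy, int bw2, int bh2)
{
    for (int j = 0; j < bh2; j++) {
        uint8_t *out = dst + j * width;
        int sy = y + dy + j;
        if (sy < 0 || sy >= height) {
            memset(out, 0, bw2);
            continue;
        }
        for (int i = 0; i < bw2; i++) {
            int sx = x + dx + i;
            out[i] = (sx < 0 || sx >= width) ? 0 : prev[sy * width + sx];
        }
    }
}

/* Apply the transmitted difference on top of the prediction and return the
 * advanced pointer into the delta stream. */
static const uint8_t *zmbv_xor_block_8(uint8_t *dst, const uint8_t *delta,
                                       int width, int bw2, int bh2)
{
    for (int j = 0; j < bh2; j++, dst += width)
        for (int i = 0; i < bw2; i++)
            dst[i] ^= *delta++;
    return delta;
}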
@ -168,9 +169,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3); src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0; block = 0;
for(y = 0; y < c->height; y += c->bh) { for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
for(x = 0; x < c->width; x += c->bw) { for (x = 0; x < c->width; x += c->bw) {
uint16_t *out, *tprev; uint16_t *out, *tprev;
d = mvec[block] & 1; d = mvec[block] & 1;
@ -185,12 +186,12 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width; tprev = prev + x + dx + dy * c->width;
mx = x + dx; mx = x + dx;
my = y + dy; my = y + dy;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
if((my + j < 0) || (my + j >= c->height)) { if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 2); memset(out, 0, bw2 * 2);
} else { } else {
for(i = 0; i < bw2; i++){ for (i = 0; i < bw2; i++) {
if((mx + i < 0) || (mx + i >= c->width)) if (mx + i < 0 || mx + i >= c->width)
out[i] = 0; out[i] = 0;
else else
out[i] = tprev[i]; out[i] = tprev[i];
@ -200,10 +201,10 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
tprev += c->width; tprev += c->width;
} }
if(d) { /* apply XOR'ed difference */ if (d) { /* apply XOR'ed difference */
out = output + x; out = output + x;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++){
for(i = 0; i < bw2; i++) { for (i = 0; i < bw2; i++) {
out[i] ^= *((uint16_t*)src); out[i] ^= *((uint16_t*)src);
src += 2; src += 2;
} }
@ -214,8 +215,9 @@ static int zmbv_decode_xor_16(ZmbvContext *c)
output += c->width * c->bh; output += c->width * c->bh;
prev += c->width * c->bh; prev += c->width * c->bh;
} }
if(src - c->decomp_buf != c->decomp_len) if (src - c->decomp_buf != c->decomp_len)
av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
src-c->decomp_buf, c->decomp_len);
return 0; return 0;
} }
@ -244,9 +246,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3); src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0; block = 0;
for(y = 0; y < c->height; y += c->bh) { for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
for(x = 0; x < c->width; x += c->bw) { for (x = 0; x < c->width; x += c->bw) {
uint8_t *out, *tprev; uint8_t *out, *tprev;
d = mvec[block] & 1; d = mvec[block] & 1;
@ -261,12 +263,12 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
tprev = prev + (x + dx) * 3 + dy * stride; tprev = prev + (x + dx) * 3 + dy * stride;
mx = x + dx; mx = x + dx;
my = y + dy; my = y + dy;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
if((my + j < 0) || (my + j >= c->height)) { if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 3); memset(out, 0, bw2 * 3);
} else { } else {
for(i = 0; i < bw2; i++){ for (i = 0; i < bw2; i++){
if((mx + i < 0) || (mx + i >= c->width)) { if (mx + i < 0 || mx + i >= c->width) {
out[i * 3 + 0] = 0; out[i * 3 + 0] = 0;
out[i * 3 + 1] = 0; out[i * 3 + 1] = 0;
out[i * 3 + 2] = 0; out[i * 3 + 2] = 0;
@ -281,10 +283,10 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
tprev += stride; tprev += stride;
} }
if(d) { /* apply XOR'ed difference */ if (d) { /* apply XOR'ed difference */
out = output + x * 3; out = output + x * 3;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
for(i = 0; i < bw2; i++) { for (i = 0; i < bw2; i++) {
out[i * 3 + 0] ^= *src++; out[i * 3 + 0] ^= *src++;
out[i * 3 + 1] ^= *src++; out[i * 3 + 1] ^= *src++;
out[i * 3 + 2] ^= *src++; out[i * 3 + 2] ^= *src++;
@ -296,8 +298,9 @@ static int zmbv_decode_xor_24(ZmbvContext *c)
output += stride * c->bh; output += stride * c->bh;
prev += stride * c->bh; prev += stride * c->bh;
} }
if(src - c->decomp_buf != c->decomp_len) if (src - c->decomp_buf != c->decomp_len)
av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n", src-c->decomp_buf, c->decomp_len); av_log(c->avctx, AV_LOG_ERROR, "Used %i of %i bytes\n",
src-c->decomp_buf, c->decomp_len);
return 0; return 0;
} }
#endif //ZMBV_ENABLE_24BPP #endif //ZMBV_ENABLE_24BPP
@ -324,9 +327,9 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
src += ((c->bx * c->by * 2 + 3) & ~3); src += ((c->bx * c->by * 2 + 3) & ~3);
block = 0; block = 0;
for(y = 0; y < c->height; y += c->bh) { for (y = 0; y < c->height; y += c->bh) {
bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y); bh2 = ((c->height - y) > c->bh) ? c->bh : (c->height - y);
for(x = 0; x < c->width; x += c->bw) { for (x = 0; x < c->width; x += c->bw) {
uint32_t *out, *tprev; uint32_t *out, *tprev;
d = mvec[block] & 1; d = mvec[block] & 1;
@ -341,12 +344,12 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
tprev = prev + x + dx + dy * c->width; tprev = prev + x + dx + dy * c->width;
mx = x + dx; mx = x + dx;
my = y + dy; my = y + dy;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++) {
if((my + j < 0) || (my + j >= c->height)) { if (my + j < 0 || my + j >= c->height) {
memset(out, 0, bw2 * 4); memset(out, 0, bw2 * 4);
} else { } else {
for(i = 0; i < bw2; i++){ for (i = 0; i < bw2; i++){
if((mx + i < 0) || (mx + i >= c->width)) if (mx + i < 0 || mx + i >= c->width)
out[i] = 0; out[i] = 0;
else else
out[i] = tprev[i]; out[i] = tprev[i];
@ -356,11 +359,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
tprev += c->width; tprev += c->width;
} }
if(d) { /* apply XOR'ed difference */ if (d) { /* apply XOR'ed difference */
out = output + x; out = output + x;
for(j = 0; j < bh2; j++){ for (j = 0; j < bh2; j++){
for(i = 0; i < bw2; i++) { for (i = 0; i < bw2; i++) {
out[i] ^= *((uint32_t*)src); out[i] ^= *((uint32_t *) src);
src += 4; src += 4;
} }
out += c->width; out += c->width;
@ -368,10 +371,11 @@ static int zmbv_decode_xor_32(ZmbvContext *c)
} }
} }
output += c->width * c->bh; output += c->width * c->bh;
prev += c->width * c->bh; prev += c->width * c->bh;
} }
if(src - c->decomp_buf != c->decomp_len) if (src - c->decomp_buf != c->decomp_len)
av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n", src-c->decomp_buf, c->decomp_len); av_log(c->avctx, AV_LOG_ERROR, "Used %ti of %i bytes\n",
src-c->decomp_buf, c->decomp_len);
return 0; return 0;
} }
@ -401,12 +405,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
int len = buf_size; int len = buf_size;
int hi_ver, lo_ver; int hi_ver, lo_ver;
if(c->pic.data[0]) if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic); avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 3; c->pic.reference = 3;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID; c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if(avctx->get_buffer(avctx, &c->pic) < 0){ if (avctx->get_buffer(avctx, &c->pic) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
@ -414,7 +418,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
/* parse header */ /* parse header */
c->flags = buf[0]; c->flags = buf[0];
buf++; len--; buf++; len--;
if(c->flags & ZMBV_KEYFRAME) { if (c->flags & ZMBV_KEYFRAME) {
void *decode_intra = NULL; void *decode_intra = NULL;
c->decode_intra= NULL; c->decode_intra= NULL;
hi_ver = buf[0]; hi_ver = buf[0];
@ -426,21 +430,26 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
buf += 6; buf += 6;
len -= 6; len -= 6;
av_log(avctx, AV_LOG_DEBUG, "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh); av_log(avctx, AV_LOG_DEBUG,
if(hi_ver != 0 || lo_ver != 1) { "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",
av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n", hi_ver, lo_ver); c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh);
if (hi_ver != 0 || lo_ver != 1) {
av_log(avctx, AV_LOG_ERROR, "Unsupported version %i.%i\n",
hi_ver, lo_ver);
return -1; return -1;
} }
if(c->bw == 0 || c->bh == 0) { if (c->bw == 0 || c->bh == 0) {
av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n", c->bw, c->bh); av_log(avctx, AV_LOG_ERROR, "Unsupported block size %ix%i\n",
c->bw, c->bh);
return -1; return -1;
} }
if(c->comp != 0 && c->comp != 1) { if (c->comp != 0 && c->comp != 1) {
av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n", c->comp); av_log(avctx, AV_LOG_ERROR, "Unsupported compression type %i\n",
c->comp);
return -1; return -1;
} }
switch(c->fmt) { switch (c->fmt) {
case ZMBV_FMT_8BPP: case ZMBV_FMT_8BPP:
c->bpp = 8; c->bpp = 8;
decode_intra = zmbv_decode_intra; decode_intra = zmbv_decode_intra;
@ -466,7 +475,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break; break;
default: default:
c->decode_xor = NULL; c->decode_xor = NULL;
av_log(avctx, AV_LOG_ERROR, "Unsupported (for now) format %i\n", c->fmt); av_log(avctx, AV_LOG_ERROR,
"Unsupported (for now) format %i\n", c->fmt);
return -1; return -1;
} }
@ -476,21 +486,21 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
return -1; return -1;
} }
c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8)); c->cur = av_realloc_f(c->cur, avctx->width * avctx->height, (c->bpp / 8));
c->prev = av_realloc_f(c->prev, avctx->width * avctx->height, (c->bpp / 8)); c->prev = av_realloc_f(c->prev, avctx->width * avctx->height, (c->bpp / 8));
c->bx = (c->width + c->bw - 1) / c->bw; c->bx = (c->width + c->bw - 1) / c->bw;
c->by = (c->height+ c->bh - 1) / c->bh; c->by = (c->height+ c->bh - 1) / c->bh;
if(!c->cur || !c->prev) if (!c->cur || !c->prev)
return -1; return -1;
c->decode_intra= decode_intra; c->decode_intra= decode_intra;
} }
if(c->decode_intra == NULL) { if (c->decode_intra == NULL) {
av_log(avctx, AV_LOG_ERROR, "Error! Got no format or no keyframe!\n"); av_log(avctx, AV_LOG_ERROR, "Error! Got no format or no keyframe!\n");
return -1; return -1;
} }
if(c->comp == 0) { //Uncompressed data if (c->comp == 0) { //Uncompressed data
memcpy(c->decomp_buf, buf, len); memcpy(c->decomp_buf, buf, len);
c->decomp_size = 1; c->decomp_size = 1;
} else { // ZLIB-compressed data } else { // ZLIB-compressed data
@ -502,14 +512,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
inflate(&c->zstream, Z_FINISH); inflate(&c->zstream, Z_FINISH);
c->decomp_len = c->zstream.total_out; c->decomp_len = c->zstream.total_out;
} }
if(c->flags & ZMBV_KEYFRAME) { if (c->flags & ZMBV_KEYFRAME) {
c->pic.key_frame = 1; c->pic.key_frame = 1;
c->pic.pict_type = AV_PICTURE_TYPE_I; c->pic.pict_type = AV_PICTURE_TYPE_I;
c->decode_intra(c); c->decode_intra(c);
} else { } else {
c->pic.key_frame = 0; c->pic.key_frame = 0;
c->pic.pict_type = AV_PICTURE_TYPE_P; c->pic.pict_type = AV_PICTURE_TYPE_P;
if(c->decomp_len) if (c->decomp_len)
c->decode_xor(c); c->decode_xor(c);
} }
@ -520,10 +530,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
out = c->pic.data[0]; out = c->pic.data[0];
src = c->cur; src = c->cur;
switch(c->fmt) { switch (c->fmt) {
case ZMBV_FMT_8BPP: case ZMBV_FMT_8BPP:
for(j = 0; j < c->height; j++) { for (j = 0; j < c->height; j++) {
for(i = 0; i < c->width; i++) { for (i = 0; i < c->width; i++) {
out[i * 3 + 0] = c->pal[(*src) * 3 + 0]; out[i * 3 + 0] = c->pal[(*src) * 3 + 0];
out[i * 3 + 1] = c->pal[(*src) * 3 + 1]; out[i * 3 + 1] = c->pal[(*src) * 3 + 1];
out[i * 3 + 2] = c->pal[(*src) * 3 + 2]; out[i * 3 + 2] = c->pal[(*src) * 3 + 2];
@ -533,8 +543,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
} }
break; break;
case ZMBV_FMT_15BPP: case ZMBV_FMT_15BPP:
for(j = 0; j < c->height; j++) { for (j = 0; j < c->height; j++) {
for(i = 0; i < c->width; i++) { for (i = 0; i < c->width; i++) {
uint16_t tmp = AV_RL16(src); uint16_t tmp = AV_RL16(src);
src += 2; src += 2;
out[i * 3 + 0] = (tmp & 0x7C00) >> 7; out[i * 3 + 0] = (tmp & 0x7C00) >> 7;
@ -545,8 +555,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
} }
break; break;
case ZMBV_FMT_16BPP: case ZMBV_FMT_16BPP:
for(j = 0; j < c->height; j++) { for (j = 0; j < c->height; j++) {
for(i = 0; i < c->width; i++) { for (i = 0; i < c->width; i++) {
uint16_t tmp = AV_RL16(src); uint16_t tmp = AV_RL16(src);
src += 2; src += 2;
out[i * 3 + 0] = (tmp & 0xF800) >> 8; out[i * 3 + 0] = (tmp & 0xF800) >> 8;
@ -558,7 +568,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break; break;
#ifdef ZMBV_ENABLE_24BPP #ifdef ZMBV_ENABLE_24BPP
case ZMBV_FMT_24BPP: case ZMBV_FMT_24BPP:
for(j = 0; j < c->height; j++) { for (j = 0; j < c->height; j++) {
memcpy(out, src, c->width * 3); memcpy(out, src, c->width * 3);
src += c->width * 3; src += c->width * 3;
out += c->pic.linesize[0]; out += c->pic.linesize[0];
@ -566,8 +576,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
break; break;
#endif //ZMBV_ENABLE_24BPP #endif //ZMBV_ENABLE_24BPP
case ZMBV_FMT_32BPP: case ZMBV_FMT_32BPP:
for(j = 0; j < c->height; j++) { for (j = 0; j < c->height; j++) {
for(i = 0; i < c->width; i++) { for (i = 0; i < c->width; i++) {
uint32_t tmp = AV_RL32(src); uint32_t tmp = AV_RL32(src);
src += 4; src += 4;
AV_WB24(out+(i*3), tmp); AV_WB24(out+(i*3), tmp);
@ -616,7 +626,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
/* Allocate decompression buffer */ /* Allocate decompression buffer */
if (c->decomp_size) { if (c->decomp_size) {
if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) { if ((c->decomp_buf = av_malloc(c->decomp_size)) == NULL) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); av_log(avctx, AV_LOG_ERROR,
"Can't allocate decompression buffer.\n");
return 1; return 1;
} }
} }
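
For reference, the "src += ((c->bx * c->by * 2 + 3) & ~3)" lines repeated above skip the per-block motion-vector table, whose size (2 bytes per block) is rounded up to a 4-byte boundary. A stand-alone sketch of that padding arithmetic, with hypothetical block counts that are not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
        /* 2 bytes of motion-vector data per block, padded up to a multiple of 4,
           which is exactly what ((n + 3) & ~3) computes. */
        int bx = 5, by = 3;                     /* hypothetical block grid */
        int mvec_bytes = bx * by * 2;           /* 30 */
        int padded     = (mvec_bytes + 3) & ~3; /* 32 */
        printf("%d -> %d\n", mvec_bytes, padded);
        return 0;
    }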

View File

@ -61,9 +61,10 @@ enum {
}; };
static const AVCodecTag codec_oma_tags[] = { static const AVCodecTag codec_oma_tags[] = {
{ CODEC_ID_ATRAC3, OMA_CODECID_ATRAC3 }, { CODEC_ID_ATRAC3, OMA_CODECID_ATRAC3 },
{ CODEC_ID_ATRAC3P, OMA_CODECID_ATRAC3P }, { CODEC_ID_ATRAC3P, OMA_CODECID_ATRAC3P },
{ CODEC_ID_MP3, OMA_CODECID_MP3 }, { CODEC_ID_MP3, OMA_CODECID_MP3 },
{ CODEC_ID_PCM_S16BE, OMA_CODECID_LPCM },
}; };
static const uint64_t leaf_table[] = { static const uint64_t leaf_table[] = {
@ -205,8 +206,8 @@ static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header)
while (em) { while (em) {
if (!strcmp(em->tag, "GEOB") && if (!strcmp(em->tag, "GEOB") &&
(geob = em->data) && (geob = em->data) &&
!strcmp(geob->description, "OMG_LSI") || (!strcmp(geob->description, "OMG_LSI") ||
!strcmp(geob->description, "OMG_BKLSI")) { !strcmp(geob->description, "OMG_BKLSI"))) {
break; break;
} }
em = em->next; em = em->next;
@ -361,6 +362,16 @@ static int oma_read_header(AVFormatContext *s,
st->need_parsing = AVSTREAM_PARSE_FULL; st->need_parsing = AVSTREAM_PARSE_FULL;
framesize = 1024; framesize = 1024;
break; break;
case OMA_CODECID_LPCM:
/* PCM 44.1 kHz 16 bit stereo big-endian */
st->codec->channels = 2;
st->codec->sample_rate = 44100;
framesize = 1024;
/* bit rate = sample rate x PCM block align (= 4) x 8 */
st->codec->bit_rate = st->codec->sample_rate * 32;
st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
break;
default: default:
av_log(s, AV_LOG_ERROR, "Unsupported codec %d!\n",buf[32]); av_log(s, AV_LOG_ERROR, "Unsupported codec %d!\n",buf[32]);
return -1; return -1;
@ -397,14 +408,20 @@ static int oma_read_probe(AVProbeData *p)
unsigned tag_len = 0; unsigned tag_len = 0;
buf = p->buf; buf = p->buf;
/* version must be 3 and flags byte zero */
if (ff_id3v2_match(buf, ID3v2_EA3_MAGIC) && buf[3] == 3 && !buf[4])
tag_len = ff_id3v2_tag_len(buf);
// This check cannot overflow as tag_len has at most 28 bits if (p->buf_size < ID3v2_HEADER_SIZE ||
if (p->buf_size < tag_len + 5) !ff_id3v2_match(buf, ID3v2_EA3_MAGIC) ||
buf[3] != 3 || // version must be 3
buf[4]) // flags byte zero
return 0; return 0;
tag_len = ff_id3v2_tag_len(buf);
/* This check cannot overflow as tag_len has at most 28 bits */
if (p->buf_size < tag_len + 5)
/* EA3 header comes late, might be outside of the probe buffer */
return AVPROBE_SCORE_MAX / 2;
buf += tag_len; buf += tag_len;
if (!memcmp(buf, "EA3", 3) && !buf[4] && buf[5] == EA3_HEADER_SIZE) if (!memcmp(buf, "EA3", 3) && !buf[4] && buf[5] == EA3_HEADER_SIZE)
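
For reference, the bit rate hard-coded in the new OMA_CODECID_LPCM case follows from the fixed 44.1 kHz / 16-bit / stereo layout noted in the comments above. A minimal stand-alone sketch of the same arithmetic (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        int sample_rate     = 44100;                           /* fixed for OMA LPCM */
        int channels        = 2;
        int bits_per_sample = 16;
        int block_align     = channels * bits_per_sample / 8;  /* 4 bytes per PCM frame */
        int bit_rate        = sample_rate * block_align * 8;   /* 44100 * 32 = 1411200 b/s */

        printf("block_align=%d bit_rate=%d\n", block_align, bit_rate);
        return 0;
    }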

View File

@ -39,23 +39,24 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
char inter; char inter;
const char *colorspace = ""; const char *colorspace = "";
st = s->streams[0]; st = s->streams[0];
width = st->codec->width; width = st->codec->width;
height = st->codec->height; height = st->codec->height;
av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1); av_reduce(&raten, &rated, st->codec->time_base.den,
st->codec->time_base.num, (1UL << 31) - 1);
aspectn = st->sample_aspect_ratio.num; aspectn = st->sample_aspect_ratio.num;
aspectd = st->sample_aspect_ratio.den; aspectd = st->sample_aspect_ratio.den;
if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown if (aspectn == 0 && aspectd == 1)
aspectd = 0; // 0:0 means unknown
inter = 'p'; /* progressive is the default */ inter = 'p'; /* progressive is the default */
if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) { if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame)
inter = st->codec->coded_frame->top_field_first ? 't' : 'b'; inter = st->codec->coded_frame->top_field_first ? 't' : 'b';
}
switch(st->codec->pix_fmt) { switch (st->codec->pix_fmt) {
case PIX_FMT_GRAY8: case PIX_FMT_GRAY8:
colorspace = " Cmono"; colorspace = " Cmono";
break; break;
@ -63,9 +64,11 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
colorspace = " C411 XYSCSS=411"; colorspace = " C411 XYSCSS=411";
break; break;
case PIX_FMT_YUV420P: case PIX_FMT_YUV420P:
colorspace = (st->codec->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)?" C420paldv XYSCSS=420PALDV": switch (st->codec->chroma_sample_location) {
(st->codec->chroma_sample_location == AVCHROMA_LOC_LEFT) ?" C420mpeg2 XYSCSS=420MPEG2": case AVCHROMA_LOC_TOPLEFT: colorspace = " C420paldv XYSCSS=420PALDV"; break;
" C420jpeg XYSCSS=420JPEG"; case AVCHROMA_LOC_LEFT: colorspace = " C420mpeg2 XYSCSS=420MPEG2"; break;
default: colorspace = " C420jpeg XYSCSS=420JPEG"; break;
}
break; break;
case PIX_FMT_YUV422P: case PIX_FMT_YUV422P:
colorspace = " C422 XYSCSS=422"; colorspace = " C422 XYSCSS=422";
@ -77,13 +80,8 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
/* construct stream header, if this is the first frame */ /* construct stream header, if this is the first frame */
n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n", n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
Y4M_MAGIC, Y4M_MAGIC, width, height, raten, rated, inter,
width, aspectn, aspectd, colorspace);
height,
raten, rated,
inter,
aspectn, aspectd,
colorspace);
return n; return n;
} }
@ -96,7 +94,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
int* first_pkt = s->priv_data; int* first_pkt = s->priv_data;
int width, height, h_chroma_shift, v_chroma_shift; int width, height, h_chroma_shift, v_chroma_shift;
int i; int i;
char buf2[Y4M_LINE_MAX+1]; char buf2[Y4M_LINE_MAX + 1];
char buf1[20]; char buf1[20];
uint8_t *ptr, *ptr1, *ptr2; uint8_t *ptr, *ptr1, *ptr2;
@ -106,7 +104,8 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
if (*first_pkt) { if (*first_pkt) {
*first_pkt = 0; *first_pkt = 0;
if (yuv4_generate_header(s, buf2) < 0) { if (yuv4_generate_header(s, buf2) < 0) {
av_log(s, AV_LOG_ERROR, "Error. YUV4MPEG stream header write failed.\n"); av_log(s, AV_LOG_ERROR,
"Error. YUV4MPEG stream header write failed.\n");
return AVERROR(EIO); return AVERROR(EIO);
} else { } else {
avio_write(pb, buf2, strlen(buf2)); avio_write(pb, buf2, strlen(buf2));
@ -118,31 +117,32 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC); snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
avio_write(pb, buf1, strlen(buf1)); avio_write(pb, buf1, strlen(buf1));
width = st->codec->width; width = st->codec->width;
height = st->codec->height; height = st->codec->height;
ptr = picture->data[0]; ptr = picture->data[0];
for(i=0;i<height;i++) { for (i = 0; i < height; i++) {
avio_write(pb, ptr, width); avio_write(pb, ptr, width);
ptr += picture->linesize[0]; ptr += picture->linesize[0];
} }
if (st->codec->pix_fmt != PIX_FMT_GRAY8){ if (st->codec->pix_fmt != PIX_FMT_GRAY8) {
// Adjust for smaller Cb and Cr planes // Adjust for smaller Cb and Cr planes
avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, &v_chroma_shift); avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift,
width >>= h_chroma_shift; &v_chroma_shift);
height >>= v_chroma_shift; width >>= h_chroma_shift;
height >>= v_chroma_shift;
ptr1 = picture->data[1]; ptr1 = picture->data[1];
ptr2 = picture->data[2]; ptr2 = picture->data[2];
for(i=0;i<height;i++) { /* Cb */ for (i = 0; i < height; i++) { /* Cb */
avio_write(pb, ptr1, width); avio_write(pb, ptr1, width);
ptr1 += picture->linesize[1]; ptr1 += picture->linesize[1];
} }
for(i=0;i<height;i++) { /* Cr */ for (i = 0; i < height; i++) { /* Cr */
avio_write(pb, ptr2, width); avio_write(pb, ptr2, width);
ptr2 += picture->linesize[2]; ptr2 += picture->linesize[2];
} }
} }
avio_flush(pb); avio_flush(pb);
return 0; return 0;
@ -150,7 +150,7 @@ static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
static int yuv4_write_header(AVFormatContext *s) static int yuv4_write_header(AVFormatContext *s)
{ {
int* first_pkt = s->priv_data; int *first_pkt = s->priv_data;
if (s->nb_streams != 1) if (s->nb_streams != 1)
return AVERROR(EIO); return AVERROR(EIO);
@ -162,13 +162,15 @@ static int yuv4_write_header(AVFormatContext *s)
} }
if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) { if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) {
av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n"); av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV "
} "stream, some mjpegtools might not work.\n");
else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) && } else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) &&
(s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) && (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) &&
(s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) && (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) &&
(s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) { (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) {
av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. Use -pix_fmt to select one.\n"); av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, "
"yuv422p, yuv420p, yuv411p and gray pixel formats. "
"Use -pix_fmt to select one.\n");
return AVERROR(EIO); return AVERROR(EIO);
} }
@ -186,7 +188,7 @@ AVOutputFormat ff_yuv4mpegpipe_muxer = {
.video_codec = CODEC_ID_RAWVIDEO, .video_codec = CODEC_ID_RAWVIDEO,
.write_header = yuv4_write_header, .write_header = yuv4_write_header,
.write_packet = yuv4_write_packet, .write_packet = yuv4_write_packet,
.flags = AVFMT_RAWPICTURE, .flags = AVFMT_RAWPICTURE,
}; };
#endif #endif
@ -196,85 +198,96 @@ AVOutputFormat ff_yuv4mpegpipe_muxer = {
static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
{ {
char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option char header[MAX_YUV4_HEADER + 10]; // Include headroom for
char *tokstart,*tokend,*header_end; // the longest option
char *tokstart, *tokend, *header_end;
int i; int i;
AVIOContext *pb = s->pb; AVIOContext *pb = s->pb;
int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0; int width = -1, height = -1, raten = 0,
enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE; rated = 0, aspectn = 0, aspectd = 0;
enum PixelFormat pix_fmt = PIX_FMT_NONE, alt_pix_fmt = PIX_FMT_NONE;
enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
AVStream *st; AVStream *st;
struct frame_attributes *s1 = s->priv_data; struct frame_attributes *s1 = s->priv_data;
for (i=0; i<MAX_YUV4_HEADER; i++) { for (i = 0; i < MAX_YUV4_HEADER; i++) {
header[i] = avio_r8(pb); header[i] = avio_r8(pb);
if (header[i] == '\n') { if (header[i] == '\n') {
header[i+1] = 0x20; // Add a space after last option. Makes parsing "444" vs "444alpha" easier. header[i + 1] = 0x20; // Add a space after last option.
header[i+2] = 0; // Makes parsing "444" vs "444alpha" easier.
header[i + 2] = 0;
break; break;
} }
} }
if (i == MAX_YUV4_HEADER) return -1; if (i == MAX_YUV4_HEADER)
if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1; return -1;
if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC)))
return -1;
s1->interlaced_frame = 0; s1->interlaced_frame = 0;
s1->top_field_first = 0; s1->top_field_first = 0;
header_end = &header[i+1]; // Include space header_end = &header[i + 1]; // Include space
for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) { for (tokstart = &header[strlen(Y4M_MAGIC) + 1];
if (*tokstart==0x20) continue; tokstart < header_end; tokstart++) {
if (*tokstart == 0x20)
continue;
switch (*tokstart++) { switch (*tokstart++) {
case 'W': // Width. Required. case 'W': // Width. Required.
width = strtol(tokstart, &tokend, 10); width = strtol(tokstart, &tokend, 10);
tokstart=tokend; tokstart = tokend;
break; break;
case 'H': // Height. Required. case 'H': // Height. Required.
height = strtol(tokstart, &tokend, 10); height = strtol(tokstart, &tokend, 10);
tokstart=tokend; tokstart = tokend;
break; break;
case 'C': // Color space case 'C': // Color space
if (strncmp("420jpeg",tokstart,7)==0) { if (strncmp("420jpeg", tokstart, 7) == 0) {
pix_fmt = PIX_FMT_YUV420P; pix_fmt = PIX_FMT_YUV420P;
chroma_sample_location = AVCHROMA_LOC_CENTER; chroma_sample_location = AVCHROMA_LOC_CENTER;
} else if (strncmp("420mpeg2",tokstart,8)==0) { } else if (strncmp("420mpeg2", tokstart, 8) == 0) {
pix_fmt = PIX_FMT_YUV420P; pix_fmt = PIX_FMT_YUV420P;
chroma_sample_location = AVCHROMA_LOC_LEFT; chroma_sample_location = AVCHROMA_LOC_LEFT;
} else if (strncmp("420paldv", tokstart, 8)==0) { } else if (strncmp("420paldv", tokstart, 8) == 0) {
pix_fmt = PIX_FMT_YUV420P; pix_fmt = PIX_FMT_YUV420P;
chroma_sample_location = AVCHROMA_LOC_TOPLEFT; chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
} else if (strncmp("411", tokstart, 3)==0) } else if (strncmp("411", tokstart, 3) == 0)
pix_fmt = PIX_FMT_YUV411P; pix_fmt = PIX_FMT_YUV411P;
else if (strncmp("422", tokstart, 3)==0) else if (strncmp("422", tokstart, 3) == 0)
pix_fmt = PIX_FMT_YUV422P; pix_fmt = PIX_FMT_YUV422P;
else if (strncmp("444alpha", tokstart, 8)==0) { else if (strncmp("444alpha", tokstart, 8) == 0 ) {
av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 YUV4MPEG stream.\n"); av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 "
"YUV4MPEG stream.\n");
return -1; return -1;
} else if (strncmp("444", tokstart, 3)==0) } else if (strncmp("444", tokstart, 3) == 0)
pix_fmt = PIX_FMT_YUV444P; pix_fmt = PIX_FMT_YUV444P;
else if (strncmp("mono",tokstart, 4)==0) { else if (strncmp("mono", tokstart, 4) == 0) {
pix_fmt = PIX_FMT_GRAY8; pix_fmt = PIX_FMT_GRAY8;
} else { } else {
av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown pixel format.\n"); av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown "
"pixel format.\n");
return -1; return -1;
} }
while(tokstart<header_end&&*tokstart!=0x20) tokstart++; while (tokstart < header_end && *tokstart != 0x20)
tokstart++;
break; break;
case 'I': // Interlace type case 'I': // Interlace type
switch (*tokstart++){ switch (*tokstart++){
case '?': case '?':
break; break;
case 'p': case 'p':
s1->interlaced_frame=0; s1->interlaced_frame = 0;
break; break;
case 't': case 't':
s1->interlaced_frame=1; s1->interlaced_frame = 1;
s1->top_field_first=1; s1->top_field_first = 1;
break; break;
case 'b': case 'b':
s1->interlaced_frame=1; s1->interlaced_frame = 1;
s1->top_field_first=0; s1->top_field_first = 0;
break; break;
case 'm': case 'm':
av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\n"); av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed "
"interlaced and non-interlaced frames.\n");
return -1; return -1;
default: default:
av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
@ -282,36 +295,39 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
} }
break; break;
case 'F': // Frame rate case 'F': // Frame rate
sscanf(tokstart,"%d:%d",&raten,&rated); // 0:0 if unknown sscanf(tokstart, "%d:%d", &raten, &rated); // 0:0 if unknown
while(tokstart<header_end&&*tokstart!=0x20) tokstart++; while (tokstart < header_end && *tokstart != 0x20)
tokstart++;
break; break;
case 'A': // Pixel aspect case 'A': // Pixel aspect
sscanf(tokstart,"%d:%d",&aspectn,&aspectd); // 0:0 if unknown sscanf(tokstart, "%d:%d", &aspectn, &aspectd); // 0:0 if unknown
while(tokstart<header_end&&*tokstart!=0x20) tokstart++; while (tokstart < header_end && *tokstart != 0x20)
tokstart++;
break; break;
case 'X': // Vendor extensions case 'X': // Vendor extensions
if (strncmp("YSCSS=",tokstart,6)==0) { if (strncmp("YSCSS=", tokstart, 6) == 0) {
// Older nonstandard pixel format representation // Older nonstandard pixel format representation
tokstart+=6; tokstart += 6;
if (strncmp("420JPEG",tokstart,7)==0) if (strncmp("420JPEG", tokstart, 7) == 0)
alt_pix_fmt=PIX_FMT_YUV420P; alt_pix_fmt = PIX_FMT_YUV420P;
else if (strncmp("420MPEG2",tokstart,8)==0) else if (strncmp("420MPEG2", tokstart, 8) == 0)
alt_pix_fmt=PIX_FMT_YUV420P; alt_pix_fmt = PIX_FMT_YUV420P;
else if (strncmp("420PALDV",tokstart,8)==0) else if (strncmp("420PALDV", tokstart, 8) == 0)
alt_pix_fmt=PIX_FMT_YUV420P; alt_pix_fmt = PIX_FMT_YUV420P;
else if (strncmp("411",tokstart,3)==0) else if (strncmp("411", tokstart, 3) == 0)
alt_pix_fmt=PIX_FMT_YUV411P; alt_pix_fmt = PIX_FMT_YUV411P;
else if (strncmp("422",tokstart,3)==0) else if (strncmp("422", tokstart, 3) == 0)
alt_pix_fmt=PIX_FMT_YUV422P; alt_pix_fmt = PIX_FMT_YUV422P;
else if (strncmp("444",tokstart,3)==0) else if (strncmp("444", tokstart, 3) == 0)
alt_pix_fmt=PIX_FMT_YUV444P; alt_pix_fmt = PIX_FMT_YUV444P;
} }
while(tokstart<header_end&&*tokstart!=0x20) tokstart++; while (tokstart < header_end && *tokstart != 0x20)
tokstart++;
break; break;
} }
} }
if ((width == -1) || (height == -1)) { if (width == -1 || height == -1) {
av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
return -1; return -1;
} }
@ -335,16 +351,16 @@ static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
} }
st = avformat_new_stream(s, NULL); st = avformat_new_stream(s, NULL);
if(!st) if (!st)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
st->codec->width = width; st->codec->width = width;
st->codec->height = height; st->codec->height = height;
av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1); av_reduce(&raten, &rated, raten, rated, (1UL << 31) - 1);
avpriv_set_pts_info(st, 64, rated, raten); avpriv_set_pts_info(st, 64, rated, raten);
st->codec->pix_fmt = pix_fmt; st->codec->pix_fmt = pix_fmt;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO;
st->sample_aspect_ratio= (AVRational){aspectn, aspectd}; st->sample_aspect_ratio = (AVRational){ aspectn, aspectd };
st->codec->chroma_sample_location = chroma_sample_location; st->codec->chroma_sample_location = chroma_sample_location;
return 0; return 0;
@ -358,17 +374,19 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
AVStream *st = s->streams[0]; AVStream *st = s->streams[0];
struct frame_attributes *s1 = s->priv_data; struct frame_attributes *s1 = s->priv_data;
for (i=0; i<MAX_FRAME_HEADER; i++) { for (i = 0; i < MAX_FRAME_HEADER; i++) {
header[i] = avio_r8(s->pb); header[i] = avio_r8(s->pb);
if (header[i] == '\n') { if (header[i] == '\n') {
header[i+1] = 0; header[i + 1] = 0;
break; break;
} }
} }
if (i == MAX_FRAME_HEADER) return -1; if (i == MAX_FRAME_HEADER)
if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1; return -1;
if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC)))
return -1;
width = st->codec->width; width = st->codec->width;
height = st->codec->height; height = st->codec->height;
packet_size = avpicture_get_size(st->codec->pix_fmt, width, height); packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
@ -378,9 +396,9 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
if (av_get_packet(s->pb, pkt, packet_size) != packet_size) if (av_get_packet(s->pb, pkt, packet_size) != packet_size)
return AVERROR(EIO); return AVERROR(EIO);
if (s->streams[0]->codec->coded_frame) { if (st->codec->coded_frame) {
s->streams[0]->codec->coded_frame->interlaced_frame = s1->interlaced_frame; st->codec->coded_frame->interlaced_frame = s1->interlaced_frame;
s->streams[0]->codec->coded_frame->top_field_first = s1->top_field_first; st->codec->coded_frame->top_field_first = s1->top_field_first;
} }
pkt->stream_index = 0; pkt->stream_index = 0;
@ -390,7 +408,7 @@ static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
static int yuv4_probe(AVProbeData *pd) static int yuv4_probe(AVProbeData *pd)
{ {
/* check file header */ /* check file header */
if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC)-1)==0) if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC) - 1) == 0)
return AVPROBE_SCORE_MAX; return AVPROBE_SCORE_MAX;
else else
return 0; return 0;
@ -404,6 +422,6 @@ AVInputFormat ff_yuv4mpegpipe_demuxer = {
.read_probe = yuv4_probe, .read_probe = yuv4_probe,
.read_header = yuv4_read_header, .read_header = yuv4_read_header,
.read_packet = yuv4_read_packet, .read_packet = yuv4_read_packet,
.extensions = "y4m" .extensions = "y4m"
}; };
#endif #endif
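
To illustrate the stream header that yuv4_generate_header() builds with the consolidated snprintf above, here is a stand-alone sketch using hypothetical 640x480, 25 fps, progressive 4:2:0 parameters; the magic string and colorspace tag are assumed to match the muxer's usual "YUV4MPEG2" / "C420mpeg2" values and are not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
        char buf[256];
        /* Same layout as the muxer: MAGIC W H F<num>:<den> I<interlacing> A<sar> <colorspace> */
        snprintf(buf, sizeof(buf), "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
                 "YUV4MPEG2", 640, 480, 25, 1, 'p', 0, 0, " C420mpeg2 XYSCSS=420MPEG2");
        /* Prints: YUV4MPEG2 W640 H480 F25:1 Ip A0:0 C420mpeg2 XYSCSS=420MPEG2 */
        fputs(buf, stdout);
        return 0;
    }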

View File

@ -56,32 +56,34 @@ static AVCRC av_crc_table[AV_CRC_MAX][257];
* @param ctx_size size of ctx in bytes * @param ctx_size size of ctx in bytes
* @return <0 on failure * @return <0 on failure
*/ */
int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size){ int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size)
{
unsigned i, j; unsigned i, j;
uint32_t c; uint32_t c;
if (bits < 8 || bits > 32 || poly >= (1LL<<bits)) if (bits < 8 || bits > 32 || poly >= (1LL << bits))
return -1; return -1;
if (ctx_size != sizeof(AVCRC)*257 && ctx_size != sizeof(AVCRC)*1024) if (ctx_size != sizeof(AVCRC) * 257 && ctx_size != sizeof(AVCRC) * 1024)
return -1; return -1;
for (i = 0; i < 256; i++) { for (i = 0; i < 256; i++) {
if (le) { if (le) {
for (c = i, j = 0; j < 8; j++) for (c = i, j = 0; j < 8; j++)
c = (c>>1)^(poly & (-(c&1))); c = (c >> 1) ^ (poly & (-(c & 1)));
ctx[i] = c; ctx[i] = c;
} else { } else {
for (c = i << 24, j = 0; j < 8; j++) for (c = i << 24, j = 0; j < 8; j++)
c = (c<<1) ^ ((poly<<(32-bits)) & (((int32_t)c)>>31) ); c = (c << 1) ^ ((poly << (32 - bits)) & (((int32_t) c) >> 31));
ctx[i] = av_bswap32(c); ctx[i] = av_bswap32(c);
} }
} }
ctx[256]=1; ctx[256] = 1;
#if !CONFIG_SMALL #if !CONFIG_SMALL
if(ctx_size >= sizeof(AVCRC)*1024) if (ctx_size >= sizeof(AVCRC) * 1024)
for (i = 0; i < 256; i++) for (i = 0; i < 256; i++)
for(j=0; j<3; j++) for (j = 0; j < 3; j++)
ctx[256*(j+1) + i]= (ctx[256*j + i]>>8) ^ ctx[ ctx[256*j + i]&0xFF ]; ctx[256 *(j + 1) + i] =
(ctx[256 * j + i] >> 8) ^ ctx[ctx[256 * j + i] & 0xFF];
#endif #endif
return 0; return 0;
@ -92,9 +94,10 @@ int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size){
* @param crc_id ID of a standard CRC * @param crc_id ID of a standard CRC
* @return a pointer to the CRC table or NULL on failure * @return a pointer to the CRC table or NULL on failure
*/ */
const AVCRC *av_crc_get_table(AVCRCId crc_id){ const AVCRC *av_crc_get_table(AVCRCId crc_id)
{
#if !CONFIG_HARDCODED_TABLES #if !CONFIG_HARDCODED_TABLES
if (!av_crc_table[crc_id][FF_ARRAY_ELEMS(av_crc_table[crc_id])-1]) if (!av_crc_table[crc_id][FF_ARRAY_ELEMS(av_crc_table[crc_id]) - 1])
if (av_crc_init(av_crc_table[crc_id], if (av_crc_init(av_crc_table[crc_id],
av_crc_table_params[crc_id].le, av_crc_table_params[crc_id].le,
av_crc_table_params[crc_id].bits, av_crc_table_params[crc_id].bits,
@ -112,46 +115,50 @@ const AVCRC *av_crc_get_table(AVCRCId crc_id){
* *
* @see av_crc_init() "le" parameter * @see av_crc_init() "le" parameter
*/ */
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length){ uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
const uint8_t *end= buffer+length; const uint8_t *buffer, size_t length)
{
const uint8_t *end = buffer + length;
#if !CONFIG_SMALL #if !CONFIG_SMALL
if(!ctx[256]) { if (!ctx[256]) {
while(((intptr_t) buffer & 3) && buffer < end) while (((intptr_t) buffer & 3) && buffer < end)
crc = ctx[((uint8_t)crc) ^ *buffer++] ^ (crc >> 8); crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8);
while(buffer<end-3){ while (buffer < end - 3) {
crc ^= av_le2ne32(*(const uint32_t*)buffer); buffer+=4; crc ^= av_le2ne32(*(const uint32_t *) buffer); buffer += 4;
crc = ctx[3*256 + ( crc &0xFF)] crc = ctx[3 * 256 + ( crc & 0xFF)] ^
^ctx[2*256 + ((crc>>8 )&0xFF)] ctx[2 * 256 + ((crc >> 8 ) & 0xFF)] ^
^ctx[1*256 + ((crc>>16)&0xFF)] ctx[1 * 256 + ((crc >> 16) & 0xFF)] ^
^ctx[0*256 + ((crc>>24) )]; ctx[0 * 256 + ((crc >> 24) )];
} }
} }
#endif #endif
while(buffer<end) while (buffer < end)
crc = ctx[((uint8_t)crc) ^ *buffer++] ^ (crc >> 8); crc = ctx[((uint8_t) crc) ^ *buffer++] ^ (crc >> 8);
return crc; return crc;
} }
#ifdef TEST #ifdef TEST
#undef printf #undef printf
int main(void){ int main(void)
{
uint8_t buf[1999]; uint8_t buf[1999];
int i; int i;
int p[4][3]={{AV_CRC_32_IEEE_LE, 0xEDB88320, 0x3D5CDD04}, int p[4][3] = { { AV_CRC_32_IEEE_LE, 0xEDB88320, 0x3D5CDD04 },
{AV_CRC_32_IEEE , 0x04C11DB7, 0xC0F5BAE0}, { AV_CRC_32_IEEE , 0x04C11DB7, 0xC0F5BAE0 },
{AV_CRC_16_ANSI , 0x8005, 0x1FBB }, { AV_CRC_16_ANSI , 0x8005 , 0x1FBB },
{AV_CRC_8_ATM , 0x07, 0xE3 },}; { AV_CRC_8_ATM , 0x07 , 0xE3 }
};
const AVCRC *ctx; const AVCRC *ctx;
for(i=0; i<sizeof(buf); i++) for (i = 0; i < sizeof(buf); i++)
buf[i]= i+i*i; buf[i] = i + i * i;
for(i=0; i<4; i++){ for (i = 0; i < 4; i++) {
ctx = av_crc_get_table(p[i][0]); ctx = av_crc_get_table(p[i][0]);
printf("crc %08X =%X\n", p[i][1], av_crc(ctx, 0, buf, sizeof(buf))); printf("crc %08X = %X\n", p[i][1], av_crc(ctx, 0, buf, sizeof(buf)));
} }
return 0; return 0;
} }
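
A minimal caller of the reformatted CRC API, patterned on the self-test above; the libavutil include path and linking are assumed, and the snippet is illustrative rather than part of the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include "libavutil/crc.h"

    int main(void)
    {
        static const uint8_t data[] = "123456789";
        const AVCRC *ctx = av_crc_get_table(AV_CRC_32_IEEE_LE);
        /* Start the running CRC at 0 and feed the buffer in one call;
           the same call can be chained over multiple buffers. */
        uint32_t crc = av_crc(ctx, 0, data, sizeof(data) - 1);
        printf("%08X\n", (unsigned) crc);
        return 0;
    }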

View File

@ -27,19 +27,21 @@
#include "intreadwrite.h" #include "intreadwrite.h"
#include "attributes.h" #include "attributes.h"
void av_cold av_lfg_init(AVLFG *c, unsigned int seed){ void av_cold av_lfg_init(AVLFG *c, unsigned int seed)
uint8_t tmp[16]={0}; {
uint8_t tmp[16] = { 0 };
int i; int i;
for(i=8; i<64; i+=4){ for (i = 8; i < 64; i += 4) {
AV_WL32(tmp, seed); tmp[4]=i; AV_WL32(tmp, seed);
av_md5_sum(tmp, tmp, 16); tmp[4] = i;
c->state[i ]= AV_RL32(tmp); av_md5_sum(tmp, tmp, 16);
c->state[i+1]= AV_RL32(tmp+4); c->state[i ] = AV_RL32(tmp);
c->state[i+2]= AV_RL32(tmp+8); c->state[i + 1] = AV_RL32(tmp + 4);
c->state[i+3]= AV_RL32(tmp+12); c->state[i + 2] = AV_RL32(tmp + 8);
c->state[i + 3] = AV_RL32(tmp + 12);
} }
c->index=0; c->index = 0;
} }
void av_bmg_get(AVLFG *lfg, double out[2]) void av_bmg_get(AVLFG *lfg, double out[2])
@ -47,9 +49,9 @@ void av_bmg_get(AVLFG *lfg, double out[2])
double x1, x2, w; double x1, x2, w;
do { do {
x1 = 2.0/UINT_MAX*av_lfg_get(lfg) - 1.0; x1 = 2.0 / UINT_MAX * av_lfg_get(lfg) - 1.0;
x2 = 2.0/UINT_MAX*av_lfg_get(lfg) - 1.0; x2 = 2.0 / UINT_MAX * av_lfg_get(lfg) - 1.0;
w = x1*x1 + x2*x2; w = x1 * x1 + x2 * x2;
} while (w >= 1.0); } while (w >= 1.0);
w = sqrt((-2.0 * log(w)) / w); w = sqrt((-2.0 * log(w)) / w);
@ -63,7 +65,7 @@ void av_bmg_get(AVLFG *lfg, double out[2])
int main(void) int main(void)
{ {
int x=0; int x = 0;
int i, j; int i, j;
AVLFG state; AVLFG state;
@ -71,8 +73,8 @@ int main(void)
for (j = 0; j < 10000; j++) { for (j = 0; j < 10000; j++) {
START_TIMER START_TIMER
for (i = 0; i < 624; i++) { for (i = 0; i < 624; i++) {
// av_log(NULL,AV_LOG_ERROR, "%X\n", av_lfg_get(&state)); //av_log(NULL, AV_LOG_ERROR, "%X\n", av_lfg_get(&state));
x+=av_lfg_get(&state); x += av_lfg_get(&state);
} }
STOP_TIMER("624 calls of av_lfg_get"); STOP_TIMER("624 calls of av_lfg_get");
} }
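
The reindented av_bmg_get() above is the polar (Marsaglia) form of the Box-Muller transform. A stand-alone sketch of the same math, substituting rand() for the lagged Fibonacci generator purely for illustration (the w == 0.0 guard is an extra safety check not present in the original):

    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Draw two independent standard-normal samples from uniform ones. */
    static void bmg(double out[2])
    {
        double x1, x2, w;
        do {
            x1 = 2.0 * rand() / RAND_MAX - 1.0;
            x2 = 2.0 * rand() / RAND_MAX - 1.0;
            w  = x1 * x1 + x2 * x2;
        } while (w >= 1.0 || w == 0.0);
        w = sqrt(-2.0 * log(w) / w);
        out[0] = x1 * w;
        out[1] = x2 * w;
    }

    int main(void)
    {
        double v[2];
        bmg(v);
        printf("%f %f\n", v[0], v[1]);
        return 0;
    }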

View File

@ -34,49 +34,54 @@ static int flags;
#if defined(_WIN32) && !defined(__MINGW32CE__) #if defined(_WIN32) && !defined(__MINGW32CE__)
#include <windows.h> #include <windows.h>
static const uint8_t color[] = {12,12,12,14,7,7,7}; static const uint8_t color[] = { 12, 12, 12, 14, 7, 7, 7 };
static int16_t background, attr_orig; static int16_t background, attr_orig;
static HANDLE con; static HANDLE con;
#define set_color(x) SetConsoleTextAttribute(con, background | color[x]) #define set_color(x) SetConsoleTextAttribute(con, background | color[x])
#define reset_color() SetConsoleTextAttribute(con, attr_orig) #define reset_color() SetConsoleTextAttribute(con, attr_orig)
#else #else
static const uint8_t color[]={0x41,0x41,0x11,0x03,9,9,9}; static const uint8_t color[] = { 0x41, 0x41, 0x11, 0x03, 9, 9, 9 };
#define set_color(x) fprintf(stderr, "\033[%d;3%dm", color[x]>>4, color[x]&15) #define set_color(x) fprintf(stderr, "\033[%d;3%dm", color[x] >> 4, color[x]&15)
#define reset_color() fprintf(stderr, "\033[0m") #define reset_color() fprintf(stderr, "\033[0m")
#endif #endif
static int use_color=-1; static int use_color = -1;
#undef fprintf #undef fprintf
static void colored_fputs(int level, const char *str){ static void colored_fputs(int level, const char *str)
if(use_color<0){ {
if (use_color < 0) {
#if defined(_WIN32) && !defined(__MINGW32CE__) #if defined(_WIN32) && !defined(__MINGW32CE__)
CONSOLE_SCREEN_BUFFER_INFO con_info; CONSOLE_SCREEN_BUFFER_INFO con_info;
con = GetStdHandle(STD_ERROR_HANDLE); con = GetStdHandle(STD_ERROR_HANDLE);
use_color = (con != INVALID_HANDLE_VALUE) && !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR"); use_color = (con != INVALID_HANDLE_VALUE) && !getenv("NO_COLOR") &&
!getenv("AV_LOG_FORCE_NOCOLOR");
if (use_color) { if (use_color) {
GetConsoleScreenBufferInfo(con, &con_info); GetConsoleScreenBufferInfo(con, &con_info);
attr_orig = con_info.wAttributes; attr_orig = con_info.wAttributes;
background = attr_orig & 0xF0; background = attr_orig & 0xF0;
} }
#elif HAVE_ISATTY #elif HAVE_ISATTY
use_color= !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR") && use_color = !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR") &&
(getenv("TERM") && isatty(2) || getenv("AV_LOG_FORCE_COLOR")); (getenv("TERM") && isatty(2) ||
getenv("AV_LOG_FORCE_COLOR"));
#else #else
use_color= getenv("AV_LOG_FORCE_COLOR") && !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR"); use_color = getenv("AV_LOG_FORCE_COLOR") && !getenv("NO_COLOR") &&
!getenv("AV_LOG_FORCE_NOCOLOR");
#endif #endif
} }
if(use_color){ if (use_color) {
set_color(level); set_color(level);
} }
fputs(str, stderr); fputs(str, stderr);
if(use_color){ if (use_color) {
reset_color(); reset_color();
} }
} }
const char* av_default_item_name(void* ptr){ const char *av_default_item_name(void *ptr)
return (*(AVClass**)ptr)->class_name; {
return (*(AVClass **) ptr)->class_name;
} }
static void sanitize(uint8_t *line){ static void sanitize(uint8_t *line){
@ -89,58 +94,64 @@ static void sanitize(uint8_t *line){
void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl) void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl)
{ {
static int print_prefix=1; static int print_prefix = 1;
static int count; static int count;
static char prev[1024]; static char prev[1024];
char line[1024]; char line[1024];
static int is_atty; static int is_atty;
AVClass* avc= ptr ? *(AVClass**)ptr : NULL; AVClass* avc = ptr ? *(AVClass **) ptr : NULL;
if(level>av_log_level) if (level > av_log_level)
return; return;
line[0]=0; line[0] = 0;
#undef fprintf #undef fprintf
if(print_prefix && avc) { if (print_prefix && avc) {
if (avc->parent_log_context_offset) { if (avc->parent_log_context_offset) {
AVClass** parent= *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset); AVClass** parent = *(AVClass ***) (((uint8_t *) ptr) +
if(parent && *parent){ avc->parent_log_context_offset);
snprintf(line, sizeof(line), "[%s @ %p] ", (*parent)->item_name(parent), parent); if (parent && *parent) {
snprintf(line, sizeof(line), "[%s @ %p] ",
(*parent)->item_name(parent), parent);
} }
} }
snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr); snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ",
avc->item_name(ptr), ptr);
} }
vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl); vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
print_prefix = strlen(line) && line[strlen(line)-1] == '\n'; print_prefix = strlen(line) && line[strlen(line) - 1] == '\n';
#if HAVE_ISATTY #if HAVE_ISATTY
if(!is_atty) is_atty= isatty(2) ? 1 : -1; if (!is_atty)
is_atty = isatty(2) ? 1 : -1;
#endif #endif
if(print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){ if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){
count++; count++;
if(is_atty==1) if (is_atty == 1)
fprintf(stderr, " Last message repeated %d times\r", count); fprintf(stderr, " Last message repeated %d times\r", count);
return; return;
} }
if(count>0){ if (count > 0) {
fprintf(stderr, " Last message repeated %d times\n", count); fprintf(stderr, " Last message repeated %d times\n", count);
count=0; count = 0;
} }
strcpy(prev, line); strcpy(prev, line);
sanitize(line); sanitize(line);
colored_fputs(av_clip(level>>3, 0, 6), line); colored_fputs(av_clip(level >> 3, 0, 6), line);
} }
static void (*av_log_callback)(void*, int, const char*, va_list) = av_log_default_callback; static void (*av_log_callback)(void*, int, const char*, va_list) =
av_log_default_callback;
void av_log(void* avcl, int level, const char *fmt, ...) void av_log(void* avcl, int level, const char *fmt, ...)
{ {
AVClass* avc= avcl ? *(AVClass**)avcl : NULL; AVClass* avc = avcl ? *(AVClass **) avcl : NULL;
va_list vl; va_list vl;
va_start(vl, fmt); va_start(vl, fmt);
if(avc && avc->version >= (50<<16 | 15<<8 | 2) && avc->log_level_offset_offset && level>=AV_LOG_FATAL) if (avc && avc->version >= (50 << 16 | 15 << 8 | 2) &&
level += *(int*)(((uint8_t*)avcl) + avc->log_level_offset_offset); avc->log_level_offset_offset && level >= AV_LOG_FATAL)
level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset);
av_vlog(avcl, level, fmt, vl); av_vlog(avcl, level, fmt, vl);
va_end(vl); va_end(vl);
} }
@ -162,7 +173,7 @@ void av_log_set_level(int level)
void av_log_set_flags(int arg) void av_log_set_flags(int arg)
{ {
flags= arg; flags = arg;
} }
void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)) void av_log_set_callback(void (*callback)(void*, int, const char*, va_list))

File diff suppressed because it is too large

View File

@ -40,24 +40,24 @@ static int read_random(uint32_t *dst, const char *file)
static uint32_t get_generic_seed(void) static uint32_t get_generic_seed(void)
{ {
clock_t last_t=0; clock_t last_t = 0;
int bits=0; int bits = 0;
uint64_t random=0; uint64_t random = 0;
unsigned i; unsigned i;
float s=0.000000000001; float s = 0.000000000001;
for(i=0;bits<64;i++){ for (i = 0; bits < 64; i++) {
clock_t t= clock(); clock_t t = clock();
if(last_t && fabs(t-last_t)>s || t==(clock_t)-1){ if (last_t && fabs(t - last_t) > s || t == (clock_t) -1) {
if(i<10000 && s<(1<<24)){ if (i < 10000 && s < (1 << 24)) {
s+=s; s += s;
i=t=0; i = t = 0;
}else{ } else {
random= 2*random + (i&1); random = 2 * random + (i & 1);
bits++; bits++;
} }
} }
last_t= t; last_t = t;
} }
#ifdef AV_READ_TIME #ifdef AV_READ_TIME
random ^= AV_READ_TIME(); random ^= AV_READ_TIME();
@ -65,7 +65,7 @@ static uint32_t get_generic_seed(void)
random ^= clock(); random ^= clock();
#endif #endif
random += random>>32; random += random >> 32;
return random; return random;
} }

View File

@ -33,75 +33,86 @@
#include "mathematics.h" #include "mathematics.h"
#include "rational.h" #include "rational.h"
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max){ int av_reduce(int *dst_num, int *dst_den,
AVRational a0={0,1}, a1={1,0}; int64_t num, int64_t den, int64_t max)
int sign= (num<0) ^ (den<0); {
int64_t gcd= av_gcd(FFABS(num), FFABS(den)); AVRational a0 = { 0, 1 }, a1 = { 1, 0 };
int sign = (num < 0) ^ (den < 0);
int64_t gcd = av_gcd(FFABS(num), FFABS(den));
if(gcd){ if (gcd) {
num = FFABS(num)/gcd; num = FFABS(num) / gcd;
den = FFABS(den)/gcd; den = FFABS(den) / gcd;
} }
if(num<=max && den<=max){ if (num <= max && den <= max) {
a1= (AVRational){num, den}; a1 = (AVRational) { num, den };
den=0; den = 0;
} }
while(den){ while (den) {
uint64_t x = num / den; uint64_t x = num / den;
int64_t next_den= num - den*x; int64_t next_den = num - den * x;
int64_t a2n= x*a1.num + a0.num; int64_t a2n = x * a1.num + a0.num;
int64_t a2d= x*a1.den + a0.den; int64_t a2d = x * a1.den + a0.den;
if(a2n > max || a2d > max){ if (a2n > max || a2d > max) {
if(a1.num) x= (max - a0.num) / a1.num; if (a1.num) x = (max - a0.num) / a1.num;
if(a1.den) x= FFMIN(x, (max - a0.den) / a1.den); if (a1.den) x = FFMIN(x, (max - a0.den) / a1.den);
if (den*(2*x*a1.den + a0.den) > num*a1.den) if (den * (2 * x * a1.den + a0.den) > num * a1.den)
a1 = (AVRational){x*a1.num + a0.num, x*a1.den + a0.den}; a1 = (AVRational) { x * a1.num + a0.num, x * a1.den + a0.den };
break; break;
} }
a0= a1; a0 = a1;
a1= (AVRational){a2n, a2d}; a1 = (AVRational) { a2n, a2d };
num= den; num = den;
den= next_den; den = next_den;
} }
av_assert2(av_gcd(a1.num, a1.den) <= 1U); av_assert2(av_gcd(a1.num, a1.den) <= 1U);
*dst_num = sign ? -a1.num : a1.num; *dst_num = sign ? -a1.num : a1.num;
*dst_den = a1.den; *dst_den = a1.den;
return den==0; return den == 0;
} }
AVRational av_mul_q(AVRational b, AVRational c){ AVRational av_mul_q(AVRational b, AVRational c)
av_reduce(&b.num, &b.den, b.num * (int64_t)c.num, b.den * (int64_t)c.den, INT_MAX); {
av_reduce(&b.num, &b.den,
b.num * (int64_t) c.num,
b.den * (int64_t) c.den, INT_MAX);
return b; return b;
} }
AVRational av_div_q(AVRational b, AVRational c){ AVRational av_div_q(AVRational b, AVRational c)
return av_mul_q(b, (AVRational){c.den, c.num}); {
return av_mul_q(b, (AVRational) { c.den, c.num });
} }
AVRational av_add_q(AVRational b, AVRational c){ AVRational av_add_q(AVRational b, AVRational c) {
av_reduce(&b.num, &b.den, b.num * (int64_t)c.den + c.num * (int64_t)b.den, b.den * (int64_t)c.den, INT_MAX); av_reduce(&b.num, &b.den,
b.num * (int64_t) c.den +
c.num * (int64_t) b.den,
b.den * (int64_t) c.den, INT_MAX);
return b; return b;
} }
AVRational av_sub_q(AVRational b, AVRational c){ AVRational av_sub_q(AVRational b, AVRational c)
return av_add_q(b, (AVRational){-c.num, c.den}); {
return av_add_q(b, (AVRational) { -c.num, c.den });
} }
AVRational av_d2q(double d, int max){ AVRational av_d2q(double d, int max)
{
AVRational a; AVRational a;
#define LOG2 0.69314718055994530941723212145817656807550013436025 #define LOG2 0.69314718055994530941723212145817656807550013436025
int exponent; int exponent;
int64_t den; int64_t den;
if (isnan(d)) if (isnan(d))
return (AVRational){0,0}; return (AVRational) { 0,0 };
if (isinf(d)) if (isinf(d))
return (AVRational){ d<0 ? -1:1, 0 }; return (AVRational) { d < 0 ? -1 : 1, 0 };
exponent = FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0); exponent = FFMAX( (int)(log(fabs(d) + 1e-20)/LOG2), 0);
den = 1LL << (61 - exponent); den = 1LL << (61 - exponent);
av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max); av_reduce(&a.num, &a.den, (int64_t)(d * den + 0.5), den, max);
@ -127,7 +138,7 @@ int av_nearer_q(AVRational q, AVRational q1, AVRational q2)
int av_find_nearest_q_idx(AVRational q, const AVRational* q_list) int av_find_nearest_q_idx(AVRational q, const AVRational* q_list)
{ {
int i, nearest_q_idx = 0; int i, nearest_q_idx = 0;
for(i=0; q_list[i].den; i++) for (i = 0; q_list[i].den; i++)
if (av_nearer_q(q, q_list[i], q_list[nearest_q_idx]) > 0) if (av_nearer_q(q, q_list[i], q_list[nearest_q_idx]) > 0)
nearest_q_idx = i; nearest_q_idx = i;
@ -138,16 +149,19 @@ int av_find_nearest_q_idx(AVRational q, const AVRational* q_list)
int main(void) int main(void)
{ {
AVRational a,b; AVRational a,b;
for(a.num=-2; a.num<=2; a.num++){ for (a.num = -2; a.num <= 2; a.num++) {
for(a.den=-2; a.den<=2; a.den++){ for (a.den = -2; a.den <= 2; a.den++) {
for(b.num=-2; b.num<=2; b.num++){ for (b.num = -2; b.num <= 2; b.num++) {
for(b.den=-2; b.den<=2; b.den++){ for (b.den = -2; b.den <= 2; b.den++) {
int c= av_cmp_q(a,b); int c = av_cmp_q(a,b);
double d= av_q2d(a) == av_q2d(b) ? 0 : (av_q2d(a) - av_q2d(b)); double d = av_q2d(a) == av_q2d(b) ?
if(d>0) d=1; 0 : (av_q2d(a) - av_q2d(b));
else if(d<0) d=-1; if (d > 0) d = 1;
else if(d != d) d= INT_MIN; else if (d < 0) d = -1;
if(c!=d) av_log(0, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num, a.den, b.num, b.den, c,d); else if (d != d) d = INT_MIN;
if (c != d)
av_log(0, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num,
a.den, b.num, b.den, c,d);
} }
} }
} }
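
For context on the reformatted av_reduce(), a small stand-alone caller showing the bounded continued-fraction reduction; the expected outputs are worked out by hand and the libavutil public rational.h interface is assumed:

    #include <limits.h>
    #include <stdio.h>
    #include "libavutil/rational.h"

    int main(void)
    {
        int num, den;
        /* 30000/1001 already fits in INT_MAX, so it is only divided by its gcd (1). */
        av_reduce(&num, &den, 30000, 1001, INT_MAX);
        printf("%d/%d\n", num, den);   /* 30000/1001 */
        /* With a tight bound the continued-fraction path picks the closest fit. */
        av_reduce(&num, &den, 30000, 1001, 100);
        printf("%d/%d\n", num, den);   /* 30/1 with this bound */
        return 0;
    }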

View File

@ -21,22 +21,24 @@
#include "log.h" #include "log.h"
#include "tree.h" #include "tree.h"
typedef struct AVTreeNode{ typedef struct AVTreeNode {
struct AVTreeNode *child[2]; struct AVTreeNode *child[2];
void *elem; void *elem;
int state; int state;
}AVTreeNode; } AVTreeNode;
const int av_tree_node_size = sizeof(AVTreeNode); const int av_tree_node_size = sizeof(AVTreeNode);
void *av_tree_find(const AVTreeNode *t, void *key, int (*cmp)(void *key, const void *b), void *next[2]){ void *av_tree_find(const AVTreeNode *t, void *key,
if(t){ int (*cmp)(void *key, const void *b), void *next[2])
unsigned int v= cmp(key, t->elem); {
if(v){ if (t) {
if(next) next[v>>31]= t->elem; unsigned int v = cmp(key, t->elem);
return av_tree_find(t->child[(v>>31)^1], key, cmp, next); if (v) {
}else{ if (next) next[v >> 31] = t->elem;
if(next){ return av_tree_find(t->child[(v >> 31) ^ 1], key, cmp, next);
} else {
if (next) {
av_tree_find(t->child[0], key, cmp, next); av_tree_find(t->child[0], key, cmp, next);
av_tree_find(t->child[1], key, cmp, next); av_tree_find(t->child[1], key, cmp, next);
} }
@ -46,41 +48,43 @@ void *av_tree_find(const AVTreeNode *t, void *key, int (*cmp)(void *key, const v
return NULL; return NULL;
} }
void *av_tree_insert(AVTreeNode **tp, void *key, int (*cmp)(void *key, const void *b), AVTreeNode **next){ void *av_tree_insert(AVTreeNode **tp, void *key,
AVTreeNode *t= *tp; int (*cmp)(void *key, const void *b), AVTreeNode **next)
if(t){ {
unsigned int v= cmp(t->elem, key); AVTreeNode *t = *tp;
if (t) {
unsigned int v = cmp(t->elem, key);
void *ret; void *ret;
if(!v){ if (!v) {
if(*next) if (*next)
return t->elem; return t->elem;
else if(t->child[0]||t->child[1]){ else if (t->child[0] || t->child[1]) {
int i= !t->child[0]; int i = !t->child[0];
void *next_elem[2]; void *next_elem[2];
av_tree_find(t->child[i], key, cmp, next_elem); av_tree_find(t->child[i], key, cmp, next_elem);
key= t->elem= next_elem[i]; key = t->elem = next_elem[i];
v= -i; v = -i;
}else{ } else {
*next= t; *next = t;
*tp=NULL; *tp = NULL;
return NULL; return NULL;
} }
} }
ret= av_tree_insert(&t->child[v>>31], key, cmp, next); ret = av_tree_insert(&t->child[v >> 31], key, cmp, next);
if(!ret){ if (!ret) {
int i= (v>>31) ^ !!*next; int i = (v >> 31) ^ !!*next;
AVTreeNode **child= &t->child[i]; AVTreeNode **child = &t->child[i];
t->state += 2*i - 1; t->state += 2 * i - 1;
if(!(t->state&1)){ if (!(t->state & 1)) {
if(t->state){ if (t->state) {
/* The following code is equivalent to /* The following code is equivalent to
if((*child)->state*2 == -t->state) if((*child)->state*2 == -t->state)
rotate(child, i^1); rotate(child, i^1);
rotate(tp, i); rotate(tp, i);
with rotate(): with rotate():
static void rotate(AVTreeNode **tp, int i){ static void rotate(AVTreeNode **tp, int i) {
AVTreeNode *t= *tp; AVTreeNode *t= *tp;
*tp= t->child[i]; *tp= t->child[i];
@ -92,54 +96,62 @@ void *av_tree_insert(AVTreeNode **tp, void *key, int (*cmp)(void *key, const voi
} }
but such a rotate function is both bigger and slower but such a rotate function is both bigger and slower
*/ */
if((*child)->state*2 == -t->state){ if (( *child )->state * 2 == -t->state) {
*tp= (*child)->child[i^1]; *tp = (*child)->child[i ^ 1];
(*child)->child[i^1]= (*tp)->child[i]; (*child)->child[i ^ 1] = (*tp)->child[i];
(*tp)->child[i]= *child; (*tp)->child[i] = *child;
*child= (*tp)->child[i^1]; *child = ( *tp )->child[i ^ 1];
(*tp)->child[i^1]= t; (*tp)->child[i ^ 1] = t;
(*tp)->child[0]->state= -((*tp)->state>0); (*tp)->child[0]->state = -((*tp)->state > 0);
(*tp)->child[1]->state= (*tp)->state<0 ; (*tp)->child[1]->state = (*tp)->state < 0;
(*tp)->state=0; (*tp)->state = 0;
}else{ } else {
*tp= *child; *tp = *child;
*child= (*child)->child[i^1]; *child = (*child)->child[i ^ 1];
(*tp)->child[i^1]= t; (*tp)->child[i ^ 1] = t;
if((*tp)->state) t->state = 0; if ((*tp)->state) t->state = 0;
else t->state>>= 1; else t->state >>= 1;
(*tp)->state= -t->state; (*tp)->state = -t->state;
} }
} }
} }
if(!(*tp)->state ^ !!*next) if (!(*tp)->state ^ !!*next)
return key; return key;
} }
return ret; return ret;
}else{ } else {
*tp= *next; *next= NULL; *tp = *next;
if(*tp){ *next = NULL;
(*tp)->elem= key; if (*tp) {
(*tp)->elem = key;
return NULL; return NULL;
}else } else
return key; return key;
} }
} }
void av_tree_destroy(AVTreeNode *t){ void av_tree_destroy(AVTreeNode *t)
if(t){ {
if (t) {
av_tree_destroy(t->child[0]); av_tree_destroy(t->child[0]);
av_tree_destroy(t->child[1]); av_tree_destroy(t->child[1]);
av_free(t); av_free(t);
} }
} }
void av_tree_enumerate(AVTreeNode *t, void *opaque,
                       int (*cmp)(void *opaque, void *elem),
                       int (*enu)(void *opaque, void *elem))
{
    if (t) {
        int v = cmp ? cmp(opaque, t->elem) : 0;
        if (v >= 0)
            av_tree_enumerate(t->child[0], opaque, cmp, enu);
        if (v == 0)
            enu(opaque, t->elem);
        if (v <= 0)
            av_tree_enumerate(t->child[1], opaque, cmp, enu);
    }
}
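The cmp callback passed to av_tree_enumerate() acts as a range filter rather than a plain ordering: a negative return marks the element as below the range of interest (its left subtree is skipped), a positive return as above it (its right subtree is skipped), and zero means the element is inside the range and is handed to enu; a NULL cmp visits everything. A hedged sketch of such a callback pair, using illustrative names that are not part of libavutil:

struct my_range {
    void *lo, *hi;
};

static int my_in_range(void *opaque, void *elem)
{
    struct my_range *r = opaque;
    if ((char *)elem < (char *)r->lo) return -1; /* below range: only the right subtree can match */
    if ((char *)elem > (char *)r->hi) return  1; /* above range: only the left subtree can match  */
    return 0;                                    /* inside range: report it and descend both ways */
}

static int my_report(void *opaque, void *elem)
{
    av_log(NULL, AV_LOG_INFO, "elem %p\n", elem);
    return 0;
}

/* All elements:          av_tree_enumerate(root, NULL, NULL, my_report);
 * Only keys in [lo, hi]: struct my_range r = { lo, hi };
 *                        av_tree_enumerate(root, &r, my_in_range, my_report); */
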
@@ -147,64 +159,68 @@ void av_tree_enumerate(AVTreeNode *t, void *opaque, int (*cmp)(void *opaque, voi
#include "lfg.h"

static int check(AVTreeNode *t)
{
    if (t) {
        int left  = check(t->child[0]);
        int right = check(t->child[1]);
        if (left > 999 || right > 999)
            return 1000;
        if (right - left != t->state)
            return 1000;
        if (t->state > 1 || t->state < -1)
            return 1000;
        return FFMAX(left, right) + 1;
    }
    return 0;
}

static void print(AVTreeNode *t, int depth)
{
    int i;
    for (i = 0; i < depth * 4; i++) av_log(NULL, AV_LOG_ERROR, " ");
    if (t) {
        av_log(NULL, AV_LOG_ERROR, "Node %p %2d %p\n", t, t->state, t->elem);
        print(t->child[0], depth + 1);
        print(t->child[1], depth + 1);
    } else
        av_log(NULL, AV_LOG_ERROR, "NULL\n");
}

static int cmp(void *a, const void *b)
{
    return (uint8_t *) a - (const uint8_t *) b;
}

int main(void)
{
    int i;
    void *k;
    AVTreeNode *root = NULL, *node = NULL;
    AVLFG prng;
    av_lfg_init(&prng, 1);
    for (i = 0; i < 10000; i++) {
        int j = av_lfg_get(&prng) % 86294;
        if (check(root) > 999) {
            av_log(NULL, AV_LOG_ERROR, "FATAL error %d\n", i);
            print(root, 0);
            return -1;
        }
        av_log(NULL, AV_LOG_ERROR, "inserting %4d\n", j);
        if (!node)
            node = av_mallocz(av_tree_node_size);
        av_tree_insert(&root, (void *) (j + 1), cmp, &node);
        j = av_lfg_get(&prng) % 86294;
        {
            AVTreeNode *node2 = NULL;
            av_log(NULL, AV_LOG_ERROR, "removing %4d\n", j);
            av_tree_insert(&root, (void *) (j + 1), cmp, &node2);
            k = av_tree_find(root, (void *) (j + 1), cmp, NULL);
            if (k)
                av_log(NULL, AV_LOG_ERROR, "removal failure %d\n", i);
        }
    }
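One small caveat on the self-test above: it exercises insertion, lookup and removal, but as shown it never tears the tree down. In a real caller the leftover state would be released along these lines (a sketch, reusing the root and node variables from the test):

    av_tree_destroy(root);  /* recursively frees every node still linked into the tree    */
    av_free(node);          /* the preallocated spare node, if the last iteration left it */
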

View File

@@ -38,6 +38,10 @@ FATE_AAC += fate-aac-ap05_48
fate-aac-ap05_48: CMD = pcm -i $(SAMPLES)/aac/ap05_48.mp4
fate-aac-ap05_48: REF = $(SAMPLES)/aac/ap05_48.s16

FATE_AAC += fate-aac-latm_stereo_to_51
fate-aac-latm_stereo_to_51: CMD = pcm -i $(SAMPLES)/aac/latm_stereo_to_51.ts -ac 6
fate-aac-latm_stereo_to_51: REF = $(SAMPLES)/aac/latm_stereo_to_51.s16

fate-aac-ct%: CMD = pcm -i $(SAMPLES)/aac/CT_DecoderCheck/$(@:fate-aac-ct-%=%)
fate-aac-ct%: REF = $(SAMPLES)/aac/CT_DecoderCheck/aacPlusv2.wav