Mirror of https://gitee.com/openharmony/third_party_ffmpeg (synced 2024-11-23 11:19:55 +00:00)
add FF_ prefix to all (frame)_TYPE usage
Originally committed as revision 12399 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 88855b51cd
commit 9701840bb5
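The change is purely mechanical: every bare picture-type constant (I_TYPE, P_TYPE, B_TYPE, S_TYPE, SI_TYPE, SP_TYPE) becomes its FF_-prefixed counterpart, with no behavioural change. A minimal C sketch of the pattern, reusing one line from the diff below; the numeric values in the #defines are assumptions for illustration only, the real definitions are the FF_*_TYPE macros in avcodec.h:

    /* Picture-type constants as exposed by avcodec.h; the values shown
       here are illustrative assumptions, not the authoritative definitions. */
    #define FF_I_TYPE 1  /* intra frame */
    #define FF_P_TYPE 2  /* predicted frame */
    #define FF_B_TYPE 3  /* bi-directionally predicted frame */

    /* before this commit */
    if (s->avctx->xvmc_acceleration && s->pict_type == I_TYPE)    return 1;
    /* after this commit */
    if (s->avctx->xvmc_acceleration && s->pict_type == FF_I_TYPE) return 1;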
@@ -565,7 +565,7 @@ static int is_intra_more_likely(MpegEncContext *s){

 #ifdef HAVE_XVMC
 //prevent dsp.sad() check, that requires access to the image
-if(s->avctx->xvmc_acceleration && s->pict_type==I_TYPE) return 1;
+if(s->avctx->xvmc_acceleration && s->pict_type==FF_I_TYPE) return 1;
 #endif

 skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
@@ -584,7 +584,7 @@ static int is_intra_more_likely(MpegEncContext *s){
 j++;
 if((j%skip_amount) != 0) continue; //skip a few to speed things up

-if(s->pict_type==I_TYPE){
+if(s->pict_type==FF_I_TYPE){
 uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
 uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;

@@ -893,7 +893,7 @@ void ff_er_frame_end(MpegEncContext *s){
 }

 /* guess MVs */
-if(s->pict_type==B_TYPE){
+if(s->pict_type==FF_B_TYPE){
 for(mb_y=0; mb_y<s->mb_height; mb_y++){
 for(mb_x=0; mb_x<s->mb_width; mb_x++){
 int xy= mb_x*2 + mb_y*2*s->b8_stride;
@@ -1031,7 +1031,7 @@ ec_clean:
 const int mb_xy= s->mb_index2xy[i];
 int error= s->error_status_table[mb_xy];

-if(s->pict_type!=B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
+if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
 s->mbskip_table[mb_xy]=0;
 }
 s->mbintra_table[mb_xy]=1;
@@ -493,9 +493,9 @@ static int h261_decode_picture_header(H261Context *h){
 skip_bits(&s->gb, 8);
 }

-// h261 has no I-FRAMES, but if we pass I_TYPE for the first frame, the codec crashes if it does
+// h261 has no I-FRAMES, but if we pass FF_I_TYPE for the first frame, the codec crashes if it does
 // not contain all I-blocks (e.g. when a packet is lost)
-s->pict_type = P_TYPE;
+s->pict_type = FF_P_TYPE;

 h->gob_number = 0;
 return 0;
@@ -593,12 +593,12 @@ retry:

 // for hurry_up==5
 s->current_picture.pict_type= s->pict_type;
-s->current_picture.key_frame= s->pict_type == I_TYPE;
+s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

 /* skip everything if we are in a hurry>=5 */
 if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
-if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
-||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
+if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
+||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
 || avctx->skip_frame >= AVDISCARD_ALL)
 return get_consumed_bytes(s, buf_size);
@@ -204,7 +204,7 @@ void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number)
 put_bits(&s->pb, 16, s->width);
 put_bits(&s->pb, 16, s->height);
 }
-put_bits(&s->pb, 2, s->pict_type == P_TYPE); /* PictureType */
+put_bits(&s->pb, 2, s->pict_type == FF_P_TYPE); /* PictureType */
 put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */
 put_bits(&s->pb, 5, s->qscale); /* Quantizer */
 put_bits(&s->pb, 1, 0); /* ExtraInformation */
@@ -261,7 +261,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
 if (!s->h263_plus) {
 /* H.263v1 */
 put_bits(&s->pb, 3, format);
-put_bits(&s->pb, 1, (s->pict_type == P_TYPE));
+put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE));
 /* By now UMV IS DISABLED ON H.263v1, since the restrictions
 of H.263v1 UMV implies to check the predicted MV after
 calculation of the current MB to see if we're on the limits */
@@ -297,7 +297,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
 put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
 put_bits(&s->pb,3,0); /* Reserved */

-put_bits(&s->pb, 3, s->pict_type == P_TYPE);
+put_bits(&s->pb, 3, s->pict_type == FF_P_TYPE);

 put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
 put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
@@ -376,12 +376,12 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
 put_bits(&s->pb, 1, 1);
 put_bits(&s->pb, 5, s->qscale); /* GQUANT */
 put_bits(&s->pb, 1, 1);
-put_bits(&s->pb, 2, s->pict_type == I_TYPE); /* GFID */
+put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
 }else{
 int gob_number= mb_line / s->gob_index;

 put_bits(&s->pb, 5, gob_number); /* GN */
-put_bits(&s->pb, 2, s->pict_type == I_TYPE); /* GFID */
+put_bits(&s->pb, 2, s->pict_type == FF_I_TYPE); /* GFID */
 put_bits(&s->pb, 5, s->qscale); /* GQUANT */
 }
 }
@@ -556,7 +556,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){

 ff_clean_h263_qscales(s);

-if(s->pict_type== B_TYPE){
+if(s->pict_type== FF_B_TYPE){
 int odd=0;
 /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */

@@ -900,8 +900,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
 {
 int cbpc, cbpy, pred_x, pred_y;
 PutBitContext * const pb2 = s->data_partitioning ? &s->pb2 : &s->pb;
-PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=B_TYPE ? &s->tex_pb : &s->pb;
-PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=I_TYPE ? &s->pb2 : &s->pb;
+PutBitContext * const tex_pb = s->data_partitioning && s->pict_type!=FF_B_TYPE ? &s->tex_pb : &s->pb;
+PutBitContext * const dc_pb = s->data_partitioning && s->pict_type!=FF_I_TYPE ? &s->pb2 : &s->pb;
 const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;
 const int dquant_code[5]= {1,0,9,2,3};

@@ -909,7 +909,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
 if (!s->mb_intra) {
 int i, cbp;

-if(s->pict_type==B_TYPE){
+if(s->pict_type==FF_B_TYPE){
 static const int mb_type_table[8]= {-1, 3, 2, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
 int mb_type= mb_type_table[s->mv_dir];

@@ -1042,7 +1042,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
 s->p_tex_bits+= get_bits_diff(s);
 }

-}else{ /* s->pict_type==B_TYPE */
+}else{ /* s->pict_type==FF_B_TYPE */
 cbp= get_p_cbp(s, block, motion_x, motion_y);

 if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
@@ -1067,7 +1067,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
 int diff;
 Picture *pic= s->reordered_input_picture[i+1];

-if(pic==NULL || pic->pict_type!=B_TYPE) break;
+if(pic==NULL || pic->pict_type!=FF_B_TYPE) break;

 b_pic= pic->data[0] + offset;
 if(pic->type != FF_BUFFER_TYPE_SHARED)
@@ -1219,7 +1219,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
 }

 cbpc = cbp & 3;
-if (s->pict_type == I_TYPE) {
+if (s->pict_type == FF_I_TYPE) {
 if(s->dquant) cbpc+=4;
 put_bits(&s->pb,
 intra_MCBPC_bits[cbpc],
@@ -1413,7 +1413,7 @@ void h263_encode_mb(MpegEncContext * s,
 }

 cbpc = cbp & 3;
-if (s->pict_type == I_TYPE) {
+if (s->pict_type == FF_I_TYPE) {
 if(s->dquant) cbpc+=4;
 put_bits(&s->pb,
 intra_MCBPC_bits[cbpc],
@@ -1471,7 +1471,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
 uint8_t *dest_cb= s->dest[1];
 uint8_t *dest_cr= s->dest[2];

-// if(s->pict_type==B_TYPE && !s->readable) return;
+// if(s->pict_type==FF_B_TYPE && !s->readable) return;

 /*
 Diag Top
@@ -2268,7 +2268,7 @@ void ff_mpeg4_stuffing(PutBitContext * pbc)

 /* must be called before writing the header */
 void ff_set_mpeg4_time(MpegEncContext * s){
-if(s->pict_type==B_TYPE){
+if(s->pict_type==FF_B_TYPE){
 ff_mpeg4_init_direct_mv(s);
 }else{
 s->last_time_base= s->time_base;
@@ -2458,7 +2458,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
 int time_incr;
 int time_div, time_mod;

-if(s->pict_type==I_TYPE){
+if(s->pict_type==FF_I_TYPE){
 if(!(s->flags&CODEC_FLAG_GLOBAL_HEADER)){
 if(s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) //HACK, the reference sw is buggy
 mpeg4_encode_visual_object_header(s);
@@ -2469,7 +2469,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
 mpeg4_encode_gop_header(s);
 }

-s->partitioned_frame= s->data_partitioning && s->pict_type!=B_TYPE;
+s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;

 //printf("num:%d rate:%d base:%d\n", s->picture_number, s->time_base.den, FRAME_RATE_BASE);

@@ -2491,8 +2491,8 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
 put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */
 put_bits(&s->pb, 1, 1); /* marker */
 put_bits(&s->pb, 1, 1); /* vop coded */
-if ( s->pict_type == P_TYPE
-|| (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
+if ( s->pict_type == FF_P_TYPE
+|| (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE)) {
 put_bits(&s->pb, 1, s->no_rounding); /* rounding type */
 }
 put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
@@ -2504,9 +2504,9 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)

 put_bits(&s->pb, 5, s->qscale);

-if (s->pict_type != I_TYPE)
+if (s->pict_type != FF_I_TYPE)
 put_bits(&s->pb, 3, s->f_code); /* fcode_for */
-if (s->pict_type == B_TYPE)
+if (s->pict_type == FF_B_TYPE)
 put_bits(&s->pb, 3, s->b_code); /* fcode_back */
 // printf("****frame %d\n", picture_number);
 }
@@ -3070,7 +3070,7 @@ void ff_mpeg4_merge_partitions(MpegEncContext *s)
 const int tex_pb_len= put_bits_count(&s->tex_pb);
 const int bits= put_bits_count(&s->pb);

-if(s->pict_type==I_TYPE){
+if(s->pict_type==FF_I_TYPE){
 put_bits(&s->pb, 19, DC_MARKER);
 s->misc_bits+=19 + pb2_len + bits - s->last_bits;
 s->i_tex_bits+= tex_pb_len;
@@ -3094,12 +3094,12 @@ void ff_mpeg4_merge_partitions(MpegEncContext *s)

 int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
 switch(s->pict_type){
-case I_TYPE:
+case FF_I_TYPE:
 return 16;
-case P_TYPE:
-case S_TYPE:
+case FF_P_TYPE:
+case FF_S_TYPE:
 return s->f_code+15;
-case B_TYPE:
+case FF_B_TYPE:
 return FFMAX(FFMAX(s->f_code, s->b_code)+15, 17);
 default:
 return -1;
@@ -3135,7 +3135,7 @@ static inline int mpeg4_is_resync(MpegEncContext *s){
 }

 while(v<=0xFF){
-if(s->pict_type==B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
+if(s->pict_type==FF_B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
 break;
 skip_bits(&s->gb, 8+s->pict_type);
 bits_count+= 8+s->pict_type;
@@ -3200,7 +3200,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s)
 av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num);
 return -1;
 }
-if(s->pict_type == B_TYPE){
+if(s->pict_type == FF_B_TYPE){
 while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) mb_num++;
 if(mb_num >= s->mb_num) return -1; // slice contains just skipped MBs which where allready decoded
 }
@@ -3234,20 +3234,20 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s)
 if(s->shape != BIN_ONLY_SHAPE){
 skip_bits(&s->gb, 3); /* intra dc vlc threshold */
 //FIXME don't just ignore everything
-if(s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
+if(s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
 mpeg4_decode_sprite_trajectory(s, &s->gb);
 av_log(s->avctx, AV_LOG_ERROR, "untested\n");
 }

 //FIXME reduced res stuff here

-if (s->pict_type != I_TYPE) {
+if (s->pict_type != FF_I_TYPE) {
 int f_code = get_bits(&s->gb, 3); /* fcode_for */
 if(f_code==0){
 av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n");
 }
 }
-if (s->pict_type == B_TYPE) {
+if (s->pict_type == FF_B_TYPE) {
 int b_code = get_bits(&s->gb, 3);
 if(b_code==0){
 av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n");
@ -3406,7 +3406,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){
|
||||
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
|
||||
s->first_slice_line=0;
|
||||
|
||||
if(s->pict_type==I_TYPE){
|
||||
if(s->pict_type==FF_I_TYPE){
|
||||
int i;
|
||||
|
||||
do{
|
||||
@ -3455,7 +3455,7 @@ try_again:
|
||||
skip_bits1(&s->gb);
|
||||
if(bits&0x10000){
|
||||
/* skip mb */
|
||||
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0;
|
||||
mx= get_amv(s, 0);
|
||||
my= get_amv(s, 1);
|
||||
@ -3496,7 +3496,7 @@ try_again:
|
||||
if(s->mbintra_table[xy])
|
||||
ff_clean_intra_table_entries(s);
|
||||
|
||||
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
|
||||
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
|
||||
s->mcsel= get_bits1(&s->gb);
|
||||
else s->mcsel= 0;
|
||||
|
||||
@ -3568,7 +3568,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
|
||||
if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1)
|
||||
s->first_slice_line=0;
|
||||
|
||||
if(s->pict_type==I_TYPE){
|
||||
if(s->pict_type==FF_I_TYPE){
|
||||
int ac_pred= get_bits1(&s->gb);
|
||||
int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
|
||||
if(cbpy<0){
|
||||
@ -3642,8 +3642,8 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){
|
||||
int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
||||
{
|
||||
int mb_num;
|
||||
const int part_a_error= s->pict_type==I_TYPE ? (DC_ERROR|MV_ERROR) : MV_ERROR;
|
||||
const int part_a_end = s->pict_type==I_TYPE ? (DC_END |MV_END) : MV_END;
|
||||
const int part_a_error= s->pict_type==FF_I_TYPE ? (DC_ERROR|MV_ERROR) : MV_ERROR;
|
||||
const int part_a_end = s->pict_type==FF_I_TYPE ? (DC_END |MV_END) : MV_END;
|
||||
|
||||
mb_num= mpeg4_decode_partition_a(s);
|
||||
if(mb_num<0){
|
||||
@ -3659,7 +3659,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
||||
|
||||
s->mb_num_left= mb_num;
|
||||
|
||||
if(s->pict_type==I_TYPE){
|
||||
if(s->pict_type==FF_I_TYPE){
|
||||
while(show_bits(&s->gb, 9) == 1)
|
||||
skip_bits(&s->gb, 9);
|
||||
if(get_bits_long(&s->gb, 19)!=DC_MARKER){
|
||||
@ -3677,11 +3677,11 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s)
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, part_a_end);
|
||||
|
||||
if( mpeg4_decode_partition_b(s, mb_num) < 0){
|
||||
if(s->pict_type==P_TYPE)
|
||||
if(s->pict_type==FF_P_TYPE)
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, DC_ERROR);
|
||||
return -1;
|
||||
}else{
|
||||
if(s->pict_type==P_TYPE)
|
||||
if(s->pict_type==FF_P_TYPE)
|
||||
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, DC_END);
|
||||
}
|
||||
|
||||
@ -3706,7 +3706,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
ff_set_qscale(s, s->current_picture.qscale_table[xy] );
|
||||
}
|
||||
|
||||
if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) {
|
||||
if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
|
||||
int i;
|
||||
for(i=0; i<4; i++){
|
||||
s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0];
|
||||
@ -3720,7 +3720,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64])
|
||||
s->block_last_index[i] = -1;
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
s->mcsel=1;
|
||||
s->mb_skipped = 0;
|
||||
}else{
|
||||
@ -3792,7 +3792,7 @@ static void preview_obmc(MpegEncContext *s){
|
||||
s->block_index[i]+= 1;
|
||||
s->mb_x++;
|
||||
|
||||
assert(s->pict_type == P_TYPE);
|
||||
assert(s->pict_type == FF_P_TYPE);
|
||||
|
||||
do{
|
||||
if (get_bits1(&s->gb)) {
|
||||
@ -3892,7 +3892,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
|
||||
assert(!s->h263_pred);
|
||||
|
||||
if (s->pict_type == P_TYPE) {
|
||||
if (s->pict_type == FF_P_TYPE) {
|
||||
do{
|
||||
if (get_bits1(&s->gb)) {
|
||||
/* skip mb */
|
||||
@ -3992,10 +3992,10 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
}
|
||||
|
||||
if(s->obmc){
|
||||
if(s->pict_type == P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
|
||||
if(s->pict_type == FF_P_TYPE && s->mb_x+1<s->mb_width && s->mb_num_left != 1)
|
||||
preview_obmc(s);
|
||||
}
|
||||
} else if(s->pict_type==B_TYPE) {
|
||||
} else if(s->pict_type==FF_B_TYPE) {
|
||||
int mb_type;
|
||||
const int stride= s->b8_stride;
|
||||
int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
|
||||
@ -4159,7 +4159,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
|
||||
|
||||
assert(s->h263_pred);
|
||||
|
||||
if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) {
|
||||
if (s->pict_type == FF_P_TYPE || s->pict_type==FF_S_TYPE) {
|
||||
do{
|
||||
if (get_bits1(&s->gb)) {
|
||||
/* skip mb */
|
||||
@ -4168,7 +4168,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->block_last_index[i] = -1;
|
||||
s->mv_dir = MV_DIR_FORWARD;
|
||||
s->mv_type = MV_TYPE_16X16;
|
||||
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE){
|
||||
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0;
|
||||
s->mcsel=1;
|
||||
s->mv[0][0][0]= get_amv(s, 0);
|
||||
@ -4197,7 +4197,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
|
||||
s->mb_intra = ((cbpc & 4) != 0);
|
||||
if (s->mb_intra) goto intra;
|
||||
|
||||
if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
|
||||
if(s->pict_type==FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0)
|
||||
s->mcsel= get_bits1(&s->gb);
|
||||
else s->mcsel= 0;
|
||||
cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F;
|
||||
@ -4276,7 +4276,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s,
|
||||
mot_val[1] = my;
|
||||
}
|
||||
}
|
||||
} else if(s->pict_type==B_TYPE) {
|
||||
} else if(s->pict_type==FF_B_TYPE) {
|
||||
int modb1; // first bit of modb
|
||||
int modb2; // second bit of modb
|
||||
int mb_type;
|
||||
@ -4470,7 +4470,7 @@ end:
|
||||
if(s->codec_id==CODEC_ID_MPEG4){
|
||||
if(mpeg4_is_resync(s)){
|
||||
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
|
||||
if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[xy + delta])
|
||||
if(s->pict_type==FF_B_TYPE && s->next_picture.mbskip_table[xy + delta])
|
||||
return SLICE_OK;
|
||||
return SLICE_END;
|
||||
}
|
||||
@ -4564,7 +4564,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
|
||||
/* DC coef */
|
||||
if(s->codec_id == CODEC_ID_RV10){
|
||||
#ifdef CONFIG_RV10_DECODER
|
||||
if (s->rv10_version == 3 && s->pict_type == I_TYPE) {
|
||||
if (s->rv10_version == 3 && s->pict_type == FF_I_TYPE) {
|
||||
int component, diff;
|
||||
component = (n <= 3 ? 0 : n - 4 + 1);
|
||||
level = s->last_dc[component];
|
||||
@ -5033,7 +5033,7 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
if (!width)
|
||||
return -1;
|
||||
|
||||
s->pict_type = I_TYPE + get_bits1(&s->gb);
|
||||
s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
|
||||
|
||||
s->h263_long_vectors = get_bits1(&s->gb);
|
||||
|
||||
@ -5100,10 +5100,10 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
/* MPPTYPE */
|
||||
s->pict_type = get_bits(&s->gb, 3);
|
||||
switch(s->pict_type){
|
||||
case 0: s->pict_type= I_TYPE;break;
|
||||
case 1: s->pict_type= P_TYPE;break;
|
||||
case 3: s->pict_type= B_TYPE;break;
|
||||
case 7: s->pict_type= I_TYPE;break; //ZYGO
|
||||
case 0: s->pict_type= FF_I_TYPE;break;
|
||||
case 1: s->pict_type= FF_P_TYPE;break;
|
||||
case 3: s->pict_type= FF_B_TYPE;break;
|
||||
case 7: s->pict_type= FF_I_TYPE;break; //ZYGO
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
@ -5223,7 +5223,7 @@ int h263_decode_picture_header(MpegEncContext *s)
|
||||
show_pict_info(s);
|
||||
}
|
||||
#if 1
|
||||
if (s->pict_type == I_TYPE && s->codec_tag == ff_get_fourcc("ZYGO")){
|
||||
if (s->pict_type == FF_I_TYPE && s->codec_tag == ff_get_fourcc("ZYGO")){
|
||||
int i,j;
|
||||
for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "\n");
|
||||
@ -5784,13 +5784,13 @@ static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
|
||||
static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
int time_incr, time_increment;
|
||||
|
||||
s->pict_type = get_bits(gb, 2) + I_TYPE; /* pict type: I = 0 , P = 1 */
|
||||
if(s->pict_type==B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
|
||||
s->pict_type = get_bits(gb, 2) + FF_I_TYPE; /* pict type: I = 0 , P = 1 */
|
||||
if(s->pict_type==FF_B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "low_delay flag incorrectly, clearing it\n");
|
||||
s->low_delay=0;
|
||||
}
|
||||
|
||||
s->partitioned_frame= s->data_partitioning && s->pict_type!=B_TYPE;
|
||||
s->partitioned_frame= s->data_partitioning && s->pict_type!=FF_B_TYPE;
|
||||
if(s->partitioned_frame)
|
||||
s->decode_mb= mpeg4_decode_partitioned_mb;
|
||||
else
|
||||
@ -5817,7 +5817,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
|
||||
// printf("%d %X\n", s->time_increment_bits, time_increment);
|
||||
//av_log(s->avctx, AV_LOG_DEBUG, " type:%d modulo_time_base:%d increment:%d t_frame %d\n", s->pict_type, time_incr, time_increment, s->t_frame);
|
||||
if(s->pict_type!=B_TYPE){
|
||||
if(s->pict_type!=FF_B_TYPE){
|
||||
s->last_time_base= s->time_base;
|
||||
s->time_base+= time_incr;
|
||||
s->time= s->time_base*s->avctx->time_base.den + time_increment;
|
||||
@ -5869,8 +5869,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
}
|
||||
//printf("time %d %d %d || %"PRId64" %"PRId64" %"PRId64"\n", s->time_increment_bits, s->avctx->time_base.den, s->time_base,
|
||||
//s->time, s->last_non_b_time, s->last_non_b_time - s->pp_time);
|
||||
if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == P_TYPE
|
||||
|| (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) {
|
||||
if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == FF_P_TYPE
|
||||
|| (s->pict_type == FF_S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) {
|
||||
/* rounding type for motion estimation */
|
||||
s->no_rounding = get_bits1(gb);
|
||||
} else {
|
||||
@ -5879,7 +5879,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
//FIXME reduced res stuff
|
||||
|
||||
if (s->shape != RECT_SHAPE) {
|
||||
if (s->vol_sprite_usage != 1 || s->pict_type != I_TYPE) {
|
||||
if (s->vol_sprite_usage != 1 || s->pict_type != FF_I_TYPE) {
|
||||
int width, height, hor_spat_ref, ver_spat_ref;
|
||||
|
||||
width = get_bits(gb, 13);
|
||||
@ -5919,7 +5919,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
|
||||
}
|
||||
|
||||
if(s->pict_type == S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
|
||||
if(s->pict_type == FF_S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
|
||||
mpeg4_decode_sprite_trajectory(s, gb);
|
||||
if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n");
|
||||
if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
|
||||
@ -5932,7 +5932,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
return -1; // makes no sense to continue, as there is nothing left from the image then
|
||||
}
|
||||
|
||||
if (s->pict_type != I_TYPE) {
|
||||
if (s->pict_type != FF_I_TYPE) {
|
||||
s->f_code = get_bits(gb, 3); /* fcode_for */
|
||||
if(s->f_code==0){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n");
|
||||
@ -5941,7 +5941,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
}else
|
||||
s->f_code=1;
|
||||
|
||||
if (s->pict_type == B_TYPE) {
|
||||
if (s->pict_type == FF_B_TYPE) {
|
||||
s->b_code = get_bits(gb, 3);
|
||||
}else
|
||||
s->b_code=1;
|
||||
@ -5949,14 +5949,14 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
|
||||
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d\n",
|
||||
s->qscale, s->f_code, s->b_code,
|
||||
s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")),
|
||||
s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
|
||||
gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first,
|
||||
s->quarter_sample ? "q" : "h", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points,
|
||||
s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? " VOLC" : " ", s->intra_dc_threshold);
|
||||
}
|
||||
|
||||
if(!s->scalability){
|
||||
if (s->shape!=RECT_SHAPE && s->pict_type!=I_TYPE) {
|
||||
if (s->shape!=RECT_SHAPE && s->pict_type!=FF_I_TYPE) {
|
||||
skip_bits1(gb); // vop shape coding type
|
||||
}
|
||||
}else{
|
||||
@ -6106,7 +6106,7 @@ int intel_h263_decode_picture_header(MpegEncContext *s)
|
||||
}
|
||||
s->h263_plus = 0;
|
||||
|
||||
s->pict_type = I_TYPE + get_bits1(&s->gb);
|
||||
s->pict_type = FF_I_TYPE + get_bits1(&s->gb);
|
||||
|
||||
s->unrestricted_mv = get_bits1(&s->gb);
|
||||
s->h263_long_vectors = s->unrestricted_mv;
|
||||
@ -6197,10 +6197,10 @@ int flv_h263_decode_picture_header(MpegEncContext *s)
|
||||
s->width = width;
|
||||
s->height = height;
|
||||
|
||||
s->pict_type = I_TYPE + get_bits(&s->gb, 2);
|
||||
s->dropable= s->pict_type > P_TYPE;
|
||||
s->pict_type = FF_I_TYPE + get_bits(&s->gb, 2);
|
||||
s->dropable= s->pict_type > FF_P_TYPE;
|
||||
if (s->dropable)
|
||||
s->pict_type = P_TYPE;
|
||||
s->pict_type = FF_P_TYPE;
|
||||
|
||||
skip_bits1(&s->gb); /* deblocking flag */
|
||||
s->chroma_qscale= s->qscale = get_bits(&s->gb, 5);
|
||||
|
@ -208,7 +208,7 @@ static int decode_slice(MpegEncContext *s){
|
||||
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
|
||||
ret= s->decode_mb(s, s->block);
|
||||
|
||||
if (s->pict_type!=B_TYPE)
|
||||
if (s->pict_type!=FF_B_TYPE)
|
||||
ff_h263_update_motion_val(s);
|
||||
|
||||
if(ret<0){
|
||||
@ -291,7 +291,7 @@ static int decode_slice(MpegEncContext *s){
|
||||
int max_extra=7;
|
||||
|
||||
/* no markers in M$ crap */
|
||||
if(s->msmpeg4_version && s->pict_type==I_TYPE)
|
||||
if(s->msmpeg4_version && s->pict_type==FF_I_TYPE)
|
||||
max_extra+= 17;
|
||||
|
||||
/* buggy padding but the frame should still end approximately at the bitstream end */
|
||||
@ -581,30 +581,30 @@ retry:
|
||||
|
||||
// for hurry_up==5
|
||||
s->current_picture.pict_type= s->pict_type;
|
||||
s->current_picture.key_frame= s->pict_type == I_TYPE;
|
||||
s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
|
||||
|
||||
/* skip B-frames if we don't have reference frames */
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size);
|
||||
if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)) return get_consumed_bytes(s, buf_size);
|
||||
/* skip b frames if we are in a hurry */
|
||||
if(avctx->hurry_up && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size);
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
|
||||
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return get_consumed_bytes(s, buf_size);
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
/* skip everything if we are in a hurry>=5 */
|
||||
if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
|
||||
|
||||
if(s->next_p_frame_damaged){
|
||||
if(s->pict_type==B_TYPE)
|
||||
if(s->pict_type==FF_B_TYPE)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
else
|
||||
s->next_p_frame_damaged=0;
|
||||
}
|
||||
|
||||
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==B_TYPE){
|
||||
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==FF_B_TYPE){
|
||||
s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
|
||||
s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
|
||||
}else if((!s->no_rounding) || s->pict_type==B_TYPE){
|
||||
}else if((!s->no_rounding) || s->pict_type==FF_B_TYPE){
|
||||
s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
|
||||
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
|
||||
}else{
|
||||
@ -649,7 +649,7 @@ retry:
|
||||
decode_slice(s);
|
||||
}
|
||||
|
||||
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE)
|
||||
if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==FF_I_TYPE)
|
||||
if(!ENABLE_MSMPEG4_DECODER || msmpeg4_decode_ext_header(s, buf_size) < 0){
|
||||
s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR;
|
||||
}
|
||||
@ -690,7 +690,7 @@ intrax8_decoded:
|
||||
|
||||
assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
|
||||
assert(s->current_picture.pict_type == s->pict_type);
|
||||
if (s->pict_type == B_TYPE || s->low_delay) {
|
||||
if (s->pict_type == FF_B_TYPE || s->low_delay) {
|
||||
*pict= *(AVFrame*)s->current_picture_ptr;
|
||||
} else if (s->last_picture_ptr != NULL) {
|
||||
*pict= *(AVFrame*)s->last_picture_ptr;
|
||||
|
@@ -477,7 +477,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
 *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
 *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;

-if(h->slice_type == B_TYPE){
+if(h->slice_type == FF_B_TYPE){
 fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1);

 if(IS_DIRECT(top_type)){
@ -918,16 +918,16 @@ static inline void direct_ref_list_init(H264Context * const h){
|
||||
Picture * const ref1 = &h->ref_list[1][0];
|
||||
Picture * const cur = s->current_picture_ptr;
|
||||
int list, i, j;
|
||||
if(cur->pict_type == I_TYPE)
|
||||
if(cur->pict_type == FF_I_TYPE)
|
||||
cur->ref_count[0] = 0;
|
||||
if(cur->pict_type != B_TYPE)
|
||||
if(cur->pict_type != FF_B_TYPE)
|
||||
cur->ref_count[1] = 0;
|
||||
for(list=0; list<2; list++){
|
||||
cur->ref_count[list] = h->ref_count[list];
|
||||
for(j=0; j<h->ref_count[list]; j++)
|
||||
cur->ref_poc[list][j] = h->ref_list[list][j].poc;
|
||||
}
|
||||
if(cur->pict_type != B_TYPE || h->direct_spatial_mv_pred)
|
||||
if(cur->pict_type != FF_B_TYPE || h->direct_spatial_mv_pred)
|
||||
return;
|
||||
for(list=0; list<2; list++){
|
||||
for(i=0; i<ref1->ref_count[list]; i++){
|
||||
@ -1358,7 +1358,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){
|
||||
}
|
||||
}
|
||||
|
||||
if(h->slice_type == B_TYPE && h->pps.cabac){
|
||||
if(h->slice_type == FF_B_TYPE && h->pps.cabac){
|
||||
if(IS_8X8(mb_type)){
|
||||
uint8_t *direct_table = &h->direct_table[b8_xy];
|
||||
direct_table[1+0*h->b8_stride] = IS_DIRECT(h->sub_mb_type[1]) ? 1 : 0;
|
||||
@ -2857,7 +2857,7 @@ static int fill_default_ref_list(H264Context *h){
|
||||
frame_list[1] = h->default_ref_list[1];
|
||||
}
|
||||
|
||||
if(h->slice_type==B_TYPE){
|
||||
if(h->slice_type==FF_B_TYPE){
|
||||
int list;
|
||||
int len[2];
|
||||
int short_len[2];
|
||||
@ -2979,7 +2979,7 @@ static int fill_default_ref_list(H264Context *h){
|
||||
for (i=0; i<h->ref_count[0]; i++) {
|
||||
tprintf(h->s.avctx, "List0: %s fn:%d 0x%p\n", (h->default_ref_list[0][i].long_ref ? "LT" : "ST"), h->default_ref_list[0][i].pic_id, h->default_ref_list[0][i].data[0]);
|
||||
}
|
||||
if(h->slice_type==B_TYPE){
|
||||
if(h->slice_type==FF_B_TYPE){
|
||||
for (i=0; i<h->ref_count[1]; i++) {
|
||||
tprintf(h->s.avctx, "List1: %s fn:%d 0x%p\n", (h->default_ref_list[1][i].long_ref ? "LT" : "ST"), h->default_ref_list[1][i].pic_id, h->default_ref_list[1][i].data[0]);
|
||||
}
|
||||
@ -3021,7 +3021,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){
|
||||
|
||||
print_short_term(h);
|
||||
print_long_term(h);
|
||||
if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func
|
||||
if(h->slice_type==FF_I_TYPE || h->slice_type==FF_SI_TYPE) return 0; //FIXME move before func
|
||||
|
||||
for(list=0; list<h->list_count; list++){
|
||||
memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]);
|
||||
@ -3122,7 +3122,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){
|
||||
}
|
||||
}
|
||||
|
||||
if(h->slice_type==B_TYPE && !h->direct_spatial_mv_pred)
|
||||
if(h->slice_type==FF_B_TYPE && !h->direct_spatial_mv_pred)
|
||||
direct_dist_scale_factor(h);
|
||||
direct_ref_list_init(h);
|
||||
return 0;
|
||||
@ -3205,7 +3205,7 @@ static int pred_weight_table(H264Context *h){
|
||||
}
|
||||
}
|
||||
}
|
||||
if(h->slice_type != B_TYPE) break;
|
||||
if(h->slice_type != FF_B_TYPE) break;
|
||||
}
|
||||
h->use_weight= h->use_weight || h->use_weight_chroma;
|
||||
return 0;
|
||||
@ -3864,7 +3864,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
unsigned int first_mb_in_slice;
|
||||
unsigned int pps_id;
|
||||
int num_ref_idx_active_override_flag;
|
||||
static const uint8_t slice_type_map[5]= {P_TYPE, B_TYPE, I_TYPE, SP_TYPE, SI_TYPE};
|
||||
static const uint8_t slice_type_map[5]= {FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE};
|
||||
unsigned int slice_type, tmp, i;
|
||||
int default_ref_list_done = 0;
|
||||
int last_pic_structure;
|
||||
@ -3899,14 +3899,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->slice_type_fixed=0;
|
||||
|
||||
slice_type= slice_type_map[ slice_type ];
|
||||
if (slice_type == I_TYPE
|
||||
if (slice_type == FF_I_TYPE
|
||||
|| (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) {
|
||||
default_ref_list_done = 1;
|
||||
}
|
||||
h->slice_type= slice_type;
|
||||
|
||||
s->pict_type= h->slice_type; // to make a few old func happy, it's wrong though
|
||||
if (s->pict_type == B_TYPE && s0->last_picture_ptr == NULL) {
|
||||
if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"B picture before any references, skipping\n");
|
||||
return -1;
|
||||
@ -4109,15 +4109,15 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->ref_count[0]= h->pps.ref_count[0];
|
||||
h->ref_count[1]= h->pps.ref_count[1];
|
||||
|
||||
if(h->slice_type == P_TYPE || h->slice_type == SP_TYPE || h->slice_type == B_TYPE){
|
||||
if(h->slice_type == B_TYPE){
|
||||
if(h->slice_type == FF_P_TYPE || h->slice_type == FF_SP_TYPE || h->slice_type == FF_B_TYPE){
|
||||
if(h->slice_type == FF_B_TYPE){
|
||||
h->direct_spatial_mv_pred= get_bits1(&s->gb);
|
||||
}
|
||||
num_ref_idx_active_override_flag= get_bits1(&s->gb);
|
||||
|
||||
if(num_ref_idx_active_override_flag){
|
||||
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
|
||||
if(h->slice_type==B_TYPE)
|
||||
if(h->slice_type==FF_B_TYPE)
|
||||
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
|
||||
|
||||
if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){
|
||||
@ -4126,7 +4126,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if(h->slice_type == B_TYPE)
|
||||
if(h->slice_type == FF_B_TYPE)
|
||||
h->list_count= 2;
|
||||
else
|
||||
h->list_count= 1;
|
||||
@ -4140,10 +4140,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
if(decode_ref_pic_list_reordering(h) < 0)
|
||||
return -1;
|
||||
|
||||
if( (h->pps.weighted_pred && (h->slice_type == P_TYPE || h->slice_type == SP_TYPE ))
|
||||
|| (h->pps.weighted_bipred_idc==1 && h->slice_type==B_TYPE ) )
|
||||
if( (h->pps.weighted_pred && (h->slice_type == FF_P_TYPE || h->slice_type == FF_SP_TYPE ))
|
||||
|| (h->pps.weighted_bipred_idc==1 && h->slice_type==FF_B_TYPE ) )
|
||||
pred_weight_table(h);
|
||||
else if(h->pps.weighted_bipred_idc==2 && h->slice_type==B_TYPE)
|
||||
else if(h->pps.weighted_bipred_idc==2 && h->slice_type==FF_B_TYPE)
|
||||
implicit_weight_table(h);
|
||||
else
|
||||
h->use_weight = 0;
|
||||
@ -4154,7 +4154,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
if(FRAME_MBAFF)
|
||||
fill_mbaff_ref_list(h);
|
||||
|
||||
if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE && h->pps.cabac ){
|
||||
if( h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE && h->pps.cabac ){
|
||||
tmp = get_ue_golomb(&s->gb);
|
||||
if(tmp > 2){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
|
||||
@ -4173,10 +4173,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale);
|
||||
h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale);
|
||||
//FIXME qscale / qp ... stuff
|
||||
if(h->slice_type == SP_TYPE){
|
||||
if(h->slice_type == FF_SP_TYPE){
|
||||
get_bits1(&s->gb); /* sp_for_switch_flag */
|
||||
}
|
||||
if(h->slice_type==SP_TYPE || h->slice_type == SI_TYPE){
|
||||
if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){
|
||||
get_se_golomb(&s->gb); /* slice_qs_delta */
|
||||
}
|
||||
|
||||
@ -4200,8 +4200,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
|
||||
if( s->avctx->skip_loop_filter >= AVDISCARD_ALL
|
||||
||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type != I_TYPE)
|
||||
||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type == B_TYPE)
|
||||
||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type != FF_I_TYPE)
|
||||
||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type == FF_B_TYPE)
|
||||
||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0))
|
||||
h->deblocking_filter= 0;
|
||||
|
||||
@ -4462,7 +4462,7 @@ static void decode_mb_skip(H264Context *h){
|
||||
if(MB_FIELD)
|
||||
mb_type|= MB_TYPE_INTERLACED;
|
||||
|
||||
if( h->slice_type == B_TYPE )
|
||||
if( h->slice_type == FF_B_TYPE )
|
||||
{
|
||||
// just for fill_caches. pred_direct_motion will set the real mb_type
|
||||
mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
|
||||
@ -4505,7 +4505,7 @@ static int decode_mb_cavlc(H264Context *h){
|
||||
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
|
||||
cbp = 0; /* avoid warning. FIXME: find a solution without slowing
|
||||
down the code */
|
||||
if(h->slice_type != I_TYPE && h->slice_type != SI_TYPE){
|
||||
if(h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE){
|
||||
if(s->mb_skip_run==-1)
|
||||
s->mb_skip_run= get_ue_golomb(&s->gb);
|
||||
|
||||
@ -4529,7 +4529,7 @@ static int decode_mb_cavlc(H264Context *h){
|
||||
h->prev_mb_skipped= 0;
|
||||
|
||||
mb_type= get_ue_golomb(&s->gb);
|
||||
if(h->slice_type == B_TYPE){
|
||||
if(h->slice_type == FF_B_TYPE){
|
||||
if(mb_type < 23){
|
||||
partition_count= b_mb_type_info[mb_type].partition_count;
|
||||
mb_type= b_mb_type_info[mb_type].type;
|
||||
@ -4537,7 +4537,7 @@ static int decode_mb_cavlc(H264Context *h){
|
||||
mb_type -= 23;
|
||||
goto decode_intra_mb;
|
||||
}
|
||||
}else if(h->slice_type == P_TYPE /*|| h->slice_type == SP_TYPE */){
|
||||
}else if(h->slice_type == FF_P_TYPE /*|| h->slice_type == FF_SP_TYPE */){
|
||||
if(mb_type < 5){
|
||||
partition_count= p_mb_type_info[mb_type].partition_count;
|
||||
mb_type= p_mb_type_info[mb_type].type;
|
||||
@ -4546,7 +4546,7 @@ static int decode_mb_cavlc(H264Context *h){
|
||||
goto decode_intra_mb;
|
||||
}
|
||||
}else{
|
||||
assert(h->slice_type == I_TYPE);
|
||||
assert(h->slice_type == FF_I_TYPE);
|
||||
decode_intra_mb:
|
||||
if(mb_type > 25){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
|
||||
@ -4652,7 +4652,7 @@ decode_intra_mb:
|
||||
}else if(partition_count==4){
|
||||
int i, j, sub_partition_count[4], list, ref[2][4];
|
||||
|
||||
if(h->slice_type == B_TYPE){
|
||||
if(h->slice_type == FF_B_TYPE){
|
||||
for(i=0; i<4; i++){
|
||||
h->sub_mb_type[i]= get_ue_golomb(&s->gb);
|
||||
if(h->sub_mb_type[i] >=13){
|
||||
@ -4671,7 +4671,7 @@ decode_intra_mb:
|
||||
h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
|
||||
}
|
||||
}else{
|
||||
assert(h->slice_type == P_TYPE || h->slice_type == SP_TYPE); //FIXME SP correct ?
|
||||
assert(h->slice_type == FF_P_TYPE || h->slice_type == FF_SP_TYPE); //FIXME SP correct ?
|
||||
for(i=0; i<4; i++){
|
||||
h->sub_mb_type[i]= get_ue_golomb(&s->gb);
|
||||
if(h->sub_mb_type[i] >=4){
|
||||
@ -5042,9 +5042,9 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl
|
||||
static int decode_cabac_mb_type( H264Context *h ) {
|
||||
MpegEncContext * const s = &h->s;
|
||||
|
||||
if( h->slice_type == I_TYPE ) {
|
||||
if( h->slice_type == FF_I_TYPE ) {
|
||||
return decode_cabac_intra_mb_type(h, 3, 1);
|
||||
} else if( h->slice_type == P_TYPE ) {
|
||||
} else if( h->slice_type == FF_P_TYPE ) {
|
||||
if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
|
||||
/* P-type */
|
||||
if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
|
||||
@ -5057,7 +5057,7 @@ static int decode_cabac_mb_type( H264Context *h ) {
|
||||
} else {
|
||||
return decode_cabac_intra_mb_type(h, 17, 0) + 5;
|
||||
}
|
||||
} else if( h->slice_type == B_TYPE ) {
|
||||
} else if( h->slice_type == FF_B_TYPE ) {
|
||||
const int mba_xy = h->left_mb_xy[0];
|
||||
const int mbb_xy = h->top_mb_xy;
|
||||
int ctx = 0;
|
||||
@ -5127,7 +5127,7 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
|
||||
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
|
||||
ctx++;
|
||||
|
||||
if( h->slice_type == B_TYPE )
|
||||
if( h->slice_type == FF_B_TYPE )
|
||||
ctx += 13;
|
||||
return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
|
||||
}
|
||||
@ -5264,7 +5264,7 @@ static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
|
||||
int ref = 0;
|
||||
int ctx = 0;
|
||||
|
||||
if( h->slice_type == B_TYPE) {
|
||||
if( h->slice_type == FF_B_TYPE) {
|
||||
if( refa > 0 && !h->direct_cache[scan8[n] - 1] )
|
||||
ctx++;
|
||||
if( refb > 0 && !h->direct_cache[scan8[n] - 8] )
|
||||
@ -5582,7 +5582,7 @@ static int decode_mb_cabac(H264Context *h) {
|
||||
s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong?)
|
||||
|
||||
tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
|
||||
if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE ) {
|
||||
if( h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE ) {
|
||||
int skip;
|
||||
/* a skipped mb needs the aff flag from the following mb */
|
||||
if( FRAME_MBAFF && s->mb_x==0 && (s->mb_y&1)==0 )
|
||||
@ -5627,7 +5627,7 @@ static int decode_mb_cabac(H264Context *h) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if( h->slice_type == B_TYPE ) {
|
||||
if( h->slice_type == FF_B_TYPE ) {
|
||||
if( mb_type < 23 ){
|
||||
partition_count= b_mb_type_info[mb_type].partition_count;
|
||||
mb_type= b_mb_type_info[mb_type].type;
|
||||
@ -5635,7 +5635,7 @@ static int decode_mb_cabac(H264Context *h) {
|
||||
mb_type -= 23;
|
||||
goto decode_intra_mb;
|
||||
}
|
||||
} else if( h->slice_type == P_TYPE ) {
|
||||
} else if( h->slice_type == FF_P_TYPE ) {
|
||||
if( mb_type < 5) {
|
||||
partition_count= p_mb_type_info[mb_type].partition_count;
|
||||
mb_type= p_mb_type_info[mb_type].type;
|
||||
@ -5644,7 +5644,7 @@ static int decode_mb_cabac(H264Context *h) {
|
||||
goto decode_intra_mb;
|
||||
}
|
||||
} else {
|
||||
assert(h->slice_type == I_TYPE);
|
||||
assert(h->slice_type == FF_I_TYPE);
|
||||
decode_intra_mb:
|
||||
partition_count = 0;
|
||||
cbp= i_mb_type_info[mb_type].cbp;
|
||||
@ -5747,7 +5747,7 @@ decode_intra_mb:
|
||||
} else if( partition_count == 4 ) {
|
||||
int i, j, sub_partition_count[4], list, ref[2][4];
|
||||
|
||||
if( h->slice_type == B_TYPE ) {
|
||||
if( h->slice_type == FF_B_TYPE ) {
|
||||
for( i = 0; i < 4; i++ ) {
|
||||
h->sub_mb_type[i] = decode_cabac_b_mb_sub_type( h );
|
||||
sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count;
|
||||
@ -6445,7 +6445,7 @@ static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
|
||||
int step = IS_8x8DCT(mb_type) ? 2 : 1;
|
||||
edges = (mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
|
||||
s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
|
||||
(h->slice_type == B_TYPE), edges, step, mask_edge0, mask_edge1 );
|
||||
(h->slice_type == FF_B_TYPE), edges, step, mask_edge0, mask_edge1 );
|
||||
}
|
||||
if( IS_INTRA(s->current_picture.mb_type[mb_xy-1]) )
|
||||
bSv[0][0] = 0x0004000400040004ULL;
|
||||
@ -6676,7 +6676,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
|
||||
int b_idx= 8 + 4 + edge * (dir ? 8:1);
|
||||
int bn_idx= b_idx - (dir ? 8:1);
|
||||
int v = 0;
|
||||
for( l = 0; !v && l < 1 + (h->slice_type == B_TYPE); l++ ) {
|
||||
for( l = 0; !v && l < 1 + (h->slice_type == FF_B_TYPE); l++ ) {
|
||||
v |= ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
|
||||
FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
|
||||
FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit;
|
||||
@ -6700,7 +6700,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
|
||||
else if(!mv_done)
|
||||
{
|
||||
bS[i] = 0;
|
||||
for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
|
||||
for( l = 0; l < 1 + (h->slice_type == FF_B_TYPE); l++ ) {
|
||||
if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
|
||||
FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
|
||||
FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit ) {
|
||||
@ -6763,7 +6763,7 @@ static int decode_slice(struct AVCodecContext *avctx, H264Context *h){
|
||||
/* calculate pre-state */
|
||||
for( i= 0; i < 460; i++ ) {
|
||||
int pre;
|
||||
if( h->slice_type == I_TYPE )
|
||||
if( h->slice_type == FF_I_TYPE )
|
||||
pre = av_clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
|
||||
else
|
||||
pre = av_clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
|
||||
@ -7524,8 +7524,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
s->current_picture_ptr->key_frame|= (hx->nal_unit_type == NAL_IDR_SLICE);
|
||||
if(hx->redundant_pic_count==0 && hx->s.hurry_up < 5
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type!=B_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type==I_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type!=FF_B_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type==FF_I_TYPE)
|
||||
&& avctx->skip_frame < AVDISCARD_ALL)
|
||||
context_count++;
|
||||
break;
|
||||
@ -7549,8 +7549,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
&& s->context_initialized
|
||||
&& s->hurry_up < 5
|
||||
&& (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type!=B_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type==I_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_BIDIR || hx->slice_type!=FF_B_TYPE)
|
||||
&& (avctx->skip_frame < AVDISCARD_NONKEY || hx->slice_type==FF_I_TYPE)
|
||||
&& avctx->skip_frame < AVDISCARD_ALL)
|
||||
context_count++;
|
||||
break;
|
||||
@ -7814,7 +7814,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
else if((out_of_order && pics-1 == s->avctx->has_b_frames && pics < 15)
|
||||
|| (s->low_delay &&
|
||||
((!cross_idr && prev && out->poc > prev->poc + 2)
|
||||
|| cur->pict_type == B_TYPE)))
|
||||
|| cur->pict_type == FF_B_TYPE)))
|
||||
{
|
||||
s->low_delay = 0;
|
||||
s->avctx->has_b_frames++;
|
||||
|
@@ -75,7 +75,7 @@ static const AVRational pixel_aspect[17]={
 };

 static const uint8_t golomb_to_pict_type[5]=
-{P_TYPE, B_TYPE, I_TYPE, SP_TYPE, SI_TYPE};
+{FF_P_TYPE, FF_B_TYPE, FF_I_TYPE, FF_SP_TYPE, FF_SI_TYPE};

 static const uint8_t pict_type_to_golomb[7]=
 {-1, 2, 0, 1, -1, 4, 3};
@ -134,7 +134,7 @@ float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run){
|
||||
if(!dry_run)
|
||||
s->rc_context.dry_run_qscale= 0;
|
||||
|
||||
if(s->pict_type == B_TYPE) //FIXME this is not exactly identical to xvid
|
||||
if(s->pict_type == FF_B_TYPE) //FIXME this is not exactly identical to xvid
|
||||
return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
|
||||
else
|
||||
return xvid_plg_data.quant * FF_QP2LAMBDA;
|
||||
|
@ -173,7 +173,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return -1;
|
||||
}
|
||||
p->pict_type= I_TYPE;
|
||||
p->pict_type= FF_I_TYPE;
|
||||
p->key_frame= 1;
|
||||
|
||||
a->bitstream_buffer= av_fast_realloc(a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
@ -2026,7 +2026,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
|
||||
continue;
|
||||
|
||||
for(j=0; j<fcode && j<8; j++){
|
||||
if(s->pict_type==B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
|
||||
if(s->pict_type==FF_B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy])
|
||||
score[j]-= 170;
|
||||
}
|
||||
}
|
||||
@ -2058,7 +2058,7 @@ void ff_fix_long_p_mvs(MpegEncContext * s)
|
||||
MotionEstContext * const c= &s->me;
|
||||
const int f_code= s->f_code;
|
||||
int y, range;
|
||||
assert(s->pict_type==P_TYPE);
|
||||
assert(s->pict_type==FF_P_TYPE);
|
||||
|
||||
range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code);
|
||||
|
||||
|
@ -1035,7 +1035,7 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
|
||||
score_map[0]= dmin;
|
||||
|
||||
//FIXME precalc first term below?
|
||||
if((s->pict_type == B_TYPE && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0)
|
||||
if((s->pict_type == FF_B_TYPE && !(c->flags & FLAG_DIRECT)) || s->flags&CODEC_FLAG_MV0)
|
||||
dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor;
|
||||
|
||||
/* first line */
|
||||
|
@ -221,7 +221,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
|
||||
assert(s->mb_skipped==0);
|
||||
|
||||
if (s->mb_skip_run-- != 0) {
|
||||
if (s->pict_type == P_TYPE) {
|
||||
if (s->pict_type == FF_P_TYPE) {
|
||||
s->mb_skipped = 1;
|
||||
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
|
||||
} else {
|
||||
@ -247,7 +247,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
|
||||
|
||||
switch(s->pict_type) {
|
||||
default:
|
||||
case I_TYPE:
|
||||
case FF_I_TYPE:
|
||||
if (get_bits1(&s->gb) == 0) {
|
||||
if (get_bits1(&s->gb) == 0){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
@ -258,7 +258,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
mb_type = MB_TYPE_INTRA;
}
break;
case P_TYPE:
case FF_P_TYPE:
mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
@ -266,7 +266,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
mb_type = ptype2mb_type[ mb_type ];
break;
case B_TYPE:
case FF_B_TYPE:
mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
@ -1365,7 +1365,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
return -1;

vbv_delay= get_bits(&s->gb, 16);
if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
s->full_pel[0] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT)
@ -1373,7 +1373,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[0][0] = f_code;
s->mpeg_f_code[0][1] = f_code;
}
if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
s->full_pel[1] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3);
if (f_code == 0 && avctx->error_resilience >= FF_ER_COMPLIANT)
@ -1382,7 +1382,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][1] = f_code;
}
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@ -1731,7 +1731,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")),
s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
@ -1757,7 +1757,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,

for(i=0; i<2; i++){
for(dir=0; dir<2; dir++){
if (s->mb_intra || (dir==1 && s->pict_type != B_TYPE)) {
if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
motion_x = motion_y = 0;
}else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
motion_x = s->mv[dir][0][0];
@ -1795,7 +1795,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,

if(s->mb_y<<field_pic >= s->mb_height){
int left= s->gb.size_in_bits - get_bits_count(&s->gb);
int is_d10= s->chroma_format==2 && s->pict_type==I_TYPE && avctx->profile==0 && avctx->level==5
int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
&& s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
&& s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;

@ -1838,7 +1838,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
}
if(s->mb_skip_run){
int i;
if(s->pict_type == I_TYPE){
if(s->pict_type == FF_I_TYPE){
av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
@ -1851,7 +1851,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
s->mv_type = MV_TYPE_16X16;
else
s->mv_type = MV_TYPE_FIELD;
if (s->pict_type == P_TYPE) {
if (s->pict_type == FF_P_TYPE) {
/* if P type, zero motion vector is implied */
s->mv_dir = MV_DIR_FORWARD;
s->mv[0][0][0] = s->mv[0][0][1] = 0;
@ -1935,7 +1935,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)

MPV_frame_end(s);

if (s->pict_type == B_TYPE || s->low_delay) {
if (s->pict_type == FF_B_TYPE || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
ff_print_debug_info(s, pict);
} else {
@ -2288,7 +2288,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
start_code = -1;
buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
if (start_code > 0x1ff){
if(s2->pict_type != B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(avctx->thread_count > 1){
int i;

@ -2342,16 +2342,16 @@ static int mpeg_decode_frame(AVCodecContext *avctx,

if(s2->last_picture_ptr==NULL){
/* Skip B-frames if we do not have reference frames. */
if(s2->pict_type==B_TYPE) break;
if(s2->pict_type==FF_B_TYPE) break;
}
if(s2->next_picture_ptr==NULL){
/* Skip P-frames if we do not have reference frame no valid header. */
if(s2->pict_type==P_TYPE && (s2->first_field || s2->picture_structure==PICT_FRAME)) break;
if(s2->pict_type==FF_P_TYPE && (s2->first_field || s2->picture_structure==PICT_FRAME)) break;
}
/* Skip B-frames if we are in a hurry. */
if(avctx->hurry_up && s2->pict_type==B_TYPE) break;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=I_TYPE)
if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
|| avctx->skip_frame >= AVDISCARD_ALL)
break;
/* Skip everything if we are in a hurry>=5. */
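The two skip_frame checks just above (and the identical ones in the svq1 decoder further down) compare the caller's AVDiscard level against the frame type. A minimal standalone sketch of that decision, assuming the AVDiscard and FF_*_TYPE numeric values from the avcodec.h of this era (restated below only so the example compiles on its own):

/* Sketch only -- not part of this commit. */
#include <stdio.h>

#define FF_I_TYPE 1
#define FF_P_TYPE 2
#define FF_B_TYPE 3

#define AVDISCARD_DEFAULT  0
#define AVDISCARD_NONREF   8
#define AVDISCARD_NONKEY  32
#define AVDISCARD_ALL     48

/* Same condition as above: drop B frames at NONREF, everything but I frames
 * at NONKEY, and every frame at ALL. */
static int drop_frame(int skip_frame, int pict_type)
{
    return (skip_frame >= AVDISCARD_NONREF && pict_type == FF_B_TYPE)
        || (skip_frame >= AVDISCARD_NONKEY && pict_type != FF_I_TYPE)
        ||  skip_frame >= AVDISCARD_ALL;
}

int main(void)
{
    printf("%d %d %d\n",
           drop_frame(AVDISCARD_NONREF, FF_B_TYPE),  /* 1: B frame dropped */
           drop_frame(AVDISCARD_NONREF, FF_P_TYPE),  /* 0: P frame kept */
           drop_frame(AVDISCARD_NONKEY, FF_P_TYPE)); /* 1: only I frames kept */
    return 0;
}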

@ -349,7 +349,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */

// RAL: Forward f_code also needed for B frames
if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
@ -358,7 +358,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
}

// RAL: Backward f_code necessary for B frames
if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */
if(s->codec_id == CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
@ -372,13 +372,13 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
if(s->codec_id == CODEC_ID_MPEG2VIDEO){
put_header(s, EXT_START_CODE);
put_bits(&s->pb, 4, 8); //pic ext
if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) {
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
put_bits(&s->pb, 4, s->f_code);
put_bits(&s->pb, 4, s->f_code);
}else{
put_bits(&s->pb, 8, 255);
}
if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
put_bits(&s->pb, 4, s->b_code);
put_bits(&s->pb, 4, s->b_code);
}else{
@ -451,15 +451,15 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,

if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 &&
(mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) ||
(s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->pict_type == FF_P_TYPE && (motion_x | motion_y) == 0) ||
(s->pict_type == FF_B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++;
s->qscale -= s->dquant;
s->skip_count++;
s->misc_bits++;
s->last_bits++;
if(s->pict_type == P_TYPE){
if(s->pict_type == FF_P_TYPE){
s->last_mv[0][1][0]= s->last_mv[0][0][0]=
s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
}
@ -471,7 +471,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
encode_mb_skip_run(s, s->mb_skip_run);
}

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
if(s->dquant && cbp){
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
put_qscale(s);
@ -492,7 +492,7 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
s->misc_bits+= get_bits_diff(s);
s->i_count++;
memset(s->last_mv, 0, sizeof(s->last_mv));
} else if (s->pict_type == P_TYPE) {
} else if (s->pict_type == FF_P_TYPE) {
if(s->mv_type == MV_TYPE_16X16){
if (cbp != 0) {
if ((motion_x|motion_y) == 0) {

@ -237,7 +237,7 @@ int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
* but it would require an API change. */
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
s->prev_pict_types[0]= s->pict_type;
if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

return 0;
@ -836,7 +836,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

/* mark&release old frames */
if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

@ -872,7 +872,7 @@ alloc:
if (!s->dropable){
if (s->codec_id == CODEC_ID_H264)
pic->reference = s->picture_structure;
else if (s->pict_type != B_TYPE)
else if (s->pict_type != FF_B_TYPE)
pic->reference = 3;
}

@ -889,11 +889,11 @@ alloc:
s->current_picture_ptr->pict_type= s->pict_type;
// if(s->flags && CODEC_FLAG_QSCALE)
// s->current_picture_ptr->quality= s->new_picture_ptr->quality;
s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;

copy_picture(&s->current_picture, s->current_picture_ptr);

if (s->pict_type != B_TYPE) {
if (s->pict_type != FF_B_TYPE) {
s->last_picture_ptr= s->next_picture_ptr;
if(!s->dropable)
s->next_picture_ptr= s->current_picture_ptr;
@ -907,13 +907,13 @@ alloc:
if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);

if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable){
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference
goto alloc;
}

assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
int i;
@ -976,7 +976,7 @@ void MPV_frame_end(MpegEncContext *s)

s->last_pict_type = s->pict_type;
s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
if(s->pict_type!=B_TYPE){
if(s->pict_type!=FF_B_TYPE){
s->last_non_b_pict_type= s->pict_type;
}
#if 0
@ -1602,7 +1602,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1], block_s);
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
ref_picture= s->current_picture_ptr->data;
}

@ -1616,7 +1616,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
for(i=0; i<2; i++){
uint8_t ** ref2picture;

if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;
@ -1774,14 +1774,14 @@ static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM b
else if (s->h263_pred || s->h263_aic)
s->mbintra_table[mb_xy]=1;

if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize= s->current_picture.linesize[1];
const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

/* avoid copy if macroblock skipped in last frame too */
@ -1794,7 +1794,7 @@ static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM b

if (s->mb_skipped) {
s->mb_skipped= 0;
assert(s->pict_type!=I_TYPE);
assert(s->pict_type!=FF_I_TYPE);

(*mbskip_ptr) ++; /* indicate that this time we skipped it */
if(*mbskip_ptr >99) *mbskip_ptr= 99;
@ -1840,7 +1840,7 @@ static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM b
}
}else{
op_qpix= s->me.qpel_put;
if ((!s->no_rounding) || s->pict_type==B_TYPE){
if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
op_pix = s->dsp.put_pixels_tab;
}else{
op_pix = s->dsp.put_no_rnd_pixels_tab;
@ -1859,8 +1859,8 @@ static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM b
/* skip dequant / idct if we are really late ;) */
if(s->hurry_up>1) goto skip_idct;
if(s->avctx->skip_idct){
if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == B_TYPE)
||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != I_TYPE)
if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
|| s->avctx->skip_idct >= AVDISCARD_ALL)
goto skip_idct;
}
@ -1998,14 +1998,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){

h= FFMIN(h, s->avctx->height - y);

if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
src= (AVFrame*)s->current_picture_ptr;
else if(s->last_picture_ptr)
src= (AVFrame*)s->last_picture_ptr;
else
return;

if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
offset[0]=
offset[1]=
offset[2]=
@ -2041,7 +2041,7 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

if(!(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
{
s->dest[0] += s->mb_y * linesize << mb_size;
s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);

@ -61,13 +61,6 @@ enum OutputFormat {
#define ME_MAP_SHIFT 3
#define ME_MAP_MV_BITS 11

#define I_TYPE FF_I_TYPE ///< Intra
#define P_TYPE FF_P_TYPE ///< Predicted
#define B_TYPE FF_B_TYPE ///< Bi-dir predicted
#define S_TYPE FF_S_TYPE ///< S(GMC)-VOP MPEG4
#define SI_TYPE FF_SI_TYPE ///< Switching Intra
#define SP_TYPE FF_SP_TYPE ///< Switching Predicted

#define MAX_MB_BYTES (30*16*16*3/8 + 120)

#define INPLACE_OFFSET 16
@ -316,7 +309,7 @@ typedef struct MpegEncContext {
int *lambda_table;
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
int pict_type; ///< I_TYPE, P_TYPE, B_TYPE, ...
int pict_type; ///< FF_I_TYPE, FF_P_TYPE, FF_B_TYPE, ...
int last_pict_type; //FIXME removes
int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
int dropable;
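The hunk above is the reason for every other change in this commit: the short I/P/B_TYPE aliases are deleted from mpegvideo.h, so all call sites must spell out the avcodec.h names. A minimal sketch of what that looks like for user code, with the FF_*_TYPE values (1, 2, 3 in this era's avcodec.h) restated as an assumption of the example:

/* Sketch only -- illustrates the rename, not code from the commit. */
#include <stdio.h>

#define FF_I_TYPE 1   /* Intra */
#define FF_P_TYPE 2   /* Predicted */
#define FF_B_TYPE 3   /* Bi-dir predicted */

static const char *pict_type_name(int pict_type)
{
    /* Before the commit this comparison could use the bare I/P/B_TYPE
     * aliases; afterwards only the FF_-prefixed names exist. */
    return pict_type == FF_I_TYPE ? "I" :
           pict_type == FF_P_TYPE ? "P" :
           pict_type == FF_B_TYPE ? "B" : "?";
}

int main(void)
{
    printf("%s %s %s\n", pict_type_name(FF_I_TYPE),
                         pict_type_name(FF_P_TYPE),
                         pict_type_name(FF_B_TYPE)); /* prints: I P B */
    return 0;
}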

@ -608,7 +608,7 @@ static inline void MPV_motion(MpegEncContext *s,

prefetch_motion(s, ref_picture, dir);

if(s->obmc && s->pict_type != B_TYPE){
if(s->obmc && s->pict_type != FF_B_TYPE){
int16_t mv_cache[4][4][2];
const int xy= s->mb_x + s->mb_y*s->mb_stride;
const int mot_stride= s->b8_stride;
@ -770,7 +770,7 @@ static inline void MPV_motion(MpegEncContext *s,
s->mv[dir][1][0], s->mv[dir][1][1], 8);
}
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
ref_picture= s->current_picture_ptr->data;
}

@ -784,7 +784,7 @@ static inline void MPV_motion(MpegEncContext *s,
for(i=0; i<2; i++){
uint8_t ** ref2picture;

if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;

@ -939,8 +939,8 @@ static int estimate_best_b_count(MpegEncContext *s){
assert(scale>=0 && scale <=3);

// emms_c();
p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality;
b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;

@ -995,7 +995,7 @@ static int estimate_best_b_count(MpegEncContext *s){

c->error[0]= c->error[1]= c->error[2]= 0;

input[0].pict_type= I_TYPE;
input[0].pict_type= FF_I_TYPE;
input[0].quality= 1 * FF_QP2LAMBDA;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
// rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
@ -1003,7 +1003,7 @@ static int estimate_best_b_count(MpegEncContext *s){
for(i=0; i<s->max_b_frames+1; i++){
int is_p= i % (j+1) == j || i==s->max_b_frames;

input[i+1].pict_type= is_p ? P_TYPE : B_TYPE;
input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE;
input[i+1].quality= is_p ? p_lambda : b_lambda;
out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
@ -1045,7 +1045,7 @@ static void select_input_picture(MpegEncContext *s){
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0];
s->reordered_input_picture[0]->pict_type= I_TYPE;
s->reordered_input_picture[0]->pict_type= FF_I_TYPE;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
}else{
int b_frames;
@ -1080,7 +1080,7 @@ static void select_input_picture(MpegEncContext *s){
if(pict_num >= s->rc_context.num_entries)
break;
if(!s->input_picture[i]){
s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE;
s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE;
break;
}

@ -1124,10 +1124,10 @@ static void select_input_picture(MpegEncContext *s){

for(i= b_frames - 1; i>=0; i--){
int type= s->input_picture[i]->pict_type;
if(type && type != B_TYPE)
if(type && type != FF_B_TYPE)
b_frames= i;
}
if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){
if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){
av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
}

@ -1137,29 +1137,29 @@ static void select_input_picture(MpegEncContext *s){
}else{
if(s->flags & CODEC_FLAG_CLOSED_GOP)
b_frames=0;
s->input_picture[b_frames]->pict_type= I_TYPE;
s->input_picture[b_frames]->pict_type= FF_I_TYPE;
}
}

if( (s->flags & CODEC_FLAG_CLOSED_GOP)
&& b_frames
&& s->input_picture[b_frames]->pict_type== I_TYPE)
&& s->input_picture[b_frames]->pict_type== FF_I_TYPE)
b_frames--;

s->reordered_input_picture[0]= s->input_picture[b_frames];
if(s->reordered_input_picture[0]->pict_type != I_TYPE)
s->reordered_input_picture[0]->pict_type= P_TYPE;
if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE)
s->reordered_input_picture[0]->pict_type= FF_P_TYPE;
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
for(i=0; i<b_frames; i++){
s->reordered_input_picture[i+1]= s->input_picture[i];
s->reordered_input_picture[i+1]->pict_type= B_TYPE;
s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE;
s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
}
}
}
no_output_pic:
if(s->reordered_input_picture[0]){
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 3 : 0;

copy_picture(&s->new_picture, s->reordered_input_picture[0]);

@ -1263,11 +1263,11 @@ vbv_retry:
s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
}
s->mb_skipped = 0; //done in MPV_frame_start()
if(s->pict_type==P_TYPE){ //done in encode_picture() so we must undo it
if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1;
}
if(s->pict_type!=B_TYPE){
if(s->pict_type!=FF_B_TYPE){
s->time_base= s->last_time_base;
s->last_non_b_time= s->time - s->pp_time;
}
@ -1484,7 +1484,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,

if(s->codec_id==CODEC_ID_MPEG4){
if(!s->mb_intra){
if(s->pict_type == B_TYPE){
if(s->pict_type == FF_B_TYPE){
if(s->dquant&1 || s->mv_dir&MV_DIRECT)
s->dquant= 0;
}
@ -1561,7 +1561,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
dest_cb = s->dest[1];
dest_cr = s->dest[2];

if ((!s->no_rounding) || s->pict_type==B_TYPE){
if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
op_pix = s->dsp.put_pixels_tab;
op_qpix= s->dsp.put_qpel_pixels_tab;
}else{
@ -1952,7 +1952,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){
s->block_index[3]+=2;

/* compute motion vector & mb_type and store in context */
if(s->pict_type==B_TYPE)
if(s->pict_type==FF_B_TYPE)
ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
else
ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
@ -2349,7 +2349,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv[1][0][0] = best_s.mv[1][0][0];
s->mv[1][0][1] = best_s.mv[1][0][1];

qpi = s->pict_type == B_TYPE ? 2 : 0;
qpi = s->pict_type == FF_B_TYPE ? 2 : 0;
for(; qpi<4; qpi++){
int dquant= dquant_tab[qpi];
qp= last_qp + dquant;
@ -2451,7 +2451,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_bits= put_bits_count(&s->pb);

if (ENABLE_ANY_H263_ENCODER &&
s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
ff_h263_update_motion_val(s);

if(next_block==0){ //FIXME 16 vs linesize16
@ -2578,7 +2578,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->last_mv_dir = s->mv_dir;

if (ENABLE_ANY_H263_ENCODER &&
s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
ff_h263_update_motion_val(s);

MPV_decode_mb(s, s->block);
@ -2616,7 +2616,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
}

//not beautiful here but we must write it before flushing so it has to be here
if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE)
msmpeg4_encode_ext_header(s);

write_slice_end(s);
@ -2712,7 +2712,7 @@ static void set_frame_distances(MpegEncContext * s){
assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;

if(s->pict_type==B_TYPE){
if(s->pict_type==FF_B_TYPE){
s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
assert(s->pb_time > 0 && s->pb_time < s->pp_time);
}else{
@ -2744,10 +2744,10 @@ static int encode_picture(MpegEncContext *s, int picture_number)

// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration

if(s->pict_type==I_TYPE){
if(s->pict_type==FF_I_TYPE){
if(s->msmpeg4_version >= 3) s->no_rounding=1;
else s->no_rounding=0;
}else if(s->pict_type!=B_TYPE){
}else if(s->pict_type!=FF_B_TYPE){
if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1;
}
@ -2757,7 +2757,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
return -1;
ff_get_2pass_fcode(s);
}else if(!(s->flags & CODEC_FLAG_QSCALE)){
if(s->pict_type==B_TYPE)
if(s->pict_type==FF_B_TYPE)
s->lambda= s->last_lambda_for[s->pict_type];
else
s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
@ -2772,17 +2772,17 @@ static int encode_picture(MpegEncContext *s, int picture_number)
ff_init_me(s);

/* Estimate motion for every MB */
if(s->pict_type != I_TYPE){
if(s->pict_type != FF_I_TYPE){
s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){
if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){
s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
}
}

s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
}else /* if(s->pict_type == I_TYPE) */{
}else /* if(s->pict_type == FF_I_TYPE) */{
/* I-Frame */
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
@ -2799,15 +2799,15 @@ static int encode_picture(MpegEncContext *s, int picture_number)
s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
emms_c();

if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
s->pict_type= I_TYPE;
if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){
s->pict_type= FF_I_TYPE;
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}

if(!s->umvplus){
if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) {
s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

if(s->flags & CODEC_FLAG_INTERLACED_ME){
@ -2829,7 +2829,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
}
}

if(s->pict_type==B_TYPE){
if(s->pict_type==FF_B_TYPE){
int a, b;

a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
@ -2863,7 +2863,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
if (estimate_qp(s, 0) < 0)
return -1;

if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
s->qscale= 3; //reduce clipping problems

if (s->out_format == FMT_MJPEG) {
@ -2881,7 +2881,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)

//FIXME var duplication
s->current_picture_ptr->key_frame=
s->current_picture.key_frame= s->pict_type == I_TYPE; //FIXME pic_ptr
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr
s->current_picture_ptr->pict_type=
s->current_picture.pict_type= s->pict_type;

@ -263,7 +263,7 @@ void ff_find_best_tables(MpegEncContext * s)
int intra_luma_count = s->ac_stats[1][0][level][run][last];
int intra_chroma_count= s->ac_stats[1][1][level][run][last];

if(s->pict_type==I_TYPE){
if(s->pict_type==FF_I_TYPE){
size += intra_luma_count *rl_length[i ][level][run][last];
chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
}else{
@ -288,7 +288,7 @@ void ff_find_best_tables(MpegEncContext * s)
// printf("type:%d, best:%d, qp:%d, var:%d, mcvar:%d, size:%d //\n",
// s->pict_type, best, s->qscale, s->mb_var_sum, s->mc_mb_var_sum, best_size);

if(s->pict_type==P_TYPE) chroma_best= best;
if(s->pict_type==FF_P_TYPE) chroma_best= best;

memset(s->ac_stats, 0, sizeof(int)*(MAX_LEVEL+1)*(MAX_RUN+1)*2*2*2);

@ -297,7 +297,7 @@ void ff_find_best_tables(MpegEncContext * s)

if(s->pict_type != s->last_non_b_pict_type){
s->rl_table_index= 2;
if(s->pict_type==I_TYPE)
if(s->pict_type==FF_I_TYPE)
s->rl_chroma_table_index= 1;
else
s->rl_chroma_table_index= 2;
@ -324,10 +324,10 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
s->use_skip_mb_code = 1; /* only if P frame */
s->per_mb_rl_table = 0;
if(s->msmpeg4_version==4)
s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==P_TYPE);
s->inter_intra_pred= (s->width*s->height < 320*240 && s->bit_rate<=II_BITRATE && s->pict_type==FF_P_TYPE);
//printf("%d %d %d %d %d\n", s->pict_type, s->bit_rate, s->inter_intra_pred, s->width, s->height);

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
s->slice_height= s->mb_height/1;
put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height);

@ -550,7 +550,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
#endif

if(s->msmpeg4_version<=2){
if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
put_bits(&s->pb,
v2_intra_cbpc[cbp&3][1], v2_intra_cbpc[cbp&3][0]);
} else {
@ -565,7 +565,7 @@ void msmpeg4_encode_mb(MpegEncContext * s,
cbpy_tab[cbp>>2][1],
cbpy_tab[cbp>>2][0]);
}else{
if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
} else {
@ -1176,15 +1176,15 @@ return -1;
}

s->pict_type = get_bits(&s->gb, 2) + 1;
if (s->pict_type != I_TYPE &&
s->pict_type != P_TYPE){
if (s->pict_type != FF_I_TYPE &&
s->pict_type != FF_P_TYPE){
av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n");
return -1;
}
#if 0
{
static int had_i=0;
if(s->pict_type == I_TYPE) had_i=1;
if(s->pict_type == FF_I_TYPE) had_i=1;
if(!had_i) return -1;
}
#endif
@ -1194,7 +1194,7 @@ return -1;
return -1;
}

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
code = get_bits(&s->gb, 5);
if(s->msmpeg4_version==1){
if(code==0 || code>s->mb_height){
@ -1428,7 +1428,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, code, i;

if (s->pict_type == P_TYPE) {
if (s->pict_type == FF_P_TYPE) {
if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) {
/* skip mb */
@ -1495,7 +1495,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
} else{
s->ac_pred = 0;
cbp|= get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1)<<2; //FIXME check errors
if(s->pict_type==P_TYPE) cbp^=0x3C;
if(s->pict_type==FF_P_TYPE) cbp^=0x3C;
}
}

@ -1516,7 +1516,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
uint8_t *coded_val;
uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];

if (s->pict_type == P_TYPE) {
if (s->pict_type == FF_P_TYPE) {
if (s->use_skip_mb_code) {
if (get_bits1(&s->gb)) {
/* skip mb */

@ -197,7 +197,7 @@ int av_parser_change(AVCodecParserContext *s,
*poutbuf_size= buf_size;
if(avctx->extradata){
if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER))
/*||(s->pict_type != I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
/*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/
/*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){
int size= buf_size + avctx->extradata_size;
*poutbuf_size= size;

@ -144,7 +144,7 @@ int ff_rate_control_init(MpegEncContext *s)
/* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */
for(i=0; i<rcc->num_entries; i++){
RateControlEntry *rce= &rcc->entry[i];
rce->pict_type= rce->new_pict_type=P_TYPE;
rce->pict_type= rce->new_pict_type=FF_P_TYPE;
rce->qscale= rce->new_qscale=FF_QP2LAMBDA * 2;
rce->misc_bits= s->mb_num + 10;
rce->mb_var_sum= s->mb_num*100;
@ -212,9 +212,9 @@ int ff_rate_control_init(MpegEncContext *s)
RateControlEntry rce;
double q;

if (i%((s->gop_size+3)/4)==0) rce.pict_type= I_TYPE;
else if(i%(s->max_b_frames+1)) rce.pict_type= B_TYPE;
else rce.pict_type= P_TYPE;
if (i%((s->gop_size+3)/4)==0) rce.pict_type= FF_I_TYPE;
else if(i%(s->max_b_frames+1)) rce.pict_type= FF_B_TYPE;
else rce.pict_type= FF_P_TYPE;

rce.new_pict_type= rce.pict_type;
rce.mc_mb_var_sum= bits*s->mb_num/100000;
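The three pict_type lines above synthesize a frame-type pattern when no real 2-pass log is available. A quick standalone sketch that replays the same heuristic with made-up gop_size and max_b_frames values (FF_*_TYPE numbers restated as an assumption):

/* Sketch only -- replays the pict_type heuristic above for illustration. */
#include <stdio.h>

#define FF_I_TYPE 1
#define FF_P_TYPE 2
#define FF_B_TYPE 3

int main(void)
{
    int gop_size = 30, max_b_frames = 2;   /* illustrative values only */
    for (int i = 0; i < 12; i++) {
        int pict_type;
        if (i % ((gop_size + 3) / 4) == 0) pict_type = FF_I_TYPE;
        else if (i % (max_b_frames + 1))   pict_type = FF_B_TYPE;
        else                               pict_type = FF_P_TYPE;
        putchar("?IPB"[pict_type]);
    }
    putchar('\n');  /* prints IBBPBBPBIPBB for these values */
    return 0;
}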

@ -224,7 +224,7 @@ int ff_rate_control_init(MpegEncContext *s)
rce.b_code = 1;
rce.misc_bits= 1;

if(s->pict_type== I_TYPE){
if(s->pict_type== FF_I_TYPE){
rce.i_count = s->mb_num;
rce.i_tex_bits= bits;
rce.p_tex_bits= 0;
@ -320,23 +320,23 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
rce->p_tex_bits*rce->qscale,
(rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale,
rce->mv_bits/mb_num,
rce->pict_type == B_TYPE ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
rce->pict_type == FF_B_TYPE ? (rce->f_code + rce->b_code)*0.5 : rce->f_code,
rce->i_count/mb_num,
rce->mc_mb_var_sum/mb_num,
rce->mb_var_sum/mb_num,
rce->pict_type == I_TYPE,
rce->pict_type == P_TYPE,
rce->pict_type == B_TYPE,
rce->pict_type == FF_I_TYPE,
rce->pict_type == FF_P_TYPE,
rce->pict_type == FF_B_TYPE,
rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
a->qcompress,
/* rcc->last_qscale_for[I_TYPE],
rcc->last_qscale_for[P_TYPE],
rcc->last_qscale_for[B_TYPE],
/* rcc->last_qscale_for[FF_I_TYPE],
rcc->last_qscale_for[FF_P_TYPE],
rcc->last_qscale_for[FF_B_TYPE],
rcc->next_non_b_qscale,*/
rcc->i_cplx_sum[I_TYPE] / (double)rcc->frame_count[I_TYPE],
rcc->i_cplx_sum[P_TYPE] / (double)rcc->frame_count[P_TYPE],
rcc->p_cplx_sum[P_TYPE] / (double)rcc->frame_count[P_TYPE],
rcc->p_cplx_sum[B_TYPE] / (double)rcc->frame_count[B_TYPE],
rcc->i_cplx_sum[FF_I_TYPE] / (double)rcc->frame_count[FF_I_TYPE],
rcc->i_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
rcc->p_cplx_sum[FF_P_TYPE] / (double)rcc->frame_count[FF_P_TYPE],
rcc->p_cplx_sum[FF_B_TYPE] / (double)rcc->frame_count[FF_B_TYPE],
(rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
0
};
@ -367,9 +367,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
q= bits2qp(rce, bits);

/* I/B difference */
if (pict_type==I_TYPE && s->avctx->i_quant_factor<0.0)
if (pict_type==FF_I_TYPE && s->avctx->i_quant_factor<0.0)
q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset;
else if(pict_type==B_TYPE && s->avctx->b_quant_factor<0.0)
else if(pict_type==FF_B_TYPE && s->avctx->b_quant_factor<0.0)
q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset;
if(q<1) q=1;

@ -380,17 +380,17 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
RateControlContext *rcc= &s->rc_context;
AVCodecContext *a= s->avctx;
const int pict_type= rce->new_pict_type;
const double last_p_q = rcc->last_qscale_for[P_TYPE];
const double last_p_q = rcc->last_qscale_for[FF_P_TYPE];
const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];

if (pict_type==I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==P_TYPE))
if (pict_type==FF_I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==FF_P_TYPE))
q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset;
else if(pict_type==B_TYPE && a->b_quant_factor>0.0)
else if(pict_type==FF_B_TYPE && a->b_quant_factor>0.0)
q= last_non_b_q* a->b_quant_factor + a->b_quant_offset;
if(q<1) q=1;

/* last qscale / qdiff stuff */
if(rcc->last_non_b_pict_type==pict_type || pict_type!=I_TYPE){
if(rcc->last_non_b_pict_type==pict_type || pict_type!=FF_I_TYPE){
double last_q= rcc->last_qscale_for[pict_type];
const int maxdiff= FF_QP2LAMBDA * a->max_qdiff;

@ -400,7 +400,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl

rcc->last_qscale_for[pict_type]= q; //Note we cannot do that after blurring

if(pict_type!=B_TYPE)
if(pict_type!=FF_B_TYPE)
rcc->last_non_b_pict_type= pict_type;

return q;
@ -415,10 +415,10 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic

assert(qmin <= qmax);

if(pict_type==B_TYPE){
if(pict_type==FF_B_TYPE){
qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
}else if(pict_type==I_TYPE){
}else if(pict_type==FF_I_TYPE){
qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
}
@ -445,7 +445,7 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q,
get_qminmax(&qmin, &qmax, s, pict_type);

/* modulation */
if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==P_TYPE)
if(s->avctx->rc_qmod_freq && frame_num%s->avctx->rc_qmod_freq==0 && pict_type==FF_P_TYPE)
q*= s->avctx->rc_qmod_amp;

bits= qp2bits(rce, q);
@ -689,7 +689,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);
/* update predictors */
if(picture_number>2 && !dry_run){
const int last_var= s->last_pict_type == I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
const int last_var= s->last_pict_type == FF_I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum;
update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits);
}

@ -704,7 +704,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)

//FIXME add a dts field to AVFrame and ensure its set and use it here instead of reordering
//but the reordering is simpler for now until h.264 b pyramid must be handeld
if(s->pict_type == B_TYPE || s->low_delay)
if(s->pict_type == FF_B_TYPE || s->low_delay)
dts_pic= s->current_picture_ptr;
else
dts_pic= s->last_picture_ptr;
@ -722,11 +722,11 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
if(br_compensation<=0.0) br_compensation=0.001;

var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;
var= pict_type == FF_I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;

short_term_q = 0; /* avoid warning */
if(s->flags&CODEC_FLAG_PASS2){
if(pict_type!=I_TYPE)
if(pict_type!=FF_I_TYPE)
assert(pict_type == rce->new_pict_type);

q= rce->new_qscale / br_compensation;
@ -742,7 +742,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rce->misc_bits= 1;

bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
if(pict_type== I_TYPE){
if(pict_type== FF_I_TYPE){
rce->i_count = s->mb_num;
rce->i_tex_bits= bits;
rce->p_tex_bits= 0;
@ -772,7 +772,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//printf("%f ", q);
assert(q>0.0);

if(pict_type==P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass
if(pict_type==FF_P_TYPE || s->intra_only){ //FIXME type dependent blur like in 2-pass
rcc->short_term_qsum*=a->qblur;
rcc->short_term_qcount*=a->qblur;

@ -855,7 +855,7 @@ static int init_pass2(MpegEncContext *s)
complexity[rce->new_pict_type]+= (rce->i_tex_bits+ rce->p_tex_bits)*(double)rce->qscale;
const_bits[rce->new_pict_type]+= rce->mv_bits + rce->misc_bits;
}
all_const_bits= const_bits[I_TYPE] + const_bits[P_TYPE] + const_bits[B_TYPE];
all_const_bits= const_bits[FF_I_TYPE] + const_bits[FF_P_TYPE] + const_bits[FF_B_TYPE];

if(all_available_bits < all_const_bits){
av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");

@ -240,13 +240,13 @@ void rv10_encode_picture_header(MpegEncContext *s, int picture_number)

put_bits(&s->pb, 1, 1); /* marker */

put_bits(&s->pb, 1, (s->pict_type == P_TYPE));
put_bits(&s->pb, 1, (s->pict_type == FF_P_TYPE));

put_bits(&s->pb, 1, 0); /* not PB frame */

put_bits(&s->pb, 5, s->qscale);

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
/* specific MPEG like DC coding not used */
}
/* if multiple packets per frame are sent, the position at which
@ -273,13 +273,13 @@ void rv20_encode_picture_header(MpegEncContext *s, int picture_number){

assert(s->f_code == 1);
assert(s->unrestricted_mv == 1);
// assert(s->h263_aic== (s->pict_type == I_TYPE));
// assert(s->h263_aic== (s->pict_type == FF_I_TYPE));
assert(s->alt_inter_vlc == 0);
assert(s->umvplus == 0);
assert(s->modified_quant==1);
assert(s->loop_filter==1);

s->h263_aic= s->pict_type == I_TYPE;
s->h263_aic= s->pict_type == FF_I_TYPE;
if(s->h263_aic){
s->y_dc_scale_table=
s->c_dc_scale_table= ff_aic_dc_scale_table;
@ -315,9 +315,9 @@ static int rv10_decode_picture_header(MpegEncContext *s)
marker = get_bits1(&s->gb);

if (get_bits1(&s->gb))
s->pict_type = P_TYPE;
s->pict_type = FF_P_TYPE;
else
s->pict_type = I_TYPE;
s->pict_type = FF_I_TYPE;
//printf("h:%X ver:%d\n",h,s->rv10_version);
if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n");
pb_frame = get_bits1(&s->gb);
@ -337,7 +337,7 @@ static int rv10_decode_picture_header(MpegEncContext *s)
return -1;
}

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
if (s->rv10_version == 3) {
/* specific MPEG like DC coding not used */
s->last_dc[0] = get_bits(&s->gb, 8);
@ -402,16 +402,16 @@ static int rv20_decode_picture_header(MpegEncContext *s)

i= get_bits(&s->gb, 2);
switch(i){
case 0: s->pict_type= I_TYPE; break;
case 1: s->pict_type= I_TYPE; break; //hmm ...
case 2: s->pict_type= P_TYPE; break;
case 3: s->pict_type= B_TYPE; break;
case 0: s->pict_type= FF_I_TYPE; break;
case 1: s->pict_type= FF_I_TYPE; break; //hmm ...
case 2: s->pict_type= FF_P_TYPE; break;
case 3: s->pict_type= FF_B_TYPE; break;
default:
av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
return -1;
}

if(s->last_picture_ptr==NULL && s->pict_type==B_TYPE){
if(s->last_picture_ptr==NULL && s->pict_type==FF_B_TYPE){
av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
return -1;
}
@ -482,7 +482,7 @@ static int rv20_decode_picture_header(MpegEncContext *s)
if(seq - s->time > 0x4000) seq -= 0x8000;
if(seq - s->time < -0x4000) seq += 0x8000;
if(seq != s->time){
if(s->pict_type!=B_TYPE){
if(s->pict_type!=FF_B_TYPE){
s->time= seq;
s->pp_time= s->time - s->last_non_b_time;
s->last_non_b_time= s->time;
@ -505,7 +505,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/

s->f_code = 1;
s->unrestricted_mv = 1;
s->h263_aic= s->pict_type == I_TYPE;
s->h263_aic= s->pict_type == FF_I_TYPE;
// s->alt_inter_vlc=1;
// s->obmc=1;
// s->umvplus=1;
@ -517,7 +517,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
}

assert(s->pict_type != B_TYPE || !s->low_delay);
assert(s->pict_type != FF_B_TYPE || !s->low_delay);

return s->mb_width*s->mb_height - mb_pos;
}
@ -623,7 +623,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
return -1;
}
//if(s->pict_type == P_TYPE) return 0;
//if(s->pict_type == FF_P_TYPE) return 0;

if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
@ -690,7 +690,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if(s->pict_type != B_TYPE)
if(s->pict_type != FF_B_TYPE)
ff_h263_update_motion_val(s);
MPV_decode_mb(s, s->block);
if(s->loop_filter)
@ -759,7 +759,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
ff_er_frame_end(s);
MPV_frame_end(s);

if (s->pict_type == B_TYPE || s->low_delay) {
if (s->pict_type == FF_B_TYPE || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;

@ -105,7 +105,7 @@ static int rv30_decode_mb_info(RV34DecContext *r)
av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n");
code -= 6;
}
if(s->pict_type != B_TYPE)
if(s->pict_type != FF_B_TYPE)
return rv30_p_types[code];
else
return rv30_b_types[code];

@ -691,7 +691,7 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type)
fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
return 0;
case RV34_MB_SKIP:
if(s->pict_type == P_TYPE){
if(s->pict_type == FF_P_TYPE){
fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
break;
@ -914,9 +914,9 @@ static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
r->mb_type[mb_pos] = r->block_type;
if(r->block_type == RV34_MB_SKIP){
if(s->pict_type == P_TYPE)
if(s->pict_type == FF_P_TYPE)
r->mb_type[mb_pos] = RV34_MB_P_16x16;
if(s->pict_type == B_TYPE)
if(s->pict_type == FF_B_TYPE)
r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
}
r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
@ -1107,7 +1107,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, uint8_t* buf, int buf_s
r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
}
s->pict_type = r->si.type ? r->si.type : I_TYPE;
s->pict_type = r->si.type ? r->si.type : FF_I_TYPE;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
@ -1270,7 +1270,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
r->loop_filter(r);
ff_er_frame_end(s);
MPV_frame_end(s);
if (s->pict_type == B_TYPE || s->low_delay) {
if (s->pict_type == FF_B_TYPE || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;

@ -229,7 +229,7 @@ static int rv40_decode_mb_info(RV34DecContext *r)
prev_type = i;
}
}
if(s->pict_type == P_TYPE){
if(s->pict_type == FF_P_TYPE){
prev_type = block_num_to_ptype_vlc_num[prev_type];
q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
if(q < PBTYPE_ESCAPE)

@ -3800,7 +3800,7 @@ static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
assert(coef_sum < INT_MAX);

if(pict->pict_type == I_TYPE){
if(pict->pict_type == FF_I_TYPE){
s->m.current_picture.mb_var_sum= coef_sum;
s->m.current_picture.mc_mb_var_sum= 0;
}else{
@ -4198,7 +4198,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
frame_start(s);

s->m.current_picture_ptr= &s->m.current_picture;
if(pict->pict_type == P_TYPE){
if(pict->pict_type == FF_P_TYPE){
int block_width = (width +15)>>4;
int block_height= (height+15)>>4;
int stride= s->current_picture.linesize[0];
@ -4247,13 +4247,13 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,

redo_frame:

if(pict->pict_type == I_TYPE)
if(pict->pict_type == FF_I_TYPE)
s->spatial_decomposition_count= 5;
else
s->spatial_decomposition_count= 5;

s->m.pict_type = pict->pict_type;
s->qbias= pict->pict_type == P_TYPE ? 2 : 0;
s->qbias= pict->pict_type == FF_P_TYPE ? 2 : 0;

common_init_after_header(avctx);

@ -4286,7 +4286,7 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);

if( plane_index==0
&& pict->pict_type == P_TYPE
&& pict->pict_type == FF_P_TYPE
&& !(avctx->flags&CODEC_FLAG_PASS2)
&& s->m.me.scene_change_score > s->avctx->scenechange_threshold){
ff_init_range_encoder(c, buf, buf_size);
@ -4337,7 +4337,7 @@ redo_frame:
if(!QUANTIZE2)
quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
if(orientation==0)
decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == P_TYPE, 0);
decorrelate(s, b, b->ibuf, b->stride, pict->pict_type == FF_P_TYPE, 0);
encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
assert(b->parent==NULL || b->parent->stride == b->stride*2);
if(orientation==0)
@ -4364,7 +4364,7 @@ redo_frame:
predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
}else{
//ME/MC only
if(pict->pict_type == I_TYPE){
if(pict->pict_type == FF_I_TYPE){
for(y=0; y<h; y++){
for(x=0; x<w; x++){
s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]=

@ -125,7 +125,7 @@ static int sp5x_decode_frame(AVCodecContext *avctx,
return -1;
}

s->picture.pict_type = I_TYPE;
s->picture.pict_type = FF_I_TYPE;
s->picture.key_frame = 1;

for (i = 0; i < 3; i++)

@ -575,7 +575,7 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
if(s->pict_type==4)
return -1;

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {

/* unknown fields */
if (s->f_code == 0x50 || s->f_code == 0x60) {
@ -678,11 +678,11 @@ static int svq1_decode_frame(AVCodecContext *avctx,

//FIXME this avoids some confusion for "B frames" without 2 references
//this should be removed after libavcodec can handle more flexible picture types & ordering
if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;
if(s->pict_type==FF_B_TYPE && s->last_picture_ptr==NULL) return buf_size;

if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
|| avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;

@ -705,13 +705,13 @@ static int svq1_decode_frame(AVCodecContext *avctx,

current = s->current_picture.data[i];

if(s->pict_type==B_TYPE){
if(s->pict_type==FF_B_TYPE){
previous = s->next_picture.data[i];
}else{
previous = s->last_picture.data[i];
}

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
/* keyframe */
for (y=0; y < height; y+=16) {
for (x=0; x < width; x+=16) {

|
@ -82,7 +82,7 @@ static void svq1_write_header(SVQ1Context *s, int frame_type)
/* frame type */
put_bits(&s->pb, 2, frame_type - 1);

if (frame_type == I_TYPE) {
if (frame_type == FF_I_TYPE) {

/* no checksum since frame code is 0x20 */

@ -283,7 +283,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
block_width = (width + 15) / 16;
block_height = (height + 15) / 16;

if(s->picture.pict_type == P_TYPE){
if(s->picture.pict_type == FF_P_TYPE){
s->m.avctx= s->avctx;
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
@ -389,11 +389,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
ff_init_block_index(&s->m);
ff_update_block_index(&s->m);

if(s->picture.pict_type == I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
if(s->picture.pict_type == FF_I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
for(i=0; i<6; i++){
init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
}
if(s->picture.pict_type == P_TYPE){
if(s->picture.pict_type == FF_P_TYPE){
const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
score[0]= vlc[1]*lambda;
@ -408,7 +408,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane

best=0;

if(s->picture.pict_type == P_TYPE){
if(s->picture.pict_type == FF_P_TYPE){
const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
int mx, my, pred_x, pred_y, dxy;
int16_t *motion_ptr;
@ -533,8 +533,8 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
init_put_bits(&s->pb, buf, buf_size);

*p = *pict;
p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? P_TYPE : I_TYPE;
p->key_frame = p->pict_type == I_TYPE;
p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? FF_P_TYPE : FF_I_TYPE;
p->key_frame = p->pict_type == FF_I_TYPE;

svq1_write_header(s, p->pict_type);
for(i=0; i<3; i++){

@ -419,10 +419,10 @@ static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
h->topright_samples_available = 0xFFFF;

if (mb_type == 0) { /* SKIP */
if (s->pict_type == P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
}

@ -483,15 +483,15 @@ static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
}else
memset (&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

if (s->pict_type != B_TYPE)
if (s->pict_type != FF_B_TYPE)
break;
}

/* decode motion vector(s) and form prediction(s) */
if (s->pict_type == P_TYPE) {
if (s->pict_type == FF_P_TYPE) {
if(svq3_mc_dir (h, (mb_type - 1), mode, 0, 0) < 0)
return -1;
} else { /* B_TYPE */
} else { /* FF_B_TYPE */
if (mb_type != 2) {
if(svq3_mc_dir (h, 0, mode, 0, 0) < 0)
return -1;
@ -590,11 +590,11 @@ static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
mb_type = MB_TYPE_INTRA16x16;
}

if (!IS_INTER(mb_type) && s->pict_type != I_TYPE) {
if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
for (i=0; i < 4; i++) {
memset (s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
for (i=0; i < 4; i++) {
memset (s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
@ -603,12 +603,12 @@ static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
if (!IS_INTRA4x4(mb_type)) {
memset (h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
}
if (!IS_SKIP(mb_type) || s->pict_type == B_TYPE) {
if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
memset (h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
s->dsp.clear_blocks(h->mb);
}

if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == B_TYPE)) {
if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
if ((vlc = svq3_get_ue_golomb (&s->gb)) >= 48){
av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
return -1;
@ -616,7 +616,7 @@ static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {

cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
}
if (IS_INTRA16x16(mb_type) || (s->pict_type != I_TYPE && s->adaptive_quant && cbp)) {
if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
s->qscale += svq3_get_se_golomb (&s->gb);

if (s->qscale > 31){
@ -859,21 +859,21 @@ static int svq3_decode_frame (AVCodecContext *avctx,

/* for hurry_up==5 */
s->current_picture.pict_type = s->pict_type;
s->current_picture.key_frame = (s->pict_type == I_TYPE);
s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);

/* Skip B-frames if we do not have reference frames. */
if (s->last_picture_ptr == NULL && s->pict_type == B_TYPE) return 0;
if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE) return 0;
/* Skip B-frames if we are in a hurry. */
if (avctx->hurry_up && s->pict_type == B_TYPE) return 0;
if (avctx->hurry_up && s->pict_type == FF_B_TYPE) return 0;
/* Skip everything if we are in a hurry >= 5. */
if (avctx->hurry_up >= 5) return 0;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
|| avctx->skip_frame >= AVDISCARD_ALL)
return 0;

if (s->next_p_frame_damaged) {
if (s->pict_type == B_TYPE)
if (s->pict_type == FF_B_TYPE)
return 0;
else
s->next_p_frame_damaged = 0;
@ -882,7 +882,7 @@ static int svq3_decode_frame (AVCodecContext *avctx,
if (frame_start (h) < 0)
return -1;

if (s->pict_type == B_TYPE) {
if (s->pict_type == FF_B_TYPE) {
h->frame_num_offset = (h->slice_num - h->prev_frame_num);

if (h->frame_num_offset < 0) {
@ -930,9 +930,9 @@ static int svq3_decode_frame (AVCodecContext *avctx,

mb_type = svq3_get_ue_golomb (&s->gb);

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
mb_type += 8;
} else if (s->pict_type == B_TYPE && mb_type >= 4) {
} else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
mb_type += 4;
}
if (mb_type > 33 || svq3_decode_mb (h, mb_type)) {
@ -944,9 +944,9 @@ static int svq3_decode_frame (AVCodecContext *avctx,
hl_decode_mb (h);
}

if (s->pict_type != B_TYPE && !s->low_delay) {
if (s->pict_type != FF_B_TYPE && !s->low_delay) {
s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
(s->pict_type == P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
(s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
}
}

@ -955,7 +955,7 @@ static int svq3_decode_frame (AVCodecContext *avctx,

MPV_frame_end(s);

if (s->pict_type == B_TYPE || s->low_delay) {
if (s->pict_type == FF_B_TYPE || s->low_delay) {
*(AVFrame *) data = *(AVFrame *) &s->current_picture;
} else {
*(AVFrame *) data = *(AVFrame *) &s->last_picture;

@ -400,7 +400,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
my = s->mv[dir][0][1];

// store motion vectors for further use in B frames
if(s->pict_type == P_TYPE) {
if(s->pict_type == FF_P_TYPE) {
s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
s->current_picture.motion_val[1][s->block_index[0]][1] = my;
}
@ -1041,26 +1041,26 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->s.pict_type = get_bits1(gb);
if (v->s.avctx->max_b_frames) {
if (!v->s.pict_type) {
if (get_bits1(gb)) v->s.pict_type = I_TYPE;
else v->s.pict_type = B_TYPE;
} else v->s.pict_type = P_TYPE;
} else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
if (get_bits1(gb)) v->s.pict_type = FF_I_TYPE;
else v->s.pict_type = FF_B_TYPE;
} else v->s.pict_type = FF_P_TYPE;
} else v->s.pict_type = v->s.pict_type ? FF_P_TYPE : FF_I_TYPE;

v->bi_type = 0;
if(v->s.pict_type == B_TYPE) {
if(v->s.pict_type == FF_B_TYPE) {
v->bfraction = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction];
if(v->bfraction == 0) {
v->s.pict_type = BI_TYPE;
v->s.pict_type = FF_BI_TYPE;
}
}
if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
skip_bits(gb, 7); // skip buffer fullness

/* calculate RND */
if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
v->rnd = 1;
if(v->s.pict_type == P_TYPE)
if(v->s.pict_type == FF_P_TYPE)
v->rnd ^= 1;

/* Quantizer stuff */
@ -1092,18 +1092,18 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
if (v->postprocflag) v->postproc = get_bits1(gb);
}
else
if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);
if (v->multires && v->s.pict_type != FF_B_TYPE) v->respic = get_bits(gb, 2);

if(v->res_x8 && (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)){
if(v->res_x8 && (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)){
v->x8_type = get_bits1(gb);
}else v->x8_type = 0;
//av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
// (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
// (v->s.pict_type == FF_P_TYPE) ? 'P' : ((v->s.pict_type == FF_I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);

if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;

switch(v->s.pict_type) {
case P_TYPE:
case FF_P_TYPE:
if (v->pq < 5) v->tt_index = 0;
else if(v->pq < 13) v->tt_index = 1;
else v->tt_index = 2;
@ -1186,7 +1186,7 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->ttfrm = TT_8X8;
}
break;
case B_TYPE:
case FF_B_TYPE:
if (v->pq < 5) v->tt_index = 0;
else if(v->pq < 13) v->tt_index = 1;
else v->tt_index = 2;
@ -1233,7 +1233,7 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
/* AC Syntax */
v->c_ac_table_index = decode012(gb);
if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
{
v->y_ac_table_index = decode012(gb);
}
@ -1241,8 +1241,8 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->s.dc_table_index = get_bits1(gb);
}

if(v->s.pict_type == BI_TYPE) {
v->s.pict_type = B_TYPE;
if(v->s.pict_type == FF_BI_TYPE) {
v->s.pict_type = FF_B_TYPE;
v->bi_type = 1;
}
return 0;
@ -1261,19 +1261,19 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
}
switch(get_unary(gb, 0, 4)) {
case 0:
v->s.pict_type = P_TYPE;
v->s.pict_type = FF_P_TYPE;
break;
case 1:
v->s.pict_type = B_TYPE;
v->s.pict_type = FF_B_TYPE;
break;
case 2:
v->s.pict_type = I_TYPE;
v->s.pict_type = FF_I_TYPE;
break;
case 3:
v->s.pict_type = BI_TYPE;
v->s.pict_type = FF_BI_TYPE;
break;
case 4:
v->s.pict_type = P_TYPE; // skipped pic
v->s.pict_type = FF_P_TYPE; // skipped pic
v->p_frame_skipped = 1;
return 0;
}
@ -1294,11 +1294,11 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if(v->interlace)
v->uvsamp = get_bits1(gb);
if(v->finterpflag) v->interpfrm = get_bits1(gb);
if(v->s.pict_type == B_TYPE) {
if(v->s.pict_type == FF_B_TYPE) {
v->bfraction = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = ff_vc1_bfraction_lut[v->bfraction];
if(v->bfraction == 0) {
v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
v->s.pict_type = FF_BI_TYPE; /* XXX: should not happen here */
}
}
pqindex = get_bits(gb, 5);
@ -1320,11 +1320,11 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
v->pquantizer = get_bits1(gb);

if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;

switch(v->s.pict_type) {
case I_TYPE:
case BI_TYPE:
case FF_I_TYPE:
case FF_BI_TYPE:
status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
if (status < 0) return -1;
av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
@ -1340,7 +1340,7 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
}
}
break;
case P_TYPE:
case FF_P_TYPE:
if(v->postprocflag)
v->postproc = get_bits1(gb);
if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
@ -1431,7 +1431,7 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
v->ttfrm = TT_8X8;
}
break;
case B_TYPE:
case FF_B_TYPE:
if(v->postprocflag)
v->postproc = get_bits1(gb);
if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
@ -1485,20 +1485,20 @@ static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)

/* AC Syntax */
v->c_ac_table_index = decode012(gb);
if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
{
v->y_ac_table_index = decode012(gb);
}
/* DC Syntax */
v->s.dc_table_index = get_bits1(gb);
if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
if ((v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE) && v->dquant) {
av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
vop_dquant_decoding(v);
}

v->bi_type = 0;
if(v->s.pict_type == BI_TYPE) {
v->s.pict_type = B_TYPE;
if(v->s.pict_type == FF_BI_TYPE) {
v->s.pict_type = FF_B_TYPE;
v->bi_type = 1;
}
return 0;
@ -3736,7 +3736,7 @@ static void vc1_decode_skip_blocks(VC1Context *v)
ff_draw_horiz_band(s, s->mb_y * 16, 16);
s->first_slice_line = 0;
}
s->pict_type = P_TYPE;
s->pict_type = FF_P_TYPE;
}

static void vc1_decode_blocks(VC1Context *v)
@ -3748,19 +3748,19 @@ static void vc1_decode_blocks(VC1Context *v)
}else{

switch(v->s.pict_type) {
case I_TYPE:
case FF_I_TYPE:
if(v->profile == PROFILE_ADVANCED)
vc1_decode_i_blocks_adv(v);
else
vc1_decode_i_blocks(v);
break;
case P_TYPE:
case FF_P_TYPE:
if(v->p_frame_skipped)
vc1_decode_skip_blocks(v);
else
vc1_decode_p_blocks(v);
break;
case B_TYPE:
case FF_B_TYPE:
if(v->bi_type){
if(v->profile == PROFILE_ADVANCED)
vc1_decode_i_blocks_adv(v);
@ -4030,24 +4030,24 @@ static int vc1_decode_frame(AVCodecContext *avctx,
}
}

if(s->pict_type != I_TYPE && !v->res_rtm_flag){
if(s->pict_type != FF_I_TYPE && !v->res_rtm_flag){
av_free(buf2);
return -1;
}

// for hurry_up==5
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

/* skip B-frames if we don't have reference frames */
if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)){
av_free(buf2);
return -1;//buf_size;
}
/* skip b frames if we are in a hurry */
if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
|| avctx->skip_frame >= AVDISCARD_ALL) {
av_free(buf2);
return buf_size;
@ -4059,7 +4059,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
}

if(s->next_p_frame_damaged){
if(s->pict_type==B_TYPE)
if(s->pict_type==FF_B_TYPE)
return buf_size;
else
s->next_p_frame_damaged=0;
@ -4086,7 +4086,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,

assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
assert(s->current_picture.pict_type == s->pict_type);
if (s->pict_type == B_TYPE || s->low_delay) {
if (s->pict_type == FF_B_TYPE || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
} else if (s->last_picture_ptr != NULL) {
*pict= *(AVFrame*)s->last_picture_ptr;

@ -127,9 +127,6 @@ enum TransformTypes {
};
//@}

/** One more frame type */
#define BI_TYPE FF_BI_TYPE

enum CodingSet {
CS_HIGH_MOT_INTRA = 0,
CS_HIGH_MOT_INTER,

@ -128,7 +128,7 @@ return -1;
decode_ext_header(w);

s->pict_type = get_bits1(&s->gb) + 1;
if(s->pict_type == I_TYPE){
if(s->pict_type == FF_I_TYPE){
code = get_bits(&s->gb, 7);
av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code);
}
@ -143,7 +143,7 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s)
{
Wmv2Context * const w= (Wmv2Context*)s;

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
if(w->j_type_bit) w->j_type= get_bits1(&s->gb);
else w->j_type= 0; //FIXME check

@ -354,7 +354,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])

if(w->j_type) return 0;

if (s->pict_type == P_TYPE) {
if (s->pict_type == FF_P_TYPE) {
if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
/* skip mb */
s->mb_intra = 0;
@ -431,7 +431,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
}
}
} else {
//if(s->pict_type==P_TYPE)
//if(s->pict_type==FF_P_TYPE)
// printf("%d%d ", s->inter_intra_pred, cbp);
//printf("I at %d %d %d %06X\n", s->mb_x, s->mb_y, ((cbp&3)? 1 : 0) +((cbp&0x3C)? 2 : 0), show_bits(&s->gb, 24));
s->ac_pred = get_bits1(&s->gb);

@ -84,7 +84,7 @@ int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)
Wmv2Context * const w= (Wmv2Context*)s;

put_bits(&s->pb, 1, s->pict_type - 1);
if(s->pict_type == I_TYPE){
if(s->pict_type == FF_I_TYPE){
put_bits(&s->pb, 7, 0);
}
put_bits(&s->pb, 5, s->qscale);
@ -100,7 +100,7 @@ int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number)

assert(s->flipflop_rounding);

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
assert(s->no_rounding==1);
if(w->j_type_bit) put_bits(&s->pb, 1, w->j_type);

@ -208,7 +208,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
printf("cbp=%x %x\n", cbp, coded_cbp);
#endif

if (s->pict_type == I_TYPE) {
if (s->pict_type == FF_I_TYPE) {
put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][0]);
} else {

@ -90,9 +90,9 @@ xvmc_render_state_t * render,* last, * next;
render->p_past_surface = NULL;

switch(s->pict_type){
case I_TYPE:
case FF_I_TYPE:
return 0;// no prediction from other frames
case B_TYPE:
case FF_B_TYPE:
next = (xvmc_render_state_t*)s->next_picture.data[2];
assert(next!=NULL);
assert(next->state & MP_XVMC_STATE_PREDICTION);
@ -100,7 +100,7 @@ xvmc_render_state_t * render,* last, * next;
if(next->magic != MP_XVMC_RENDER_MAGIC) return -1;
render->p_future_surface = next->p_surface;
//no return here, going to set forward prediction
case P_TYPE:
case FF_P_TYPE:
last = (xvmc_render_state_t*)s->last_picture.data[2];
if(last == NULL)// && !s->first_field)
last = render;//predict second field from the first