mirror of https://github.com/xenia-project/FFmpeg.git
AVCodec.flush()
ff_draw_horiz_band() in coded order / cleanup

Originally committed as revision 2064 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent bc3513865a
commit 7a06ff148d
@@ -15,8 +15,8 @@ extern "C" {
 
 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4669
-#define LIBAVCODEC_BUILD_STR "4669"
+#define LIBAVCODEC_BUILD 4670
+#define LIBAVCODEC_BUILD_STR "4670"
 
 #define LIBAVCODEC_IDENT "FFmpeg" LIBAVCODEC_VERSION "b" LIBAVCODEC_BUILD_STR
 
@@ -473,7 +473,7 @@ typedef struct AVCodecContext {
      * - decoding: set by user.
      */
     void (*draw_horiz_band)(struct AVCodecContext *s,
-                            uint8_t **src_ptr, int linesize,
+                            AVFrame *src, int offset[4],
                             int y, int width, int height);
 
     /* audio only */
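
For reference, a user-side slice callback under the new prototype might look like the sketch below. It is not part of this commit: the name my_draw_horiz_band and the display hand-off are placeholders, and the interpretation of offset[] (plane i of the slice starts at src->data[i] + offset[i]) follows how the decoder hunks later in this diff fill the array.

#include "avcodec.h"

/* Hypothetical application callback matching the new draw_horiz_band prototype:
 * plane i of the reported slice starts at src->data[i] + offset[i]. */
static void my_draw_horiz_band(struct AVCodecContext *avctx, AVFrame *src,
                               int offset[4], int y, int width, int height)
{
    uint8_t *luma     = src->data[0] + offset[0]; /* rows y .. y+height-1 */
    uint8_t *chroma_u = src->data[1] + offset[1]; /* subsampled planes */
    uint8_t *chroma_v = src->data[2] + offset[2];
    /* ... hand the slice to display / colorspace conversion ... */
    (void)luma; (void)chroma_u; (void)chroma_v; (void)width;
}

/* during setup, after avcodec_open():
 *     avctx->draw_horiz_band = my_draw_horiz_band; */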
@@ -1209,6 +1209,7 @@ typedef struct AVCodec {
     int capabilities;
     const AVOption *options;
     struct AVCodec *next;
+    void (*flush)(AVCodecContext *);
 } AVCodec;
 
 /**
@@ -1400,7 +1401,12 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
 void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
 void avcodec_default_free_buffers(AVCodecContext *s);
 
+/**
+ * opens / inits the AVCodecContext.
+ * not thread save!
+ */
 int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+
 int avcodec_decode_audio(AVCodecContext *avctx, int16_t *samples,
                          int *frame_size_ptr,
                          uint8_t *buf, int buf_size);
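
The declarations above are the basic open/decode entry points. A compressed usage sketch follows; it is not from this commit, uses placeholder buffer names, keeps error handling minimal, and leans on other libavcodec calls of this era (avcodec_find_decoder, avcodec_alloc_context, avcodec_close, av_free) that do not appear in this diff.

#include "avcodec.h"

/* Sketch: open a decoder, decode one audio packet, close. Assumes
 * avcodec_init()/avcodec_register_all() ran at startup; "inbuf" and
 * "samples" are caller-provided placeholder buffers. */
int decode_one_packet(uint8_t *inbuf, int inbuf_size, int16_t *samples)
{
    AVCodec *codec = avcodec_find_decoder(CODEC_ID_MP2);
    AVCodecContext *c = avcodec_alloc_context();
    int frame_size = 0, ret;

    if (!codec || !c || avcodec_open(c, codec) < 0) /* "not thread save": do not open concurrently */
        return -1;

    ret = avcodec_decode_audio(c, samples, &frame_size, inbuf, inbuf_size);

    avcodec_close(c);
    av_free(c);
    return ret < 0 ? ret : frame_size; /* frame_size = decoded bytes in samples[] */
}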
@@ -723,6 +723,7 @@ AVCodec mpeg4_decoder = {
     ff_h263_decode_frame,
     CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED,
     .options = mpeg4_decoptions,
+    .flush= ff_mpeg_flush,
 };
 
 AVCodec h263_decoder = {
@@ -735,6 +736,7 @@ AVCodec h263_decoder = {
     ff_h263_decode_end,
     ff_h263_decode_frame,
     CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED,
+    .flush= ff_mpeg_flush,
 };
 
 AVCodec msmpeg4v1_decoder = {
@@ -673,7 +673,7 @@ static void decode_bgr_bitstream(HYuvContext *s, int count){
 
 static void draw_slice(HYuvContext *s, int y){
     int h, cy;
-    uint8_t *src_ptr[3];
+    int offset[4];
 
     if(s->avctx->draw_horiz_band==NULL)
         return;
@@ -686,13 +686,14 @@ static void draw_slice(HYuvContext *s, int y){
     }else{
         cy= y;
     }
 
-    src_ptr[0] = s->picture.data[0] + s->picture.linesize[0]*y;
-    src_ptr[1] = s->picture.data[1] + s->picture.linesize[1]*cy;
-    src_ptr[2] = s->picture.data[2] + s->picture.linesize[2]*cy;
+    offset[0] = s->picture.linesize[0]*y;
+    offset[1] = s->picture.linesize[1]*cy;
+    offset[2] = s->picture.linesize[2]*cy;
+    offset[3] = 0;
     emms_c();
 
-    s->avctx->draw_horiz_band(s->avctx, src_ptr, s->picture.linesize[0], y, s->width, h);
+    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, s->width, h);
 
     s->last_slice_end= y + h;
 }
@@ -2356,4 +2356,5 @@ AVCodec mpeg_decoder = {
     mpeg_decode_end,
     mpeg_decode_frame,
     CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED,
+    .flush= ff_mpeg_flush,
 };
@@ -2725,29 +2725,26 @@ static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move
  * @param h is the normal height, this will be reduced automatically if needed for the last row
  */
 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
-    if (   s->avctx->draw_horiz_band
-        && (s->last_picture_ptr || s->low_delay) ) {
+    if (s->avctx->draw_horiz_band) {
-        uint8_t *src_ptr[3];
-        int offset;
+        int offset[4];
         h= FFMIN(h, s->height - y);
 
-        if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
-            offset = 0;
-        else
-            offset = y * s->linesize;
-
-        if(s->pict_type==B_TYPE || s->low_delay){
-            src_ptr[0] = s->current_picture.data[0] + offset;
-            src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
-            src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
-        } else {
-            src_ptr[0] = s->last_picture.data[0] + offset;
-            src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
-            src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
+        if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME){
+            offset[0]=
+            offset[1]=
+            offset[2]=
+            offset[3]= 0;
+        }else{
+            offset[0]= y * s->linesize;;
+            offset[1]=
+            offset[2]= (y>>1) * s->uvlinesize;;
+            offset[3]= 0;
+        }
 
         emms_c();
 
-        s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
+        s->avctx->draw_horiz_band(s->avctx, (AVFrame*)s->current_picture_ptr, offset,
                                   y, s->width, h);
     }
 }
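
As the @param note says, h is clamped internally for the last, possibly shorter, row, so callers can always pass the nominal row height; with this change the reported picture is always the current (coded-order) one. Schematically, an mpegvideo-based decoder drives the function once per finished macroblock row, roughly as in the sketch below. The loop and the name report_decoded_rows are illustrative, not copied from any decoder.

/* Illustrative driver loop: report each reconstructed 16-pixel macroblock
 * row to the user callback; the final row is clamped inside ff_draw_horiz_band(). */
static void report_decoded_rows(MpegEncContext *s)
{
    int mb_y;
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        /* ... macroblock row mb_y has just been reconstructed ... */
        ff_draw_horiz_band(s, mb_y * 16, 16);
    }
}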
@@ -3076,6 +3073,18 @@ int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size)
     return 0;
 }
 
+void ff_mpeg_flush(AVCodecContext *avctx){
+    int i;
+    MpegEncContext *s = avctx->priv_data;
+ 
+    for(i=0; i<MAX_PICTURE_COUNT; i++){
+       if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
+                                    || s->picture[i].type == FF_BUFFER_TYPE_USER))
+         avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
+    }
+    s->last_picture_ptr = s->next_picture_ptr = NULL;
+}
+
 #ifdef CONFIG_ENCODERS
 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
 {
@@ -713,6 +713,7 @@ void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w,
                          int src_x, int src_y, int w, int h);
 #define END_NOT_FOUND -100
 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size);
+void ff_mpeg_flush(AVCodecContext *avctx);
 void ff_print_debug_info(MpegEncContext *s, Picture *pict);
 
 void ff_er_frame_start(MpegEncContext *s);
@@ -838,4 +838,5 @@ AVCodec svq1_decoder = {
     svq1_decode_end,
     svq1_decode_frame,
     CODEC_CAP_DR1,
+    .flush= ff_mpeg_flush,
 };
@@ -590,39 +590,13 @@ void avcodec_init(void)
     dsputil_static_init();
 }
 
-/* this can be called after seeking and before trying to decode the next keyframe */
+/**
+ * Flush buffers, should be called when seeking or when swicthing to a different stream.
+ */
 void avcodec_flush_buffers(AVCodecContext *avctx)
 {
-    int i;
-    MpegEncContext *s = avctx->priv_data;
-
-    switch(avctx->codec_id){
-    case CODEC_ID_MPEG1VIDEO:
-    case CODEC_ID_H263:
-    case CODEC_ID_RV10:
-//    case CODEC_ID_MJPEG:
-//    case CODEC_ID_MJPEGB:
-    case CODEC_ID_MPEG4:
-    case CODEC_ID_MSMPEG4V1:
-    case CODEC_ID_MSMPEG4V2:
-    case CODEC_ID_MSMPEG4V3:
-    case CODEC_ID_WMV1:
-    case CODEC_ID_WMV2:
-    case CODEC_ID_H263P:
-    case CODEC_ID_H263I:
-    case CODEC_ID_FLV1:
-    case CODEC_ID_SVQ1:
-        for(i=0; i<MAX_PICTURE_COUNT; i++){
-           if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
-                                        || s->picture[i].type == FF_BUFFER_TYPE_USER))
-             avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
-        }
-        s->last_picture_ptr = s->next_picture_ptr = NULL;
-        break;
-    default:
-        //FIXME
-        break;
-    }
+    if(avctx->codec->flush)
+        avctx->codec->flush(avctx);
 }
 
 void avcodec_default_free_buffers(AVCodecContext *s){
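
With the codec_id switch gone, flushing is a one-liner for callers: after a seek, drop buffered reference frames before decoding from the new position. A minimal sketch, with a placeholder function name and not part of this commit:

/* Sketch: reset decoder state after repositioning the input stream. */
static void handle_seek(AVCodecContext *dec_ctx)
{
    avcodec_flush_buffers(dec_ctx); /* dispatches to AVCodec.flush, e.g. ff_mpeg_flush */
    /* ... continue feeding buffers from the new file position ... */
}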