mpegvideo: Add ff_ prefix to nonstatic functions
Signed-off-by: Martin Storsjö <martin@martin.st>
parent 0ca1bdb37d
commit efd29844eb
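The rename is purely mechanical: every non-static function in the mpegvideo layer gains the ff_ prefix FFmpeg uses for library-internal symbols, while file-local static helpers keep their short names, and all declarations and call sites are updated to match. A minimal sketch of the pattern, using declarations that appear in the libavcodec/mpegvideo.h hunk below (the trailing comments are annotations added here, not part of the patch):

/* libavcodec/mpegvideo.h -- non-static, shared across the library, so ff_ applies */
void ff_MPV_decode_defaults(MpegEncContext *s);   /* was MPV_decode_defaults() */
int  ff_MPV_common_init(MpegEncContext *s);       /* was MPV_common_init()     */
void ff_MPV_common_end(MpegEncContext *s);        /* was MPV_common_end()      */

/* Call sites change the same way, e.g. in a decoder init function:
 *     if (ff_MPV_common_init(s) < 0)             // was MPV_common_init(s)
 *         return -1;
 */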
@@ -103,7 +103,7 @@ static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
     dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
 }
 
-void MPV_common_init_axp(MpegEncContext *s)
+void ff_MPV_common_init_axp(MpegEncContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
@@ -38,17 +38,17 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, DCTELEM *block,
 void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, DCTELEM *block,
                                        int n, int qscale);
 
-void MPV_common_init_arm(MpegEncContext *s)
+void ff_MPV_common_init_arm(MpegEncContext *s)
 {
     /* IWMMXT support is a superset of armv5te, so
      * allow optimized functions for armv5te unless
      * a better iwmmxt function exists
      */
 #if HAVE_ARMV5TE
-    MPV_common_init_armv5te(s);
+    ff_MPV_common_init_armv5te(s);
 #endif
 #if HAVE_IWMMXT
-    MPV_common_init_iwmmxt(s);
+    ff_MPV_common_init_iwmmxt(s);
 #endif
 
     if (HAVE_NEON) {
@@ -21,7 +21,7 @@
 
 #include "libavcodec/mpegvideo.h"
 
-void MPV_common_init_iwmmxt(MpegEncContext *s);
-void MPV_common_init_armv5te(MpegEncContext *s);
+void ff_MPV_common_init_iwmmxt(MpegEncContext *s);
+void ff_MPV_common_init_armv5te(MpegEncContext *s);
 
 #endif /* AVCODEC_ARM_MPEGVIDEO_H */
@@ -94,7 +94,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
     ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
 
-void MPV_common_init_armv5te(MpegEncContext *s)
+void ff_MPV_common_init_armv5te(MpegEncContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
@@ -93,7 +93,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
     block_orig[0] = level;
 }
 
-void MPV_common_init_iwmmxt(MpegEncContext *s)
+void ff_MPV_common_init_iwmmxt(MpegEncContext *s)
 {
     if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;
 
@@ -141,7 +141,7 @@ static int dct_quantize_bfin (MpegEncContext *s,
     return last_non_zero;
 }
 
-void MPV_common_init_bfin (MpegEncContext *s)
+void ff_MPV_common_init_bfin (MpegEncContext *s)
 {
 /* s->dct_quantize= dct_quantize_bfin; */
 }
@ -671,7 +671,7 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
|
||||
AVSContext *h = avctx->priv_data;
|
||||
MpegEncContext * const s = &h->s;
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
ff_cavsdsp_init(&h->cdsp, avctx);
|
||||
s->avctx = avctx;
|
||||
|
||||
|
@ -469,7 +469,7 @@ static int decode_pic(AVSContext *h) {
|
||||
|
||||
if (!s->context_initialized) {
|
||||
s->avctx->idct_algo = FF_IDCT_CAVS;
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ static void decode_mb(MpegEncContext *s, int ref)
|
||||
ff_h264_hl_decode_mb(h);
|
||||
} else {
|
||||
assert(ref == 0);
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -89,9 +89,9 @@ AVCodec ff_flv_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_FLV1,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
|
||||
};
|
||||
|
@ -76,7 +76,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx){
|
||||
MpegEncContext * const s = &h->s;
|
||||
|
||||
// set defaults
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
s->avctx = avctx;
|
||||
|
||||
s->width = s->avctx->coded_width;
|
||||
@ -221,7 +221,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
|
||||
s->mb_skipped = 1;
|
||||
h->mtype &= ~MB_TYPE_H261_FIL;
|
||||
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -349,7 +349,7 @@ intra:
|
||||
s->block_last_index[i]= -1;
|
||||
}
|
||||
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
|
||||
return SLICE_OK;
|
||||
}
|
||||
@ -565,7 +565,7 @@ retry:
|
||||
init_get_bits(&s->gb, buf, buf_size*8);
|
||||
|
||||
if(!s->context_initialized){
|
||||
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
|
||||
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -588,7 +588,7 @@ retry:
|
||||
if (s->width != avctx->coded_width || s->height != avctx->coded_height){
|
||||
ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
|
||||
s->parse_context.buffer=0;
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
s->parse_context= pc;
|
||||
}
|
||||
if (!s->context_initialized) {
|
||||
@ -606,7 +606,7 @@ retry:
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return get_consumed_bytes(s, buf_size);
|
||||
|
||||
if(MPV_frame_start(s, avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, avctx) < 0)
|
||||
return -1;
|
||||
|
||||
ff_er_frame_start(s);
|
||||
@ -620,7 +620,7 @@ retry:
|
||||
break;
|
||||
h261_decode_gob(h);
|
||||
}
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
|
||||
assert(s->current_picture.f.pict_type == s->pict_type);
|
||||
@ -637,7 +637,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx)
|
||||
H261Context *h= avctx->priv_data;
|
||||
MpegEncContext *s = &h->s;
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -326,9 +326,9 @@ AVCodec ff_h261_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_H261,
|
||||
.priv_data_size = sizeof(H261Context),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
|
||||
};
|
||||
|
@ -54,7 +54,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
|
||||
s->workaround_bugs= avctx->workaround_bugs;
|
||||
|
||||
// set defaults
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
s->quant_precision=5;
|
||||
s->decode_mb= ff_h263_decode_mb;
|
||||
s->low_delay= 1;
|
||||
@ -110,7 +110,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
|
||||
|
||||
/* for h263, we allocate the images after having read the header */
|
||||
if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4)
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
ff_h263_decode_init_vlc(s);
|
||||
@ -122,7 +122,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -220,7 +220,7 @@ static int decode_slice(MpegEncContext *s){
|
||||
if(ret<0){
|
||||
const int xy= s->mb_x + s->mb_y*s->mb_stride;
|
||||
if(ret==SLICE_END){
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
if(s->loop_filter)
|
||||
ff_h263_loop_filter(s);
|
||||
|
||||
@ -232,7 +232,7 @@ static int decode_slice(MpegEncContext *s){
|
||||
if(++s->mb_x >= s->mb_width){
|
||||
s->mb_x=0;
|
||||
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
|
||||
MPV_report_decode_progress(s);
|
||||
ff_MPV_report_decode_progress(s);
|
||||
s->mb_y++;
|
||||
}
|
||||
return 0;
|
||||
@ -247,13 +247,13 @@ static int decode_slice(MpegEncContext *s){
|
||||
return -1;
|
||||
}
|
||||
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
if(s->loop_filter)
|
||||
ff_h263_loop_filter(s);
|
||||
}
|
||||
|
||||
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
|
||||
MPV_report_decode_progress(s);
|
||||
ff_MPV_report_decode_progress(s);
|
||||
|
||||
s->mb_x= 0;
|
||||
}
|
||||
@ -390,7 +390,7 @@ retry:
|
||||
s->bitstream_buffer_size=0;
|
||||
|
||||
if (!s->context_initialized) {
|
||||
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
|
||||
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -572,7 +572,7 @@ retry:
|
||||
/* H.263 could change picture size any time */
|
||||
ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat
|
||||
s->parse_context.buffer=0;
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
s->parse_context= pc;
|
||||
}
|
||||
if (!s->context_initialized) {
|
||||
@ -613,7 +613,7 @@ retry:
|
||||
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
|
||||
}
|
||||
|
||||
if(MPV_frame_start(s, avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, avctx) < 0)
|
||||
return -1;
|
||||
|
||||
if (!s->divx_packed) ff_thread_finish_setup(avctx);
|
||||
@ -631,7 +631,7 @@ retry:
|
||||
ff_er_frame_start(s);
|
||||
|
||||
//the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
|
||||
//which is not available before MPV_frame_start()
|
||||
//which is not available before ff_MPV_frame_start()
|
||||
if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){
|
||||
ret = ff_wmv2_decode_secondary_picture_header(s);
|
||||
if(ret<0) return ret;
|
||||
@ -707,7 +707,7 @@ intrax8_decoded:
|
||||
return -1;
|
||||
}
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
|
||||
assert(s->current_picture.f.pict_type == s->pict_type);
|
||||
|
@ -1100,7 +1100,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
|
||||
MpegEncContext * const s = &h->s;
|
||||
int i;
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
|
||||
s->avctx = avctx;
|
||||
common_init(h);
|
||||
@ -1281,11 +1281,11 @@ int ff_h264_frame_start(H264Context *h){
|
||||
int i;
|
||||
const int pixel_shift = h->pixel_shift;
|
||||
|
||||
if(MPV_frame_start(s, s->avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, s->avctx) < 0)
|
||||
return -1;
|
||||
ff_er_frame_start(s);
|
||||
/*
|
||||
* MPV_frame_start uses pict_type to derive key_frame.
|
||||
* ff_MPV_frame_start uses pict_type to derive key_frame.
|
||||
* This is incorrect for H.264; IDR markings must be used.
|
||||
* Zero here; IDR markings per slice in frame or fields are ORed in later.
|
||||
* See decode_nal_units().
|
||||
@ -1319,7 +1319,7 @@ int ff_h264_frame_start(H264Context *h){
|
||||
|
||||
// We mark the current picture as non-reference after allocating it, so
|
||||
// that if we break out due to an error it can be released automatically
|
||||
// in the next MPV_frame_start().
|
||||
// in the next ff_MPV_frame_start().
|
||||
// SVQ3 as well as most other codecs have only last/next/current and thus
|
||||
// get released even with set reference, besides SVQ3 and others do not
|
||||
// mark frames as reference later "naturally".
|
||||
@ -2562,7 +2562,7 @@ static int field_end(H264Context *h, int in_setup){
|
||||
if (!FIELD_PICTURE)
|
||||
ff_er_frame_end(s);
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
h->current_slice=0;
|
||||
|
||||
@ -2625,7 +2625,7 @@ int ff_h264_get_profile(SPS *sps)
|
||||
|
||||
/**
|
||||
* Decode a slice header.
|
||||
* This will also call MPV_common_init() and frame_start() as needed.
|
||||
* This will also call ff_MPV_common_init() and frame_start() as needed.
|
||||
*
|
||||
* @param h h264context
|
||||
* @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
|
||||
@ -2734,7 +2734,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
free_tables(h, 0);
|
||||
flush_dpb(s->avctx);
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
}
|
||||
if (!s->context_initialized) {
|
||||
if (h != h0) {
|
||||
@ -2806,8 +2806,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
|
||||
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
|
||||
|
||||
if (MPV_common_init(s) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "MPV_common_init() failed.\n");
|
||||
if (ff_MPV_common_init(s) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
|
||||
return -1;
|
||||
}
|
||||
s->first_field = 0;
|
||||
@ -4119,7 +4119,7 @@ av_cold int ff_h264_decode_end(AVCodecContext *avctx)
|
||||
|
||||
ff_h264_free_context(h);
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
|
||||
// memset(h, 0, sizeof(H264Context));
|
||||
|
||||
|
@ -191,8 +191,8 @@ AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need t
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_LJPEG,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = encode_picture_lossless,
|
||||
.close = MPV_encode_end,
|
||||
.close = ff_MPV_encode_end,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
|
||||
};
|
||||
|
@ -80,7 +80,7 @@ static void dct_unquantize_h263_mmi(MpegEncContext *s,
|
||||
}
|
||||
|
||||
|
||||
void MPV_common_init_mmi(MpegEncContext *s)
|
||||
void ff_MPV_common_init_mmi(MpegEncContext *s)
|
||||
{
|
||||
s->dct_unquantize_h263_intra =
|
||||
s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;
|
||||
|
@ -450,9 +450,9 @@ AVCodec ff_mjpeg_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_MJPEG,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
|
||||
};
|
||||
|
@ -1117,7 +1117,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
|
||||
for (i = 0; i < 64; i++)
|
||||
s2->dsp.idct_permutation[i]=i;
|
||||
|
||||
MPV_decode_defaults(s2);
|
||||
ff_MPV_decode_defaults(s2);
|
||||
|
||||
s->mpeg_enc_ctx.avctx = avctx;
|
||||
s->mpeg_enc_ctx.flags = avctx->flags;
|
||||
@ -1219,7 +1219,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
|
||||
if (s1->mpeg_enc_ctx_allocated) {
|
||||
ParseContext pc = s->parse_context;
|
||||
s->parse_context.buffer = 0;
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
s->parse_context = pc;
|
||||
}
|
||||
|
||||
@ -1298,7 +1298,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
|
||||
* if DCT permutation is changed. */
|
||||
memcpy(old_permutation, s->dsp.idct_permutation, 64 * sizeof(uint8_t));
|
||||
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -2;
|
||||
|
||||
quant_matrix_rebuild(s->intra_matrix, old_permutation, s->dsp.idct_permutation);
|
||||
@ -1563,7 +1563,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
|
||||
|
||||
/* start frame decoding */
|
||||
if (s->first_field || s->picture_structure == PICT_FRAME) {
|
||||
if (MPV_frame_start(s, avctx) < 0)
|
||||
if (ff_MPV_frame_start(s, avctx) < 0)
|
||||
return -1;
|
||||
|
||||
ff_er_frame_start(s);
|
||||
@ -1753,13 +1753,13 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
|
||||
s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
|
||||
s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
|
||||
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
|
||||
if (++s->mb_x >= s->mb_width) {
|
||||
const int mb_size = 16 >> s->avctx->lowres;
|
||||
|
||||
ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
|
||||
MPV_report_decode_progress(s);
|
||||
ff_MPV_report_decode_progress(s);
|
||||
|
||||
s->mb_x = 0;
|
||||
s->mb_y += 1 << field_pic;
|
||||
@ -1912,7 +1912,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
|
||||
|
||||
ff_er_frame_end(s);
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
|
||||
*pict = *(AVFrame*)s->current_picture_ptr;
|
||||
@ -2022,7 +2022,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
|
||||
/* start new MPEG-1 context decoding */
|
||||
s->out_format = FMT_MPEG1;
|
||||
if (s1->mpeg_enc_ctx_allocated) {
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
}
|
||||
s->width = avctx->coded_width;
|
||||
s->height = avctx->coded_height;
|
||||
@ -2037,7 +2037,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
|
||||
if (avctx->idct_algo == FF_IDCT_AUTO)
|
||||
avctx->idct_algo = FF_IDCT_SIMPLE;
|
||||
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
exchange_uv(s); // common init reset pblocks, so we swap them here
|
||||
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
|
||||
@ -2478,7 +2478,7 @@ static int mpeg_decode_end(AVCodecContext *avctx)
|
||||
Mpeg1Context *s = avctx->priv_data;
|
||||
|
||||
if (s->mpeg_enc_ctx_allocated)
|
||||
MPV_common_end(&s->mpeg_enc_ctx);
|
||||
ff_MPV_common_end(&s->mpeg_enc_ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
if(MPV_encode_init(avctx) < 0)
|
||||
if(ff_MPV_encode_init(avctx) < 0)
|
||||
return -1;
|
||||
|
||||
if(find_frame_rate_index(s) < 0){
|
||||
@ -954,8 +954,8 @@ AVCodec ff_mpeg1video_encoder = {
|
||||
.id = CODEC_ID_MPEG1VIDEO,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.supported_framerates= avpriv_frame_rate_tab+1,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
|
||||
@ -969,8 +969,8 @@ AVCodec ff_mpeg2video_encoder = {
|
||||
.id = CODEC_ID_MPEG2VIDEO,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.supported_framerates= avpriv_frame_rate_tab+1,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
|
||||
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
|
||||
|
@ -1222,7 +1222,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
|
||||
int ret;
|
||||
static int done = 0;
|
||||
|
||||
if((ret=MPV_encode_init(avctx)) < 0)
|
||||
if((ret=ff_MPV_encode_init(avctx)) < 0)
|
||||
return ret;
|
||||
|
||||
if (!done) {
|
||||
@ -1336,8 +1336,8 @@ AVCodec ff_mpeg4_encoder = {
|
||||
.id = CODEC_ID_MPEG4,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
|
||||
|
@@ -188,17 +188,17 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 
 #if HAVE_MMX
-    MPV_common_init_mmx(s);
+    ff_MPV_common_init_mmx(s);
 #elif ARCH_ALPHA
-    MPV_common_init_axp(s);
+    ff_MPV_common_init_axp(s);
 #elif HAVE_MMI
-    MPV_common_init_mmi(s);
+    ff_MPV_common_init_mmi(s);
 #elif ARCH_ARM
-    MPV_common_init_arm(s);
+    ff_MPV_common_init_arm(s);
 #elif HAVE_ALTIVEC
-    MPV_common_init_altivec(s);
+    ff_MPV_common_init_altivec(s);
 #elif ARCH_BFIN
-    MPV_common_init_bfin(s);
+    ff_MPV_common_init_bfin(s);
 #endif
 
     /* load & permutate scantables
@ -458,7 +458,7 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
return -1; // free() through MPV_common_end()
|
||||
return -1; // free() through ff_MPV_common_end()
|
||||
}
|
||||
|
||||
static void free_duplicate_context(MpegEncContext *s)
|
||||
@ -543,7 +543,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
|
||||
s->bitstream_buffer = NULL;
|
||||
s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
|
||||
|
||||
MPV_common_init(s);
|
||||
ff_MPV_common_init(s);
|
||||
}
|
||||
|
||||
s->avctx->coded_height = s1->avctx->coded_height;
|
||||
@ -615,7 +615,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
|
||||
* The changed fields will not depend upon the
|
||||
* prior state of the MpegEncContext.
|
||||
*/
|
||||
void MPV_common_defaults(MpegEncContext *s)
|
||||
void ff_MPV_common_defaults(MpegEncContext *s)
|
||||
{
|
||||
s->y_dc_scale_table =
|
||||
s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
|
||||
@ -644,16 +644,16 @@ void MPV_common_defaults(MpegEncContext *s)
|
||||
* the changed fields will not depend upon
|
||||
* the prior state of the MpegEncContext.
|
||||
*/
|
||||
void MPV_decode_defaults(MpegEncContext *s)
|
||||
void ff_MPV_decode_defaults(MpegEncContext *s)
|
||||
{
|
||||
MPV_common_defaults(s);
|
||||
ff_MPV_common_defaults(s);
|
||||
}
|
||||
|
||||
/**
|
||||
* init common structure for both encoder and decoder.
|
||||
* this assumes that some variables like width/height are already set
|
||||
*/
|
||||
av_cold int MPV_common_init(MpegEncContext *s)
|
||||
av_cold int ff_MPV_common_init(MpegEncContext *s)
|
||||
{
|
||||
int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
|
||||
int nb_slices = (HAVE_THREADS &&
|
||||
@ -913,12 +913,12 @@ av_cold int MPV_common_init(MpegEncContext *s)
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* init common structure for both encoder and decoder */
|
||||
void MPV_common_end(MpegEncContext *s)
|
||||
void ff_MPV_common_end(MpegEncContext *s)
|
||||
{
|
||||
int i, j, k;
|
||||
|
||||
@ -1158,7 +1158,7 @@ static void update_noise_reduction(MpegEncContext *s)
|
||||
* generic function for encode/decode called after coding/decoding
|
||||
* the header and before a frame is coded/decoded.
|
||||
*/
|
||||
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
||||
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
||||
{
|
||||
int i;
|
||||
Picture *pic;
|
||||
@ -1347,7 +1347,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
|
||||
|
||||
/* generic function for encode/decode called after a
|
||||
* frame has been coded/decoded. */
|
||||
void MPV_frame_end(MpegEncContext *s)
|
||||
void ff_MPV_frame_end(MpegEncContext *s)
|
||||
{
|
||||
int i;
|
||||
/* redraw edges for the frame if decoding didn't complete */
|
||||
@ -2175,7 +2175,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
|
||||
/**
|
||||
* find the lowest MB row referenced in the MVs
|
||||
*/
|
||||
int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
|
||||
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
|
||||
{
|
||||
int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
|
||||
int my, off, i, mvs;
|
||||
@ -2365,10 +2365,10 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
|
||||
|
||||
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
|
||||
if (s->mv_dir & MV_DIR_FORWARD) {
|
||||
ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
|
||||
ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0);
|
||||
}
|
||||
if (s->mv_dir & MV_DIR_BACKWARD) {
|
||||
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
|
||||
ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2519,7 +2519,7 @@ skip_idct:
|
||||
}
|
||||
}
|
||||
|
||||
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
|
||||
void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
|
||||
#if !CONFIG_SMALL
|
||||
if(s->out_format == FMT_MPEG1) {
|
||||
if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
|
||||
@ -2893,7 +2893,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
|
||||
s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
|
||||
}
|
||||
|
||||
void MPV_report_decode_progress(MpegEncContext *s)
|
||||
void ff_MPV_report_decode_progress(MpegEncContext *s)
|
||||
{
|
||||
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
|
||||
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
|
||||
|
@@ -686,21 +686,21 @@ typedef struct MpegEncContext {
             &new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
         : NULL)
 
-void MPV_decode_defaults(MpegEncContext *s);
-int MPV_common_init(MpegEncContext *s);
-void MPV_common_end(MpegEncContext *s);
-void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
-int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void MPV_frame_end(MpegEncContext *s);
-int MPV_encode_init(AVCodecContext *avctx);
-int MPV_encode_end(AVCodecContext *avctx);
-int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
-void MPV_common_init_mmx(MpegEncContext *s);
-void MPV_common_init_axp(MpegEncContext *s);
-void MPV_common_init_mmi(MpegEncContext *s);
-void MPV_common_init_arm(MpegEncContext *s);
-void MPV_common_init_altivec(MpegEncContext *s);
-void MPV_common_init_bfin(MpegEncContext *s);
+void ff_MPV_decode_defaults(MpegEncContext *s);
+int ff_MPV_common_init(MpegEncContext *s);
+void ff_MPV_common_end(MpegEncContext *s);
+void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
+int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void ff_MPV_frame_end(MpegEncContext *s);
+int ff_MPV_encode_init(AVCodecContext *avctx);
+int ff_MPV_encode_end(AVCodecContext *avctx);
+int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+void ff_MPV_common_init_mmx(MpegEncContext *s);
+void ff_MPV_common_init_axp(MpegEncContext *s);
+void ff_MPV_common_init_mmi(MpegEncContext *s);
+void ff_MPV_common_init_arm(MpegEncContext *s);
+void ff_MPV_common_init_altivec(MpegEncContext *s);
+void ff_MPV_common_init_bfin(MpegEncContext *s);
 void ff_clean_intra_table_entries(MpegEncContext *s);
 void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
 void ff_mpeg_flush(AVCodecContext *avctx);
@@ -710,8 +710,8 @@ void ff_release_unused_pictures(MpegEncContext *s, int remove_current);
 int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
-int MPV_lowest_referenced_row(MpegEncContext *s, int dir);
-void MPV_report_decode_progress(MpegEncContext *s);
+int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
+void ff_MPV_report_decode_progress(MpegEncContext *s);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
 void ff_set_qscale(MpegEncContext * s, int qscale);
@ -45,7 +45,7 @@ int ff_dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int
|
||||
* Set the given MpegEncContext to common defaults (same for encoding and decoding).
|
||||
* The changed fields will not depend upon the prior state of the MpegEncContext.
|
||||
*/
|
||||
void MPV_common_defaults(MpegEncContext *s);
|
||||
void ff_MPV_common_defaults(MpegEncContext *s);
|
||||
|
||||
static inline void gmc1_motion(MpegEncContext *s,
|
||||
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
|
||||
|
@ -264,7 +264,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst,
|
||||
static void MPV_encode_defaults(MpegEncContext *s)
|
||||
{
|
||||
int i;
|
||||
MPV_common_defaults(s);
|
||||
ff_MPV_common_defaults(s);
|
||||
|
||||
for (i = -16; i < 16; i++) {
|
||||
default_fcode_tab[i + MAX_MV] = 1;
|
||||
@ -274,7 +274,7 @@ static void MPV_encode_defaults(MpegEncContext *s)
|
||||
}
|
||||
|
||||
/* init video encoder */
|
||||
av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
int i;
|
||||
@ -764,7 +764,7 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
s->alternate_scan);
|
||||
|
||||
/* init */
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
if (!s->dct_quantize)
|
||||
@ -831,13 +831,13 @@ av_cold int MPV_encode_init(AVCodecContext *avctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
av_cold int MPV_encode_end(AVCodecContext *avctx)
|
||||
av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
ff_rate_control_uninit(s);
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
|
||||
s->out_format == FMT_MJPEG)
|
||||
ff_mjpeg_encode_close(s);
|
||||
@ -1373,8 +1373,8 @@ no_output_pic:
|
||||
return 0;
|
||||
}
|
||||
|
||||
int MPV_encode_picture(AVCodecContext *avctx,
|
||||
unsigned char *buf, int buf_size, void *data)
|
||||
int ff_MPV_encode_picture(AVCodecContext *avctx,
|
||||
unsigned char *buf, int buf_size, void *data)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
AVFrame *pic_arg = data;
|
||||
@ -1406,7 +1406,7 @@ int MPV_encode_picture(AVCodecContext *avctx,
|
||||
//emms_c();
|
||||
//printf("qs:%f %f %d\n", s->new_picture.quality,
|
||||
// s->current_picture.quality, s->qscale);
|
||||
MPV_frame_start(s, avctx);
|
||||
ff_MPV_frame_start(s, avctx);
|
||||
vbv_retry:
|
||||
if (encode_picture(s, s->picture_number) < 0)
|
||||
return -1;
|
||||
@ -1421,7 +1421,7 @@ vbv_retry:
|
||||
avctx->p_count = s->mb_num - s->i_count - s->skip_count;
|
||||
avctx->skip_count = s->skip_count;
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
|
||||
ff_mjpeg_encode_picture_trailer(s);
|
||||
@ -2137,7 +2137,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
|
||||
}
|
||||
|
||||
if(s->avctx->mb_decision == FF_MB_DECISION_RD){
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
|
||||
score *= s->lambda2;
|
||||
score += sse_mb(s) << FF_LAMBDA_SHIFT;
|
||||
@ -2743,7 +2743,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
}
|
||||
|
||||
if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
} else {
|
||||
int motion_x = 0, motion_y = 0;
|
||||
s->mv_type=MV_TYPE_16X16;
|
||||
@ -2863,7 +2863,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
|
||||
ff_h263_update_motion_val(s);
|
||||
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
}
|
||||
|
||||
/* clean the MV table in IPS frames for direct mode in B frames */
|
||||
@ -4040,9 +4040,9 @@ AVCodec ff_h263_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_H263,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
|
||||
.priv_class = &h263_class,
|
||||
@ -4067,9 +4067,9 @@ AVCodec ff_h263p_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_H263P,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.capabilities = CODEC_CAP_SLICE_THREADS,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
|
||||
@ -4081,9 +4081,9 @@ AVCodec ff_msmpeg4v2_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_MSMPEG4V2,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
|
||||
};
|
||||
@ -4093,9 +4093,9 @@ AVCodec ff_msmpeg4v3_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_MSMPEG4V3,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
|
||||
};
|
||||
@ -4105,9 +4105,9 @@ AVCodec ff_wmv1_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_WMV1,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
|
||||
};
|
||||
|
@ -554,7 +554,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
|
||||
}
|
||||
|
||||
|
||||
void MPV_common_init_altivec(MpegEncContext *s)
|
||||
void ff_MPV_common_init_altivec(MpegEncContext *s)
|
||||
{
|
||||
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return;
|
||||
|
||||
|
@ -351,11 +351,11 @@ static int rv20_decode_picture_header(MpegEncContext *s)
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
|
||||
if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
|
||||
return -1;
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
avcodec_set_dimensions(s->avctx, new_w, new_h);
|
||||
s->width = new_w;
|
||||
s->height = new_h;
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -427,7 +427,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
|
||||
s->avctx= avctx;
|
||||
s->out_format = FMT_H263;
|
||||
@ -468,7 +468,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
|
||||
|
||||
avctx->pix_fmt = PIX_FMT_YUV420P;
|
||||
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
ff_h263_decode_init_vlc(s);
|
||||
@ -491,7 +491,7 @@ static av_cold int rv10_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -526,10 +526,10 @@ static int rv10_decode_packet(AVCodecContext *avctx,
|
||||
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
|
||||
if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
|
||||
ff_er_frame_end(s);
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
|
||||
}
|
||||
if(MPV_frame_start(s, avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, avctx) < 0)
|
||||
return -1;
|
||||
ff_er_frame_start(s);
|
||||
} else {
|
||||
@ -596,7 +596,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
|
||||
}
|
||||
if(s->pict_type != AV_PICTURE_TYPE_B)
|
||||
ff_h263_update_motion_val(s);
|
||||
MPV_decode_mb(s, s->block);
|
||||
ff_MPV_decode_mb(s, s->block);
|
||||
if(s->loop_filter)
|
||||
ff_h263_loop_filter(s);
|
||||
|
||||
@ -674,7 +674,7 @@ static int rv10_decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
|
||||
ff_er_frame_end(s);
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
|
||||
*pict= *(AVFrame*)s->current_picture_ptr;
|
||||
|
@ -61,9 +61,9 @@ AVCodec ff_rv10_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_RV10,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
|
||||
};
|
||||
|
@ -62,9 +62,9 @@ AVCodec ff_rv20_encoder = {
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_RV20,
|
||||
.priv_data_size = sizeof(MpegEncContext),
|
||||
.init = MPV_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.init = ff_MPV_encode_init,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
|
||||
};
|
||||
|
@ -1427,17 +1427,17 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
|
||||
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
|
||||
r->si.width, r->si.height);
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
s->width = r->si.width;
|
||||
s->height = r->si.height;
|
||||
avcodec_set_dimensions(s->avctx, s->width, s->height);
|
||||
if ((err = MPV_common_init(s)) < 0)
|
||||
if ((err = ff_MPV_common_init(s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_realloc(r)) < 0)
|
||||
return err;
|
||||
}
|
||||
s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
|
||||
if(MPV_frame_start(s, s->avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, s->avctx) < 0)
|
||||
return -1;
|
||||
ff_er_frame_start(s);
|
||||
if (!r->tmp_b_block_base) {
|
||||
@ -1541,7 +1541,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
MpegEncContext *s = &r->s;
|
||||
int ret;
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
s->avctx = avctx;
|
||||
s->out_format = FMT_H263;
|
||||
s->codec_id = avctx->codec_id;
|
||||
@ -1556,7 +1556,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
avctx->has_b_frames = 1;
|
||||
s->low_delay = 0;
|
||||
|
||||
if ((ret = MPV_common_init(s)) < 0)
|
||||
if ((ret = ff_MPV_common_init(s)) < 0)
|
||||
return ret;
|
||||
|
||||
ff_h264_pred_init(&r->h, CODEC_ID_RV40, 8, 1);
|
||||
@ -1588,7 +1588,7 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
|
||||
|
||||
if (avctx->internal->is_copy) {
|
||||
r->tmp_b_block_base = NULL;
|
||||
if ((err = MPV_common_init(&r->s)) < 0)
|
||||
if ((err = ff_MPV_common_init(&r->s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_alloc(r)) < 0)
|
||||
return err;
|
||||
@ -1606,10 +1606,10 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
|
||||
return 0;
|
||||
|
||||
if (s->height != s1->height || s->width != s1->width) {
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
s->height = s1->height;
|
||||
s->width = s1->width;
|
||||
if ((err = MPV_common_init(s)) < 0)
|
||||
if ((err = ff_MPV_common_init(s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_realloc(r)) < 0)
|
||||
return err;
|
||||
@ -1625,7 +1625,7 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
|
||||
memset(&r->si, 0, sizeof(r->si));
|
||||
|
||||
/* necessary since it is it the condition checked for in decode_slice
|
||||
* to call MPV_frame_start. cmp. comment at the end of decode_frame */
|
||||
* to call ff_MPV_frame_start. cmp. comment at the end of decode_frame */
|
||||
s->current_picture_ptr = NULL;
|
||||
|
||||
return 0;
|
||||
@ -1737,7 +1737,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
|
||||
if(r->loop_filter)
|
||||
r->loop_filter(r, s->mb_height - 1);
|
||||
ff_er_frame_end(s);
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
|
||||
@ -1761,7 +1761,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
RV34DecContext *r = avctx->priv_data;
|
||||
|
||||
MPV_common_end(&r->s);
|
||||
ff_MPV_common_end(&r->s);
|
||||
rv34_decoder_free(r);
|
||||
|
||||
return 0;
|
||||
|
@ -670,7 +670,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
return buf_size;
|
||||
|
||||
if(MPV_frame_start(s, avctx) < 0)
|
||||
if(ff_MPV_frame_start(s, avctx) < 0)
|
||||
return -1;
|
||||
|
||||
pmv = av_malloc((FFALIGN(s->width, 16)/8 + 3) * sizeof(*pmv));
|
||||
@ -738,7 +738,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
|
||||
*pict = *(AVFrame*)&s->current_picture;
|
||||
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
*data_size=sizeof(AVFrame);
|
||||
result = buf_size;
|
||||
@ -753,7 +753,7 @@ static av_cold int svq1_decode_init(AVCodecContext *avctx)
|
||||
int i;
|
||||
int offset = 0;
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
ff_MPV_decode_defaults(s);
|
||||
|
||||
s->avctx = avctx;
|
||||
s->width = (avctx->width+3)&~3;
|
||||
@ -762,7 +762,7 @@ static av_cold int svq1_decode_init(AVCodecContext *avctx)
|
||||
avctx->pix_fmt = PIX_FMT_YUV410P;
|
||||
avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
|
||||
s->flags= avctx->flags;
|
||||
if (MPV_common_init(s) < 0) return -1;
|
||||
if (ff_MPV_common_init(s) < 0) return -1;
|
||||
|
||||
INIT_VLC_STATIC(&svq1_block_type, 2, 4,
|
||||
&ff_svq1_block_type_vlc[0][1], 2, 1,
|
||||
@ -804,7 +804,7 @@ static av_cold int svq1_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
MpegEncContext *s = avctx->priv_data;
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -928,7 +928,7 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
|
||||
if (MPV_common_init(s) < 0)
|
||||
if (ff_MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
|
||||
h->b_stride = 4*s->mb_width;
|
||||
@ -1073,7 +1073,7 @@ static int svq3_decode_frame(AVCodecContext *avctx,
|
||||
ff_draw_horiz_band(s, 16*s->mb_y, 16);
|
||||
}
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
|
||||
*(AVFrame *) data = *(AVFrame *) &s->current_picture;
|
||||
@ -1097,7 +1097,7 @@ static int svq3_decode_end(AVCodecContext *avctx)
|
||||
|
||||
ff_h264_free_context(h);
|
||||
|
||||
MPV_common_end(s);
|
||||
ff_MPV_common_end(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -5396,7 +5396,7 @@ static av_cold int vc1_decode_end(AVCodecContext *avctx)
|
||||
av_freep(&v->sr_rows[i >> 1][i & 1]);
|
||||
av_freep(&v->hrd_rate);
|
||||
av_freep(&v->hrd_buffer);
|
||||
MPV_common_end(&v->s);
|
||||
ff_MPV_common_end(&v->s);
|
||||
av_freep(&v->mv_type_mb_plane);
|
||||
av_freep(&v->direct_mb_plane);
|
||||
av_freep(&v->forward_mb_plane);
|
||||
@ -5648,7 +5648,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
|
||||
s->next_p_frame_damaged = 0;
|
||||
}
|
||||
|
||||
if (MPV_frame_start(s, avctx) < 0) {
|
||||
if (ff_MPV_frame_start(s, avctx) < 0) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
@ -5732,7 +5732,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
|
||||
ff_er_frame_end(s);
|
||||
}
|
||||
|
||||
MPV_frame_end(s);
|
||||
ff_MPV_frame_end(s);
|
||||
|
||||
if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
|
||||
image:
|
||||
|
@ -55,7 +55,7 @@ static int encode_ext_header(Wmv2Context *w){
|
||||
static av_cold int wmv2_encode_init(AVCodecContext *avctx){
|
||||
Wmv2Context * const w= avctx->priv_data;
|
||||
|
||||
if(MPV_encode_init(avctx) < 0)
|
||||
if(ff_MPV_encode_init(avctx) < 0)
|
||||
return -1;
|
||||
|
||||
ff_wmv2_common_init(w);
|
||||
@ -217,8 +217,8 @@ AVCodec ff_wmv2_encoder = {
|
||||
.id = CODEC_ID_WMV2,
|
||||
.priv_data_size = sizeof(Wmv2Context),
|
||||
.init = wmv2_encode_init,
|
||||
.encode = MPV_encode_picture,
|
||||
.close = MPV_encode_end,
|
||||
.encode = ff_MPV_encode_picture,
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
|
||||
};
|
||||
|
@@ -626,7 +626,7 @@ static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){
 #include "mpegvideo_mmx_template.c"
 #endif
 
-void MPV_common_init_mmx(MpegEncContext *s)
+void ff_MPV_common_init_mmx(MpegEncContext *s)
 {
     int mm_flags = av_get_cpu_flags();
 