mirror of https://github.com/xenia-project/FFmpeg.git (synced 2024-11-24 03:59:43 +00:00)

commit 0ecca7a49f
parent f14d4e7e21

    various security fixes and precautionary checks

    Originally committed as revision 3822 to svn://svn.ffmpeg.org/ffmpeg/trunk
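The fixes below repeat a few defensive patterns: unbounded strcpy/strcat/sprintf calls are replaced by pstrcpy/pstrcat/snprintf with an explicit remaining-size argument, untrusted sizes and picture dimensions are validated (avcodec_check_dimensions, overflow and range checks) before they drive allocations or indexing, and encoders verify that the output buffer still has room before writing more data. As a rough, self-contained sketch of the bounded-append idiom used in print_report() -- the buffer size and values here are invented for illustration and are not taken from the diff:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[64];              /* hypothetical report buffer; the size is an assumption */
    int frame_number = 123;    /* example values only */
    float quality = 2.0f;

    buf[0] = '\0';
    /* Each append writes at most the space still free in buf, so a long
     * report is truncated instead of overflowing the array. */
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d ", frame_number);
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", quality);
    printf("%s\n", buf);
    return 0;
}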
@@ -35,10 +35,10 @@ void show_help_options(const OptionDef *options, const char *msg, int mask, int
 printf("%s", msg);
 first = 0;
 }
-strcpy(buf, po->name);
+pstrcpy(buf, sizeof(buf), po->name);
 if (po->flags & HAS_ARG) {
-strcat(buf, " ");
+pstrcat(buf, sizeof(buf), " ");
-strcat(buf, po->argname);
+pstrcat(buf, sizeof(buf), po->argname);
 }
 printf("-%-17s %s\n", buf, po->help);
 }

ffmpeg.c

@@ -1020,21 +1020,21 @@ static void print_report(AVFormatContext **output_files,
 os = output_files[ost->file_index];
 enc = &ost->st->codec;
 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
-sprintf(buf + strlen(buf), "q=%2.1f ",
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
 }
 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
 frame_number = ost->frame_number;
-sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d q=%2.1f ",
 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
 if(is_last_report)
-sprintf(buf + strlen(buf), "L");
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
 if (enc->flags&CODEC_FLAG_PSNR){
 int j;
 double error, error_sum=0;
 double scale, scale_sum=0;
 char type[3]= {'Y','U','V'};
-sprintf(buf + strlen(buf), "PSNR=");
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
 for(j=0; j<3; j++){
 if(is_last_report){
 error= enc->error[j];
@@ -1046,9 +1046,9 @@ static void print_report(AVFormatContext **output_files,
 if(j) scale/=4;
 error_sum += error;
 scale_sum += scale;
-sprintf(buf + strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
 }
-sprintf(buf + strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
 }
 vid = 1;
 }
@@ -1063,12 +1063,12 @@ static void print_report(AVFormatContext **output_files,
 if (verbose || is_last_report) {
 bitrate = (double)(total_size * 8) / ti1 / 1000.0;

-sprintf(buf + strlen(buf),
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
 (double)total_size / 1024, ti1, bitrate);

 if (verbose > 1)
-sprintf(buf + strlen(buf), " dup=%d drop=%d",
+snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
 nb_frames_dup, nb_frames_drop);

 if (verbose >= 0)
@@ -3331,7 +3331,7 @@ static void opt_output_file(const char *filename)

 output_files[nb_output_files++] = oc;

-strcpy(oc->filename, filename);
+pstrcpy(oc->filename, sizeof(oc->filename), filename);

 /* check filename in case of an image number is expected */
 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
@@ -323,13 +323,19 @@ static int decode_p_frame(FourXContext *f, uint8_t *buf, int length){
 uint16_t *src= (uint16_t*)f->last_picture.data[0];
 uint16_t *dst= (uint16_t*)f->current_picture.data[0];
 const int stride= f->current_picture.linesize[0]>>1;
-const int bitstream_size= get32(buf+8);
+const unsigned int bitstream_size= get32(buf+8);
-const int bytestream_size= get32(buf+16);
+const unsigned int bytestream_size= get32(buf+16);
-const int wordstream_size= get32(buf+12);
+const unsigned int wordstream_size= get32(buf+12);

-if(bitstream_size+ bytestream_size+ wordstream_size + 20 != length)
+if(bitstream_size+ bytestream_size+ wordstream_size + 20 != length
+|| bitstream_size > (1<<26)
+|| bytestream_size > (1<<26)
+|| wordstream_size > (1<<26)
+){
 av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size,
 bitstream_size+ bytestream_size+ wordstream_size - length);
+return -1;
+}

 f->bitstream_buffer= av_fast_realloc(f->bitstream_buffer, &f->bitstream_buffer_size, bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE);
 f->dsp.bswap_buf((uint32_t*)f->bitstream_buffer, (uint32_t*)(buf + 20), bitstream_size/4);
@@ -550,13 +556,17 @@ static int decode_i_frame(FourXContext *f, uint8_t *buf, int length){
 const int height= f->avctx->height;
 uint16_t *dst= (uint16_t*)f->current_picture.data[0];
 const int stride= f->current_picture.linesize[0]>>1;
-const int bitstream_size= get32(buf);
+const unsigned int bitstream_size= get32(buf);
 const int token_count __attribute__((unused)) = get32(buf + bitstream_size + 8);
-int prestream_size= 4*get32(buf + bitstream_size + 4);
+unsigned int prestream_size= 4*get32(buf + bitstream_size + 4);
 uint8_t *prestream= buf + bitstream_size + 12;

-if(prestream_size + bitstream_size + 12 != length)
+if(prestream_size + bitstream_size + 12 != length
+|| bitstream_size > (1<<26)
+|| prestream_size > (1<<26)){
 av_log(f->avctx, AV_LOG_ERROR, "size missmatch %d %d %d\n", prestream_size, bitstream_size, length);
+return -1;
+}

 prestream= read_huffman_tables(f, prestream);

@@ -339,8 +339,13 @@ static inline int decode_mb(ASV1Context *a, DCTELEM block[6][64]){
 return 0;
 }

-static inline void encode_mb(ASV1Context *a, DCTELEM block[6][64]){
+static inline int encode_mb(ASV1Context *a, DCTELEM block[6][64]){
 int i;

+if(a->pb.buf_end - a->pb.buf - (put_bits_count(&a->pb)>>3) < 30*16*16*3/2/8){
+av_log(a->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 if(a->avctx->codec_id == CODEC_ID_ASV1){
 for(i=0; i<6; i++)
@@ -349,6 +354,7 @@ static inline void encode_mb(ASV1Context *a, DCTELEM block[6][64]){
 for(i=0; i<6; i++)
 asv2_encode_block(a, block[i]);
 }
+return 0;
 }

 static inline void idct_put(ASV1Context *a, int mb_x, int mb_y){
@@ -17,7 +17,7 @@ extern "C" {

 #define FFMPEG_VERSION_INT 0x000409
 #define FFMPEG_VERSION "0.4.9-pre1"
-#define LIBAVCODEC_BUILD 4736
+#define LIBAVCODEC_BUILD 4737

 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
 #define LIBAVCODEC_VERSION FFMPEG_VERSION
@@ -235,6 +235,12 @@ enum SampleFormat {
 */
 #define FF_INPUT_BUFFER_PADDING_SIZE 8

+/**
+* minimum encoding buffer size.
+* used to avoid some checks during header writing
+*/
+#define FF_MIN_BUFFER_SIZE 16384
+
 /* motion estimation type, EPZS by default */
 enum Motion_Est_ID {
 ME_ZERO = 1,
@@ -2112,6 +2118,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
 void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
 int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
 void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h);
 enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);

 int avcodec_thread_init(AVCodecContext *s, int thread_count);
@@ -931,7 +931,9 @@ static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
 s->sys = dv_codec_profile(c);
 if (!s->sys)
 return -1;
+if(buf_size < s->sys->frame_size)
+return -1;

 c->pix_fmt = s->sys->pix_fmt;
 s->picture = *((AVFrame *)data);

@@ -354,7 +354,7 @@ static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int
 return ret;
 }

-static inline void encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
+static inline int encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
 PlaneContext * const p= &s->plane[plane_index];
 RangeCoder * const c= &s->c;
 int x;
@@ -362,6 +362,18 @@ static inline void encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], i
 int run_count=0;
 int run_mode=0;

+if(s->ac){
+if(c->bytestream_end - c->bytestream < w*20){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+}else{
+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+}
+
 for(x=0; x<w; x++){
 int diff, context;

@@ -416,6 +428,8 @@ static inline void encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], i
 put_bits(&s->pb, 1, 1);
 }
 s->run_index= run_index;
+
+return 0;
 }

 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
@@ -896,7 +910,7 @@ static int read_header(FFV1Context *f){
 context_count=1;
 for(i=0; i<5; i++){
 context_count*= read_quant_table(c, f->quant_table[i], context_count);
-if(context_count < 0){
+if(context_count < 0 || context_count > 32768){
 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
 return -1;
 }
@@ -176,7 +176,7 @@ static int flic_decode_frame(AVCodecContext *avctx,
 for (j = 0; j < color_changes; j++) {

 /* wrap around, for good measure */
-if (palette_ptr >= 256)
+if ((unsigned)palette_ptr >= 256)
 palette_ptr = 0;

 r = buf[stream_ptr++] << color_shift;
@@ -6115,7 +6115,7 @@ int flv_h263_decode_picture_header(MpegEncContext *s)
 width = height = 0;
 break;
 }
-if ((width == 0) || (height == 0))
+if(avcodec_check_dimensions(s->avctx, width, height))
 return -1;
 s->width = width;
 s->height = height;
@@ -724,9 +724,8 @@ retry:
 if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_packed){
 int current_pos= get_bits_count(&s->gb)>>3;
 int startcode_found=0;

-if( buf_size - current_pos > 5
+if(buf_size - current_pos > 5){
-&& buf_size - current_pos < BITSTREAM_BUFFER_SIZE){
 int i;
 for(i=current_pos; i<buf_size-3; i++){
 if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){
@@ -741,6 +740,10 @@ retry:
 }

 if(startcode_found){
+s->bitstream_buffer= av_fast_realloc(
+s->bitstream_buffer,
+&s->allocated_bitstream_buffer_size,
+buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE);
 memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos);
 s->bitstream_buffer_size= buf_size - current_pos;
 }
@@ -5862,6 +5862,10 @@ static inline int decode_seq_parameter_set(H264Context *h){
 sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb);
 sps->mb_width= get_ue_golomb(&s->gb) + 1;
 sps->mb_height= get_ue_golomb(&s->gb) + 1;
+if((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 ||
+avcodec_check_dimensions(NULL, 16*sps->mb_width, 16*sps->mb_height))
+return -1;
+
 sps->frame_mbs_only_flag= get_bits1(&s->gb);
 if(!sps->frame_mbs_only_flag)
 sps->mb_aff= get_bits1(&s->gb);
@@ -65,13 +65,14 @@ typedef struct HYuvContext{
 int context;
 int picture_number;
 int last_slice_end;
-uint8_t __align8 temp[3][2560];
+uint8_t *temp[3];
 uint64_t stats[3][256];
 uint8_t len[3][256];
 uint32_t bits[3][256];
 VLC vlc[3];
 AVFrame picture;
-uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
+uint8_t *bitstream_buffer;
+int bitstream_buffer_size;
 DSPContext dsp;
 }HYuvContext;

@@ -347,24 +348,36 @@ static int read_old_huffman_tables(HYuvContext *s){
 #endif
 }

-static int decode_init(AVCodecContext *avctx)
+static int common_init(AVCodecContext *avctx){
-{
 HYuvContext *s = avctx->priv_data;
-int width, height;
+int i;

 s->avctx= avctx;
 s->flags= avctx->flags;

 dsputil_init(&s->dsp, avctx);
+
+s->width= avctx->width;
+s->height= avctx->height;
+assert(s->width>0 && s->height>0);
+
+for(i=0; i<3; i++){
+s->temp[i]= av_malloc(avctx->width + 16);
+}
+return 0;
+}
+
+static int decode_init(AVCodecContext *avctx)
+{
+HYuvContext *s = avctx->priv_data;
+
+common_init(avctx);
 memset(s->vlc, 0, 3*sizeof(VLC));

-width= s->width= avctx->width;
-height= s->height= avctx->height;
 avctx->coded_frame= &s->picture;
-s->interlaced= height > 288;
+s->interlaced= s->height > 288;

 s->bgr32=1;
-assert(width && height);
 //if(avctx->extradata)
 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
 if(avctx->extradata_size){
@@ -474,20 +487,12 @@ static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
 static int encode_init(AVCodecContext *avctx)
 {
 HYuvContext *s = avctx->priv_data;
-int i, j, width, height;
+int i, j;

-s->avctx= avctx;
+common_init(avctx);
-s->flags= avctx->flags;

-dsputil_init(&s->dsp, avctx);
-
-width= s->width= avctx->width;
+avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
-height= s->height= avctx->height;
+avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
-
-assert(width && height);
-
-avctx->extradata= av_mallocz(1024*30);
-avctx->stats_out= av_mallocz(1024*30);
 s->version=2;

 avctx->coded_frame= &s->picture;
@@ -524,7 +529,7 @@ static int encode_init(AVCodecContext *avctx)
 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
 return -1;
 }
-if(s->interlaced != ( height > 288 ))
+if(s->interlaced != ( s->height > 288 ))
 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
 }else if(avctx->strict_std_compliance>=0){
 av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodeable with future versions!!! Set vstrict=-1 to use it anyway.\n");
@@ -580,7 +585,7 @@ static int encode_init(AVCodecContext *avctx)

 if(s->context){
 for(i=0; i<3; i++){
-int pels = width*height / (i?40:10);
+int pels = s->width*s->height / (i?40:10);
 for(j=0; j<256; j++){
 int d= FFMIN(j, 256-j);
 s->stats[i][j]= pels/(d+1);
@@ -623,9 +628,14 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
 }
 }

-static void encode_422_bitstream(HYuvContext *s, int count){
+static int encode_422_bitstream(HYuvContext *s, int count){
 int i;

+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 count/=2;
 if(s->flags&CODEC_FLAG_PASS1){
 for(i=0; i<count; i++){
@@ -653,11 +663,17 @@ static void encode_422_bitstream(HYuvContext *s, int count){
 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
 }
 }
+return 0;
 }

-static void encode_gray_bitstream(HYuvContext *s, int count){
+static int encode_gray_bitstream(HYuvContext *s, int count){
 int i;

+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 count/=2;
 if(s->flags&CODEC_FLAG_PASS1){
 for(i=0; i<count; i++){
@@ -677,6 +693,7 @@ static void encode_gray_bitstream(HYuvContext *s, int count){
 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
 }
 }
+return 0;
 }

 static void decode_bgr_bitstream(HYuvContext *s, int count){
@@ -756,6 +773,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
 /* no supplementary picture */
 if (buf_size == 0)
 return 0;

+s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);

@@ -981,11 +1000,23 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
 return (get_bits_count(&s->gb)+31)/32*4;
 }

+static int common_end(HYuvContext *s){
+int i;
+
+for(i=0; i<3; i++){
+av_freep(&s->temp[i]);
+}
+return 0;
+}
+
 static int decode_end(AVCodecContext *avctx)
 {
 HYuvContext *s = avctx->priv_data;
 int i;

+common_end(s);
+av_freep(&s->bitstream_buffer);
+
 for(i=0; i<3; i++){
 free_vlc(&s->vlc[i]);
 }
@@ -1161,7 +1192,9 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,

 static int encode_end(AVCodecContext *avctx)
 {
-// HYuvContext *s = avctx->priv_data;
+HYuvContext *s = avctx->priv_data;

+common_end(s);
+
 av_freep(&avctx->extradata);
 av_freep(&avctx->stats_out);
@@ -268,6 +268,9 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr,
 int size, w2, h2, size2;
 PixFmtInfo *pinfo;

+if(avcodec_check_dimensions(NULL, width, height))
+goto fail;
+
 pinfo = &pix_fmt_info[pix_fmt];
 size = width * height;
 switch(pix_fmt) {
@@ -344,6 +347,7 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr,
 picture->linesize[1] = 4;
 return size2 + 256 * 4;
 default:
+fail:
 picture->data[0] = NULL;
 picture->data[1] = NULL;
 picture->data[2] = NULL;
@@ -360,7 +364,7 @@ int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
 const unsigned char* s;
 int size = avpicture_get_size(pix_fmt, width, height);

-if (size > dest_size)
+if (size > dest_size || size < 0)
 return -1;

 if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
@@ -1920,6 +1924,8 @@ int avpicture_alloc(AVPicture *picture,
 void *ptr;

 size = avpicture_get_size(pix_fmt, width, height);
+if(size<0)
+goto fail;
 ptr = av_malloc(size);
 if (!ptr)
 goto fail;
@@ -561,6 +561,8 @@ ImgReSampleContext *img_resample_full_init(int owidth, int oheight,
 s = av_mallocz(sizeof(ImgReSampleContext));
 if (!s)
 return NULL;
+if((unsigned)owidth >= UINT_MAX / (LINE_BUF_HEIGHT + NB_TAPS))
+return NULL;
 s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS));
 if (!s->line_buf)
 goto fail;
@@ -196,6 +196,10 @@ static unsigned long iv_decode_frame(Indeo3DecodeContext *s,
 hdr_height = le2me_16(*(uint16_t *)buf_pos);
 buf_pos += 2;
 hdr_width = le2me_16(*(uint16_t *)buf_pos);
+
+if(avcodec_check_dimensions(NULL, hdr_width, hdr_height))
+return -1;
+
 buf_pos += 2;
 chroma_height = ((hdr_height >> 2) + 3) & 0x7ffc;
 chroma_width = ((hdr_width >> 2) + 3) & 0x7ffc;
@@ -45,6 +45,10 @@
 void *av_malloc(unsigned int size)
 {
 void *ptr;

+/* lets disallow possible ambiguous cases */
+if(size > INT_MAX)
+return NULL;
+
 #ifdef MEMALIGN_HACK
 int diff;
@@ -93,6 +97,10 @@ void *av_malloc(unsigned int size)
 */
 void *av_realloc(void *ptr, unsigned int size)
 {
+/* lets disallow possible ambiguous cases */
+if(size > INT_MAX)
+return NULL;
+
 #ifdef MEMALIGN_HACK
 //FIXME this isnt aligned correctly though it probably isnt needed
 int diff;
@@ -659,11 +659,11 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
 mjpeg_picture_header(s);

 s->header_bits= put_bits_count(&s->pb);

 if(avctx->pix_fmt == PIX_FMT_RGBA32){
 int x, y, i;
 const int linesize= p->linesize[0];
-uint16_t buffer[2048][4];
+uint16_t (*buffer)[4]= s->rd_scratchpad;
 int left[3], top[3], topleft[3];

 for(i=0; i<3; i++){
@@ -674,6 +674,11 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
 const int modified_predictor= y ? predictor : 1;
 uint8_t *ptr = p->data[0] + (linesize * y);

+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 for(i=0; i<3; i++){
 top[i]= left[i]= topleft[i]= buffer[0][i];
 }
@@ -707,6 +712,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, in
 const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];

 for(mb_y = 0; mb_y < mb_height; mb_y++) {
+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
 for(mb_x = 0; mb_x < mb_width; mb_x++) {
 if(mb_x==0 || mb_y==0){
 for(i=0;i<3;i++) {
@@ -1060,7 +1069,10 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
 }
 height = get_bits(&s->gb, 16);
 width = get_bits(&s->gb, 16);

 dprintf("sof0: picture: %dx%d\n", width, height);
+if(avcodec_check_dimensions(s->avctx, width, height))
+return -1;
+
 nb_components = get_bits(&s->gb, 8);
 if (nb_components <= 0 ||
@@ -1228,11 +1240,14 @@ static int decode_block(MJpegDecodeContext *s, DCTELEM *block,

 static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point_transform){
 int i, mb_x, mb_y;
-uint16_t buffer[2048][4];
+uint16_t buffer[32768][4];
 int left[3], top[3], topleft[3];
 const int linesize= s->linesize[0];
 const int mask= (1<<s->bits)-1;

+if((unsigned)s->mb_width > 32768) //dynamic alloc
+return -1;
+
 for(i=0; i<3; i++){
 buffer[0][i]= 1 << (s->bits + point_transform - 1);
 }
@@ -619,6 +619,9 @@ int MPV_common_init(MpegEncContext *s)
 return -1;
 }

+if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
+return -1;
+
 dsputil_init(&s->dsp, s->avctx);
 DCT_common_init(s);

@@ -742,9 +745,6 @@ int MPV_common_init(MpegEncContext *s)
 CHECKED_ALLOCZ(s->coded_block_base, y_size);
 s->coded_block= s->coded_block_base + s->b8_stride + 1;

-/* divx501 bitstream reorder buffer */
-CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
-
 /* cbp, ac_pred, pred_dir */
 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
@@ -849,6 +849,8 @@ void MPV_common_end(MpegEncContext *s)
 av_freep(&s->mbskip_table);
 av_freep(&s->prev_pict_types);
 av_freep(&s->bitstream_buffer);
+s->allocated_bitstream_buffer_size=0;
+
 av_freep(&s->avctx->stats_out);
 av_freep(&s->ac_stats);
 av_freep(&s->error_status_table);
@@ -2314,6 +2316,11 @@ int MPV_encode_picture(AVCodecContext *avctx,

 stuffing_count= ff_vbv_update(s, s->frame_bits);
 if(stuffing_count){
+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
+av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
+return -1;
+}
+
 switch(s->codec_id){
 case CODEC_ID_MPEG1VIDEO:
 case CODEC_ID_MPEG2VIDEO:
@@ -4555,16 +4562,16 @@ static int encode_thread(AVCodecContext *c, void *arg){
 int mb_x, mb_y, pdif = 0;
 int i, j;
 MpegEncContext best_s, backup_s;
-uint8_t bit_buf[2][3000];
+uint8_t bit_buf[2][MAX_MB_BYTES];
-uint8_t bit_buf2[2][3000];
+uint8_t bit_buf2[2][MAX_MB_BYTES];
-uint8_t bit_buf_tex[2][3000];
+uint8_t bit_buf_tex[2][MAX_MB_BYTES];
 PutBitContext pb[2], pb2[2], tex_pb[2];
 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);

 for(i=0; i<2; i++){
-init_put_bits(&pb [i], bit_buf [i], 3000);
+init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
-init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
+init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
-init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
+init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
 }

 s->last_bits= put_bits_count(&s->pb);
@@ -4622,6 +4629,18 @@ static int encode_thread(AVCodecContext *c, void *arg){
 int dmin= INT_MAX;
 int dir;

+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+if(s->data_partitioning){
+if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
+|| s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+}
+
 s->mb_x = mb_x;
 s->mb_y = mb_y; // moved into loop, can get changed by H.261
 ff_update_block_index(s);
@@ -68,6 +68,8 @@ enum OutputFormat {
 #define SI_TYPE FF_SI_TYPE ///< Switching Intra
 #define SP_TYPE FF_SP_TYPE ///< Switching Predicted

+#define MAX_MB_BYTES (30*16*16*3/8 + 120)
+
 typedef struct Predictor{
 double coeff;
 double count;
@@ -599,9 +601,9 @@ typedef struct MpegEncContext {
 int divx_version;
 int divx_build;
 int divx_packed;
-#define BITSTREAM_BUFFER_SIZE 1024*256
 uint8_t *bitstream_buffer; //Divx 5.01 puts several frames in a single one, this is used to reorder them
 int bitstream_buffer_size;
+int allocated_bitstream_buffer_size;

 int xvid_build;

@@ -140,6 +140,8 @@ static int png_probe(AVProbeData *pd)
 #endif
 static void *png_zalloc(void *opaque, unsigned int items, unsigned int size)
 {
+if(items >= UINT_MAX / size)
+return NULL;
 return av_malloc(items * size);
 }

@@ -522,6 +524,10 @@ static int decode_frame(AVCodecContext *avctx,
 goto fail;
 s->width = get32(&s->bytestream);
 s->height = get32(&s->bytestream);
+if(avcodec_check_dimensions(avctx, s->width, s->height)){
+s->width= s->height= 0;
+goto fail;
+}
 s->bit_depth = *s->bytestream++;
 s->color_type = *s->bytestream++;
 s->compression_type = *s->bytestream++;
@@ -727,7 +733,8 @@ static int png_write_row(PNGContext *s, const uint8_t *data, int size)
 if (ret != Z_OK)
 return -1;
 if (s->zstream.avail_out == 0) {
-png_write_chunk(&s->bytestream, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
+if(s->bytestream_end - s->bytestream > IOBUF_SIZE + 100)
+png_write_chunk(&s->bytestream, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
 s->zstream.avail_out = IOBUF_SIZE;
 s->zstream.next_out = s->buf;
 }
@@ -895,7 +902,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
 ret = deflate(&s->zstream, Z_FINISH);
 if (ret == Z_OK || ret == Z_STREAM_END) {
 len = IOBUF_SIZE - s->zstream.avail_out;
-if (len > 0) {
+if (len > 0 && s->bytestream_end - s->bytestream > len + 100) {
 png_write_chunk(&s->bytestream, MKTAG('I', 'D', 'A', 'T'), s->buf, len);
 }
 s->zstream.avail_out = IOBUF_SIZE;
@@ -109,8 +109,9 @@ static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
 }
 }
 /* check that all tags are present */
-if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
+if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
 return -1;
+
 avctx->width = w;
 avctx->height = h;
 if (depth == 1) {
@@ -135,7 +136,7 @@ static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
 return -1;
 pnm_get(s, buf1, sizeof(buf1));
 avctx->height = atoi(buf1);
-if (avctx->height <= 0)
+if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
 return -1;
 if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
 pnm_get(s, buf1, sizeof(buf1));
@@ -264,6 +265,11 @@ static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu
 int i, h, h1, c, n, linesize;
 uint8_t *ptr, *ptr1, *ptr2;

+if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
+av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 *p = *pict;
 p->pict_type= FF_I_TYPE;
 p->key_frame= 1;
@@ -338,6 +344,11 @@ static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int bu
 const char *tuple_type;
 uint8_t *ptr;

+if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
+av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 *p = *pict;
 p->pict_type= FF_I_TYPE;
 p->key_frame= 1;
@@ -74,6 +74,8 @@ int ff_rate_control_init(MpegEncContext *s)
 p= strchr(p+1, ';');
 }
 i+= s->max_b_frames;
+if(i<=0 || i>=INT_MAX / sizeof(RateControlEntry))
+return -1;
 rcc->entry = (RateControlEntry*)av_mallocz(i*sizeof(RateControlEntry));
 rcc->num_entries= i;

@@ -1292,7 +1292,7 @@ void ff_spatial_idwt(DWTELEM *buffer, int width, int height, int stride, int typ
 }
 }

-static void encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){
+static int encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){
 const int w= b->width;
 const int h= b->height;
 int x, y;
@@ -1347,6 +1347,10 @@ static void encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTEL
 put_symbol2(&s->c, b->state[1], run, 3);

 for(y=0; y<h; y++){
+if(&s->c.bytestream_end - &s->c.bytestream < w*40){
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
 for(x=0; x<w; x++){
 int v, p=0;
 int /*ll=0, */l=0, lt=0, t=0, rt=0;
@@ -1398,12 +1402,13 @@ static void encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTEL
 }
 }
 }
+return 0;
 }

-static void encode_subband(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){
+static int encode_subband(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){
 // encode_subband_qtree(s, b, src, parent, stride, orientation);
 // encode_subband_z0run(s, b, src, parent, stride, orientation);
-encode_subband_c0run(s, b, src, parent, stride, orientation);
+return encode_subband_c0run(s, b, src, parent, stride, orientation);
 // encode_subband_dzr(s, b, src, parent, stride, orientation);
 }

@@ -1918,6 +1923,10 @@ static void encode_blocks(SnowContext *s){
 int h= s->b_height;

 for(y=0; y<h; y++){
+if(&s->c.bytestream_end - &s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return;
+}
 for(x=0; x<w; x++){
 encode_q_branch(s, 0, x, y);
 }
@@ -1081,7 +1081,7 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec

 #ifdef CONFIG_ENCODERS

-static void svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
+static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
 int width, int height, int src_stride, int stride)
 {
 int x, y;
@@ -1188,6 +1188,11 @@ static void svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plan
 uint8_t *ref= ref_plane + offset;
 int score[4]={0,0,0,0}, best;
 uint8_t temp[16*stride];

+if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
+av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+return -1;
+}
+
 s->m.mb_x= x;
 ff_init_block_index(&s->m);
@@ -1280,6 +1285,7 @@ static void svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plan
 }
 s->m.first_slice_line=0;
 }
+return 0;
 }

 static int svq1_encode_init(AVCodecContext *avctx)
@@ -1341,10 +1347,11 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,

 svq1_write_header(s, p->pict_type);
 for(i=0; i<3; i++){
-svq1_encode_plane(s, i,
+if(svq1_encode_plane(s, i,
 s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
 s->frame_width / (i?4:1), s->frame_height / (i?4:1),
-s->picture.linesize[i], s->current_picture.linesize[i]);
+s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
+return -1;
 }

 // align_put_bits(&s->pb);
@@ -82,7 +82,7 @@ void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size)
 if(min_size < *size)
 return ptr;

-*size= 17*min_size/16 + 32;
+*size= FFMAX(17*min_size/16 + 32, min_size);

 return av_realloc(ptr, *size);
 }
@@ -101,6 +101,8 @@ void *av_mallocz_static(unsigned int size)

 if(ptr){
 array_static =av_fast_realloc(array_static, &allocated_static, sizeof(void*)*(last_static+1));
+if(!array_static)
+return NULL;
 array_static[last_static++] = ptr;
 }

@@ -233,16 +235,27 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
 *height= ALIGN(*height, h_align);
 }

+int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h){
+if((int)w>0 && (int)h>0 && (w+128)*(uint64_t)(h+128) < INT_MAX/4)
+return 0;
+
+av_log(av_log_ctx, AV_LOG_ERROR, "picture size invalid (%ux%u)\n", w, h);
+return -1;
+}
+
 int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
 int i;
 int w= s->width;
 int h= s->height;
 InternalBuffer *buf;
 int *picture_number;

 assert(pic->data[0]==NULL);
 assert(INTERNAL_BUFFER_SIZE > s->internal_buffer_count);

+if(avcodec_check_dimensions(s,w,h))
+return -1;
+
 if(s->internal_buffer==NULL){
 s->internal_buffer= av_mallocz(INTERNAL_BUFFER_SIZE*sizeof(InternalBuffer));
 }
@@ -509,6 +522,11 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
 else if(avctx->width && avctx->height)
 avcodec_set_dimensions(avctx, avctx->width, avctx->height);

+if((avctx->coded_width||avctx->coded_height) && avcodec_check_dimensions(avctx,avctx->coded_width,avctx->coded_height)){
+av_freep(&avctx->priv_data);
+return -1;
+}
+
 ret = avctx->codec->init(avctx);
 if (ret < 0) {
 av_freep(&avctx->priv_data);
@@ -520,6 +538,10 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
 int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
 const short *samples)
 {
+if(buf_size < FF_MIN_BUFFER_SIZE && 0){
+av_log(avctx, AV_LOG_ERROR, "buffer smaller then minimum size\n");
+return -1;
+}
 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
 int ret = avctx->codec->encode(avctx, buf, buf_size, (void *)samples);
 avctx->frame_number++;
@@ -531,6 +553,12 @@ int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
 int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
 const AVFrame *pict)
 {
+if(buf_size < FF_MIN_BUFFER_SIZE){
+av_log(avctx, AV_LOG_ERROR, "buffer smaller then minimum size\n");
+return -1;
+}
+if(avcodec_check_dimensions(avctx,avctx->width,avctx->height))
+return -1;
 if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){
 int ret = avctx->codec->encode(avctx, buf, buf_size, (void *)pict);
 avctx->frame_number++;
@@ -557,6 +585,8 @@ int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
 int ret;

 *got_picture_ptr= 0;
+if((avctx->coded_width||avctx->coded_height) && avcodec_check_dimensions(avctx,avctx->coded_width,avctx->coded_height))
+return -1;
 ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
 buf, buf_size);

@ -2093,6 +2093,9 @@ static void render_fragments(Vp3DecodeContext *s,
|
|||||||
upper_motion_limit = 7 * s->current_frame.linesize[2];
|
upper_motion_limit = 7 * s->current_frame.linesize[2];
|
||||||
lower_motion_limit = height * s->current_frame.linesize[2] + width - 8;
|
lower_motion_limit = height * s->current_frame.linesize[2] + width - 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if((unsigned)stride > 2048)
|
||||||
|
return; //various tables are fixed size
|
||||||
|
|
||||||
/* for each fragment row... */
|
/* for each fragment row... */
|
||||||
for (y = 0; y < height; y += 8) {
|
for (y = 0; y < height; y += 8) {
|
||||||
@ -2681,6 +2684,11 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
     s->width = get_bits(&gb, 16) << 4;
     s->height = get_bits(&gb, 16) << 4;

+    if(avcodec_check_dimensions(avctx, s->width, s->height)){
+        s->width= s->height= 0;
+        return -1;
+    }
+
     skip_bits(&gb, 24); /* frame width */
     skip_bits(&gb, 24); /* frame height */

@ -151,6 +151,10 @@ static int vqa_decode_init(AVCodecContext *avctx)
     s->vqa_version = vqa_header[0];
     s->width = LE_16(&vqa_header[6]);
     s->height = LE_16(&vqa_header[8]);
+    if(avcodec_check_dimensions(avctx, s->width, s->height)){
+        s->width= s->height= 0;
+        return -1;
+    }
     s->vector_width = vqa_header[10];
     s->vector_height = vqa_header[11];
     s->partial_count = s->partial_countdown = vqa_header[13];
@ -132,6 +132,9 @@ static int xan_decode_init(AVCodecContext *avctx)
         v_b_table[i] = V_B * i;
     }

+    if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
+        return -1;
+
     s->buffer1 = av_malloc(avctx->width * avctx->height);
     s->buffer2 = av_malloc(avctx->width * avctx->height);
     if (!s->buffer1 || !s->buffer2)
@ -279,7 +279,7 @@ static int fourxm_read_packet(AVFormatContext *s,

         /* allocate 8 more bytes than 'size' to account for fourcc
          * and size */
-        if (av_new_packet(pkt, size + 8))
+        if (size + 8 < size || av_new_packet(pkt, size + 8))
             return AVERROR_IO;
         pkt->stream_index = fourxm->video_stream_index;
         pkt->pts = fourxm->video_pts;
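
The fourxm_read_packet change refuses the allocation when size + 8 wraps around. A minimal sketch of that idiom follows, using an unsigned size so the overflow test is well defined; alloc_packet_example is an invented name, not the demuxer's code:

    #include <stdlib.h>

    /* With unsigned arithmetic, size + 8 < size holds exactly when the
     * addition overflowed, so the oversized request is rejected. */
    static unsigned char *alloc_packet_example(unsigned int size)
    {
        if (size + 8 < size)          /* unsigned wrap-around check */
            return NULL;
        return malloc(size + 8);      /* room for fourcc and size fields */
    }
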
@ -333,7 +333,7 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
                 {
                     value = (char *)av_mallocz(value_len);
                     get_str16_nolen(pb, value_len, value, value_len);
-                    if (strcmp(name,"WM/AlbumTitle")==0) { strcpy(s->album, value); }
+                    if (strcmp(name,"WM/AlbumTitle")==0) { pstrcpy(s->album, sizeof(s->album), value); }
                     av_free(value);
                 }
                 if ((value_type >= 2) || (value_type <= 5)) // boolean or DWORD or QWORD or WORD
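
The asf_read_header change swaps an unbounded strcpy() for a length-bounded copy into the fixed-size album field. A generic sketch of such a copy, truncating and always NUL-terminating (bounded_copy is an invented helper, not FFmpeg's pstrcpy):

    #include <stddef.h>

    /* Copy at most dst_size-1 characters and always NUL-terminate. */
    static void bounded_copy(char *dst, size_t dst_size, const char *src)
    {
        size_t i;

        if (dst_size == 0)
            return;
        for (i = 0; i + 1 < dst_size && src[i] != '\0'; i++)
            dst[i] = src[i];
        dst[i] = '\0';
    }
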
@ -350,6 +350,9 @@ int get_buffer(ByteIOContext *s, unsigned char *buf, int size)
 int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size)
 {
     int len;

+    if(size<0)
+        return -1;
+
     len = s->buf_end - s->buf_ptr;
     if (len == 0) {
@ -171,7 +171,7 @@ static int flic_read_packet(AVFormatContext *s,
         size = LE_32(&preamble[0]);
         magic = LE_16(&preamble[4]);

-        if ((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) {
+        if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
             if (av_new_packet(pkt, size)) {
                 ret = AVERROR_IO;
                 break;
@ -196,6 +196,8 @@ static int roq_read_packet(AVFormatContext *s,

         chunk_type = LE_16(&preamble[0]);
         chunk_size = LE_32(&preamble[2]);
+        if(chunk_size > INT_MAX)
+            return AVERROR_INVALIDDATA;

         switch (chunk_type) {

@ -231,6 +231,8 @@ static int film_read_packet(AVFormatContext *s,
         (film->video_type == CODEC_ID_CINEPAK)) {
         if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
             return AVERROR_NOMEM;
+        if(pkt->size < 10)
+            return -1;
         ret = get_buffer(pb, pkt->data, 10);
         /* skip the non-spec CVID bytes */
         url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
@ -57,7 +57,7 @@ int match_ext(const char *filename, const char *extensions)
     p = extensions;
     for(;;) {
         q = ext1;
-        while (*p != '\0' && *p != ',')
+        while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
             *q++ = *p++;
         *q = '\0';
         if (!strcasecmp(ext1, ext))
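
The match_ext() fix bounds how many characters of each comma-separated extension are copied into the local ext1 buffer. The same tokenizing pattern in isolation (next_token is an invented helper; overlong tokens are truncated rather than overflowing the destination):

    #include <stddef.h>

    /* Copy one comma-separated token into a fixed buffer without running
     * past its end; always consume the input so scanning can continue. */
    static const char *next_token(const char *p, char *out, size_t out_size)
    {
        size_t n = 0;

        while (*p != '\0' && *p != ',') {
            if (out_size != 0 && n + 1 < out_size)   /* leave room for '\0' */
                out[n++] = *p;
            p++;                                     /* skip excess characters */
        }
        if (out_size != 0)
            out[n] = '\0';
        if (*p == ',')
            p++;
        return p;
    }
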
@ -169,14 +169,16 @@ static int wc3_read_header(AVFormatContext *s,
             if ((ret = get_buffer(pb, preamble, 4)) != 4)
                 return AVERROR_IO;
             wc3->palette_count = LE_32(&preamble[0]);
-            if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE)
+            if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){
+                wc3->palette_count= 0;
                 return -1;
+            }
             wc3->palettes = av_malloc(wc3->palette_count * PALETTE_SIZE);
             break;

         case BNAM_TAG:
             /* load up the name */
-            if (size < 512)
+            if ((unsigned)size < 512)
                 bytes_to_read = size;
             else
                 bytes_to_read = 512;
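
The wc3_read_header hunk above rejects palette counts that would make palette_count * PALETTE_SIZE overflow, and now also zeroes the count so later code cannot index a table that was never allocated. A hedged sketch of the multiplication guard (PALETTE_ENTRY_SIZE and alloc_palettes_example are invented placeholders, not the demuxer's names):

    #include <limits.h>
    #include <stdlib.h>

    #define PALETTE_ENTRY_SIZE (256 * 3)   /* assumed bytes per palette */

    /* Refuse counts where count * entry_size would wrap around before
     * it ever reaches the allocator. */
    static void *alloc_palettes_example(unsigned int count)
    {
        if (count >= UINT_MAX / PALETTE_ENTRY_SIZE)
            return NULL;
        return malloc((size_t)count * PALETTE_ENTRY_SIZE);
    }
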
@ -195,7 +197,7 @@ static int wc3_read_header(AVFormatContext *s,

         case PALT_TAG:
             /* one of several palettes */
-            if (current_palette >= wc3->palette_count)
+            if ((unsigned)current_palette >= wc3->palette_count)
                 return AVERROR_INVALIDDATA;
             if ((ret = get_buffer(pb,
                 &wc3->palettes[current_palette * PALETTE_SIZE],
@ -331,7 +333,7 @@ static int wc3_read_packet(AVFormatContext *s,
 #if 0
             url_fseek(pb, size, SEEK_CUR);
 #else
-            if ((ret = get_buffer(pb, text, size)) != size)
+            if ((unsigned)size > sizeof(text) || (ret = get_buffer(pb, text, size)) != size)
                 ret = AVERROR_IO;
             else {
                 int i = 0;