mirror of https://github.com/xenia-project/FFmpeg.git, synced 2024-11-24 03:59:43 +00:00
Merge commit 'a0cabd0a27587525e90a44660c795d40d2f44fe2'
* commit 'a0cabd0a27587525e90a44660c795d40d2f44fe2':
  mimic: cosmetics, reformat
  iff: drop ff_ prefix from a static function.
  pngdec: cosmetics, reformat.
  dxa: return meaningful error codes.
  eatgq: cosmetics, reformat.

Conflicts:
    libavcodec/dxa.c
    libavcodec/eatgq.c
    libavcodec/iff.c
    libavcodec/mimic.c
    libavcodec/pngdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
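Of the patches pulled in above, "dxa: return meaningful error codes" is the one that changes more than formatting: bare -1 or generic error returns are replaced with specific AVERROR values, which is why an AVERROR_INVALIDDATA line shows up in the dxa.c hunk below. The following is only a minimal sketch of that convention, not code from dxa.c; the decode_chunk() helper and its signature are invented for illustration, while AVERROR() and AVERROR_INVALIDDATA are the real macros from libavutil/error.h.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <libavutil/error.h>   /* AVERROR(), AVERROR_INVALIDDATA */

/* Hypothetical helper showing the error-code convention: return a distinct
 * AVERROR value per failure mode instead of a bare -1, so callers can tell
 * corrupt input apart from an allocation failure. */
static int decode_chunk(const uint8_t *buf, int buf_size, uint8_t **out)
{
    if (buf_size < 4)           /* truncated or corrupt bitstream */
        return AVERROR_INVALIDDATA;

    *out = malloc(buf_size);
    if (!*out)                  /* resource problem, not bad data */
        return AVERROR(ENOMEM);

    memcpy(*out, buf, buf_size);
    return buf_size;            /* bytes consumed (>= 0) on success */
}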
libavcodec/dxa.c

@@ -211,7 +211,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
        pc = 1;
    }

    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0){
    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
@@ -234,7 +234,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
        if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
                       bytestream2_get_bytes_left(&gb)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
            return AVERROR_INVALIDDATA;
            return AVERROR_UNKNOWN;
        }
    }
    switch(compr){
libavcodec/eatgq.c

@@ -40,137 +40,141 @@
typedef struct TgqContext {
    AVCodecContext *avctx;
    AVFrame frame;
    int width,height;
    int width, height;
    ScanTable scantable;
    int qtable[64];
    DECLARE_ALIGNED(16, int16_t, block)[6][64];
    GetByteContext gb;
} TgqContext;

static av_cold int tgq_decode_init(AVCodecContext *avctx){
static av_cold int tgq_decode_init(AVCodecContext *avctx)
{
    TgqContext *s = avctx->priv_data;
    uint8_t idct_permutation[64];
    s->avctx = avctx;
    ff_init_scantable_permutation(idct_permutation, FF_NO_IDCT_PERM);
    ff_init_scantable(idct_permutation, &s->scantable, ff_zigzag_direct);
    avctx->time_base = (AVRational){1, 15};
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    return 0;
}

static void tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb){
static void tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb)
{
    uint8_t *perm = s->scantable.permutated;
    int i,j,value;
    block[0] = get_sbits(gb,8) * s->qtable[0];
    for(i=1; i<64; ) {
        switch(show_bits(gb,3)) {
    int i, j, value;
    block[0] = get_sbits(gb, 8) * s->qtable[0];
    for (i = 1; i < 64;) {
        switch (show_bits(gb, 3)) {
        case 4:
            block[perm[i++]] = 0;
        case 0:
            block[perm[i++]] = 0;
            skip_bits(gb,3);
            skip_bits(gb, 3);
            break;
        case 5:
        case 1:
            skip_bits(gb,2);
            value = get_bits(gb,6);
            for(j=0; j<value; j++)
            skip_bits(gb, 2);
            value = get_bits(gb, 6);
            for (j = 0; j < value; j++)
                block[perm[i++]] = 0;
            break;
        case 6:
            skip_bits(gb,3);
            skip_bits(gb, 3);
            block[perm[i]] = -s->qtable[perm[i]];
            i++;
            break;
        case 2:
            skip_bits(gb,3);
            skip_bits(gb, 3);
            block[perm[i]] = s->qtable[perm[i]];
            i++;
            break;
        case 7: // 111b
        case 3: // 011b
            skip_bits(gb,2);
            if (show_bits(gb,6)==0x3F) {
            skip_bits(gb, 2);
            if (show_bits(gb, 6) == 0x3F) {
                skip_bits(gb, 6);
                block[perm[i]] = get_sbits(gb,8)*s->qtable[perm[i]];
            }else{
                block[perm[i]] = get_sbits(gb,6)*s->qtable[perm[i]];
                block[perm[i]] = get_sbits(gb, 8) * s->qtable[perm[i]];
            } else {
                block[perm[i]] = get_sbits(gb, 6) * s->qtable[perm[i]];
            }
            i++;
            break;
        }
    }
    block[0] += 128<<4;
    block[0] += 128 << 4;
}

static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64], int mb_x, int mb_y){
    int linesize= s->frame.linesize[0];
    uint8_t *dest_y = s->frame.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
static void tgq_idct_put_mb(TgqContext *s, int16_t (*block)[64],
                            int mb_x, int mb_y)
{
    int linesize = s->frame.linesize[0];
    uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;

    ff_ea_idct_put_c(dest_y , linesize, block[0]);
    ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
    ff_ea_idct_put_c(dest_y + 8*linesize , linesize, block[2]);
    ff_ea_idct_put_c(dest_y + 8*linesize + 8, linesize, block[3]);
    if(!(s->avctx->flags&CODEC_FLAG_GRAY)){
    ff_ea_idct_put_c(dest_y , linesize, block[0]);
    ff_ea_idct_put_c(dest_y + 8, linesize, block[1]);
    ff_ea_idct_put_c(dest_y + 8 * linesize , linesize, block[2]);
    ff_ea_idct_put_c(dest_y + 8 * linesize + 8, linesize, block[3]);
    if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
        ff_ea_idct_put_c(dest_cb, s->frame.linesize[1], block[4]);
        ff_ea_idct_put_c(dest_cr, s->frame.linesize[2], block[5]);
    }
}

static inline void tgq_dconly(TgqContext *s, unsigned char *dst, int dst_stride, int dc){
    int level = av_clip_uint8((dc*s->qtable[0] + 2056)>>4);
static inline void tgq_dconly(TgqContext *s, unsigned char *dst,
                              int dst_stride, int dc)
{
    int level = av_clip_uint8((dc*s->qtable[0] + 2056) >> 4);
    int j;
    for(j=0;j<8;j++)
        memset(dst+j*dst_stride, level, 8);
    for (j = 0; j < 8; j++)
        memset(dst + j * dst_stride, level, 8);
}

static void tgq_idct_put_mb_dconly(TgqContext *s, int mb_x, int mb_y, const int8_t *dc)
{
    int linesize= s->frame.linesize[0];
    uint8_t *dest_y = s->frame.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
    tgq_dconly(s,dest_y , linesize, dc[0]);
    tgq_dconly(s,dest_y + 8, linesize, dc[1]);
    tgq_dconly(s,dest_y + 8*linesize , linesize, dc[2]);
    tgq_dconly(s,dest_y + 8*linesize + 8, linesize, dc[3]);
    if(!(s->avctx->flags&CODEC_FLAG_GRAY)) {
        tgq_dconly(s,dest_cb, s->frame.linesize[1], dc[4]);
        tgq_dconly(s,dest_cr, s->frame.linesize[2], dc[5]);
    int linesize = s->frame.linesize[0];
    uint8_t *dest_y = s->frame.data[0] + (mb_y * 16 * linesize) + mb_x * 16;
    uint8_t *dest_cb = s->frame.data[1] + (mb_y * 8 * s->frame.linesize[1]) + mb_x * 8;
    uint8_t *dest_cr = s->frame.data[2] + (mb_y * 8 * s->frame.linesize[2]) + mb_x * 8;
    tgq_dconly(s, dest_y, linesize, dc[0]);
    tgq_dconly(s, dest_y + 8, linesize, dc[1]);
    tgq_dconly(s, dest_y + 8 * linesize, linesize, dc[2]);
    tgq_dconly(s, dest_y + 8 * linesize + 8, linesize, dc[3]);
    if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
        tgq_dconly(s, dest_cb, s->frame.linesize[1], dc[4]);
        tgq_dconly(s, dest_cr, s->frame.linesize[2], dc[5]);
    }
}

/**
 * @return <0 on error
 */
static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x){
static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x)
{
    int mode;
    int i;
    int8_t dc[6];

    mode = bytestream2_get_byte(&s->gb);
    if (mode>12) {
    if (mode > 12) {
        GetBitContext gb;
        init_get_bits(&gb, s->gb.buffer, FFMIN(bytestream2_get_bytes_left(&s->gb), mode) * 8);
        for(i=0; i<6; i++)
        for (i = 0; i < 6; i++)
            tgq_decode_block(s, s->block[i], &gb);
        tgq_idct_put_mb(s, s->block, mb_x, mb_y);
        bytestream2_skip(&s->gb, mode);
    }else{
        if (mode==3) {
    } else {
        if (mode == 3) {
            memset(dc, bytestream2_get_byte(&s->gb), 4);
            dc[4] = bytestream2_get_byte(&s->gb);
            dc[5] = bytestream2_get_byte(&s->gb);
        }else if (mode==6) {
        } else if (mode == 6) {
            bytestream2_get_buffer(&s->gb, dc, 6);
        }else if (mode==12) {
        } else if (mode == 12) {
            for (i = 0; i < 6; i++) {
                dc[i] = bytestream2_get_byte(&s->gb);
                bytestream2_skip(&s->gb, 1);
            }
        }else{
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
            return -1;
        }
@@ -179,21 +183,24 @@ static int tgq_decode_mb(TgqContext *s, int mb_y, int mb_x){
    return 0;
}

static void tgq_calculate_qtable(TgqContext *s, int quant){
    int i,j;
    const int a = (14*(100-quant))/100 + 1;
    const int b = (11*(100-quant))/100 + 4;
    for(j=0;j<8;j++)
        for(i=0;i<8;i++)
            s->qtable[j*8+i] = ((a*(j+i)/(7+7) + b)*ff_inv_aanscales[j*8+i])>>(14-4);
static void tgq_calculate_qtable(TgqContext *s, int quant)
{
    int i, j;
    const int a = (14 * (100 - quant)) / 100 + 1;
    const int b = (11 * (100 - quant)) / 100 + 4;
    for (j = 0; j < 8; j++)
        for (i = 0; i < 8; i++)
            s->qtable[j * 8 + i] = ((a * (j + i) / (7 + 7) + b) *
                                    ff_inv_aanscales[j * 8 + i]) >> (14 - 4);
}

static int tgq_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt){
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TgqContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    TgqContext *s = avctx->priv_data;
    int x, y, ret;
    int big_endian;

@@ -240,7 +247,8 @@ static int tgq_decode_frame(AVCodecContext *avctx,
    return avpkt->size;
}

static av_cold int tgq_decode_end(AVCodecContext *avctx){
static av_cold int tgq_decode_end(AVCodecContext *avctx)
{
    TgqContext *s = avctx->priv_data;
    if (s->frame.data[0])
        s->avctx->release_buffer(avctx, &s->frame);
libavcodec/iff.c

@@ -139,7 +139,7 @@ static av_always_inline uint32_t gray2rgb(const uint32_t x) {
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 */
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
static int cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    IffContext *s = avctx->priv_data;
    int count, i;
@@ -671,10 +671,10 @@ static int decode_frame(AVCodecContext *avctx,
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return res;
        } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            if ((res = cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
                return res;
        } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32 && avctx->bits_per_coded_sample <= 8) {
            if ((res = ff_cmap_read_palette(avctx, s->mask_palbuf)) < 0)
            if ((res = cmap_read_palette(avctx, s->mask_palbuf)) < 0)
                return res;
        }
        s->init = 1;
libavcodec/mimic.c

@@ -114,9 +114,9 @@ static av_cold int mimic_decode_init(AVCodecContext *avctx)
    MimicContext *ctx = avctx->priv_data;

    ctx->prev_index = 0;
    ctx->cur_index = 15;
    ctx->cur_index = 15;

    if(init_vlc(&ctx->vlc, 11, FF_ARRAY_ELEMS(huffbits),
    if (init_vlc(&ctx->vlc, 11, FF_ARRAY_ELEMS(huffbits),
                 huffbits, 1, 1, huffcodes, 4, 4, 0)) {
        av_log(avctx, AV_LOG_ERROR, "error initializing vlc table\n");
        return -1;
@@ -131,7 +131,8 @@ static int mimic_decode_update_thread_context(AVCodecContext *avctx, const AVCod
{
    MimicContext *dst = avctx->priv_data, *src = avctx_from->priv_data;

    if (avctx == avctx_from) return 0;
    if (avctx == avctx_from)
        return 0;

    dst->cur_index = src->next_cur_index;
    dst->prev_index = src->next_prev_index;
@@ -190,22 +191,22 @@ static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale)

    block[0] = get_bits(&ctx->gb, 8) << 3;

    for(pos = 1; pos < num_coeffs; pos++) {
    for (pos = 1; pos < num_coeffs; pos++) {
        uint32_t vlc, num_bits;
        int value;
        int coeff;

        vlc = get_vlc2(&ctx->gb, ctx->vlc.table, ctx->vlc.bits, 3);
        if(!vlc) /* end-of-block code */
        if (!vlc) /* end-of-block code */
            return 1;
        if(vlc == -1)
        if (vlc == -1)
            return 0;

        /* pos_add and num_bits are coded in the vlc code */
        pos += vlc&15; // pos_add
        num_bits = vlc>>4; // num_bits
        pos += vlc & 15; // pos_add
        num_bits = vlc >> 4; // num_bits

        if(pos >= 64)
        if (pos >= 64)
            return 0;

        value = get_bits(&ctx->gb, num_bits);
@@ -214,7 +215,7 @@ static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale)
         * a factor of 4 was added to the input */

        coeff = vlcdec_lookup[num_bits][value];
        if(pos<3)
        if (pos < 3)
            coeff <<= 4;
        else /* TODO Use >> 10 instead of / 1001 */
            coeff = (coeff * qscale) / 1001;
@@ -230,39 +231,38 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
{
    int y, x, plane, cur_row = 0;

    for(plane = 0; plane < 3; plane++) {
    for (plane = 0; plane < 3; plane++) {
        const int is_chroma = !!plane;
        const int qscale = av_clip(10000-quality,is_chroma?1000:2000,10000)<<2;
        const int stride = ctx->flipped_ptrs[ctx->cur_index].linesize[plane];
        const uint8_t *src = ctx->flipped_ptrs[ctx->prev_index].data[plane];
        uint8_t *dst = ctx->flipped_ptrs[ctx->cur_index ].data[plane];

        for(y = 0; y < ctx->num_vblocks[plane]; y++) {
            for(x = 0; x < ctx->num_hblocks[plane]; x++) {
        const int qscale = av_clip(10000 - quality, is_chroma ? 1000 : 2000,
                                   10000) << 2;
        const int stride = ctx->flipped_ptrs[ctx->cur_index ].linesize[plane];
        const uint8_t *src = ctx->flipped_ptrs[ctx->prev_index].data[plane];
        uint8_t *dst = ctx->flipped_ptrs[ctx->cur_index ].data[plane];

        for (y = 0; y < ctx->num_vblocks[plane]; y++) {
            for (x = 0; x < ctx->num_hblocks[plane]; x++) {
                /* Check for a change condition in the current block.
                 * - iframes always change.
                 * - Luma plane changes on get_bits1 == 0
                 * - Chroma planes change on get_bits1 == 1 */
                if(is_iframe || get_bits1(&ctx->gb) == is_chroma) {

                if (is_iframe || get_bits1(&ctx->gb) == is_chroma) {
                    /* Luma planes may use a backreference from the 15 last
                     * frames preceding the previous. (get_bits1 == 1)
                     * Chroma planes don't use backreferences. */
                    if(is_chroma || is_iframe || !get_bits1(&ctx->gb)) {

                        if(!vlc_decode_block(ctx, num_coeffs, qscale))
                    if (is_chroma || is_iframe || !get_bits1(&ctx->gb)) {
                        if (!vlc_decode_block(ctx, num_coeffs, qscale))
                            return 0;
                        ctx->dsp.idct_put(dst, stride, ctx->dct_block);
                    } else {
                        unsigned int backref = get_bits(&ctx->gb, 4);
                        int index = (ctx->cur_index+backref)&15;
                        uint8_t *p = ctx->flipped_ptrs[index].data[0];
                        int index = (ctx->cur_index + backref) & 15;
                        uint8_t *p = ctx->flipped_ptrs[index].data[0];

                        if (index != ctx->cur_index && p) {
                            ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
                            ff_thread_await_progress(&ctx->buf_ptrs[index],
                                                     cur_row, 0);
                            p += src -
                                 ctx->flipped_ptrs[ctx->prev_index].data[plane];
                                 ctx->flipped_ptrs[ctx->prev_index].data[plane];
                            ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8);
                        } else {
                            av_log(ctx->avctx, AV_LOG_ERROR,
@@ -270,16 +270,18 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
                        }
                    }
                } else {
                    ff_thread_await_progress(&ctx->buf_ptrs[ctx->prev_index], cur_row, 0);
                    ff_thread_await_progress(&ctx->buf_ptrs[ctx->prev_index],
                                             cur_row, 0);
                    ctx->dsp.put_pixels_tab[1][0](dst, src, stride, 8);
                }
                src += 8;
                dst += 8;
            }
            src += (stride - ctx->num_hblocks[plane])<<3;
            dst += (stride - ctx->num_hblocks[plane])<<3;
            src += (stride - ctx->num_hblocks[plane]) << 3;
            dst += (stride - ctx->num_hblocks[plane]) << 3;

            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], cur_row++, 0);
            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index],
                                      cur_row++, 0);
        }
    }

@@ -293,10 +295,10 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
static void prepare_avpic(MimicContext *ctx, AVPicture *dst, AVPicture *src)
{
    int i;
    dst->data[0] = src->data[0]+( ctx->avctx->height -1)*src->linesize[0];
    dst->data[1] = src->data[2]+((ctx->avctx->height>>1)-1)*src->linesize[2];
    dst->data[2] = src->data[1]+((ctx->avctx->height>>1)-1)*src->linesize[1];
    for(i = 0; i < 3; i++)
    dst->data[0] = src->data[0] + ( ctx->avctx->height - 1) * src->linesize[0];
    dst->data[1] = src->data[2] + ((ctx->avctx->height >> 1) - 1) * src->linesize[2];
    dst->data[2] = src->data[1] + ((ctx->avctx->height >> 1) - 1) * src->linesize[1];
    for (i = 0; i < 3; i++)
        dst->linesize[i] = -src->linesize[i];
}

@@ -304,13 +306,13 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MimicContext *ctx = avctx->priv_data;
    int buf_size = avpkt->size;
    int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;
    MimicContext *ctx = avctx->priv_data;
    GetByteContext gb;
    int is_pframe;
    int width, height;
    int quality, num_coeffs;
    int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;
    int res;

    if (buf_size <= MIMIC_HEADER_SIZE) {
@@ -328,11 +330,11 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
    num_coeffs = bytestream2_get_byteu(&gb);
    bytestream2_skip(&gb, 3); /* some constant */

    if(!ctx->avctx) {
    if (!ctx->avctx) {
        int i;

        if(!(width == 160 && height == 120) &&
           !(width == 320 && height == 240)) {
        if (!(width == 160 && height == 120) &&
            !(width == 320 && height == 240)) {
            av_log(avctx, AV_LOG_ERROR, "invalid width/height!\n");
            return -1;
        }
@@ -341,23 +343,24 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
        avctx->width = width;
        avctx->height = height;
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        for(i = 0; i < 3; i++) {
        for (i = 0; i < 3; i++) {
            ctx->num_vblocks[i] = -((-height) >> (3 + !!i));
            ctx->num_hblocks[i] = width >> (3 + !!i) ;
            ctx->num_hblocks[i] = width >> (3 + !!i);
        }
    } else if(width != ctx->avctx->width || height != ctx->avctx->height) {
    } else if (width != ctx->avctx->width || height != ctx->avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "resolution changing is not supported\n");
        return -1;
    }

    if(is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) {
    if (is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) {
        av_log(avctx, AV_LOG_ERROR, "decoding must start with keyframe\n");
        return -1;
    }

    ctx->buf_ptrs[ctx->cur_index].reference = 3;
    ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P:AV_PICTURE_TYPE_I;
    if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
    ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P :
                                                          AV_PICTURE_TYPE_I;
    if (ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
@@ -371,12 +374,12 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
    ff_thread_finish_setup(avctx);

    av_fast_padded_malloc(&ctx->swap_buf, &ctx->swap_buf_size, swap_buf_size);
    if(!ctx->swap_buf)
    if (!ctx->swap_buf)
        return AVERROR(ENOMEM);

    ctx->dsp.bswap_buf(ctx->swap_buf,
                       (const uint32_t*) (buf + MIMIC_HEADER_SIZE),
                       swap_buf_size>>2);
                       (const uint32_t*) (buf + MIMIC_HEADER_SIZE),
                       swap_buf_size >> 2);
    init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);

    res = decode(ctx, quality, num_coeffs, !is_pframe);
@@ -395,7 +398,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
    ctx->cur_index = ctx->next_cur_index;

    /* Only release frames that aren't used for backreferences anymore */
    if(ctx->buf_ptrs[ctx->cur_index].data[0])
    if (ctx->buf_ptrs[ctx->cur_index].data[0])
        ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);

    return buf_size;
@@ -411,8 +414,8 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
    if (avctx->internal->is_copy)
        return 0;

    for(i = 0; i < 16; i++)
        if(ctx->buf_ptrs[i].data[0])
    for (i = 0; i < 16; i++)
        if (ctx->buf_ptrs[i].data[0])
            ff_thread_release_buffer(avctx, &ctx->buf_ptrs[i]);
    ff_free_vlc(&ctx->vlc);
libavcodec/pngdec.c

@@ -96,12 +96,13 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
    uint8_t *d;
    const uint8_t *s;

    mask = png_pass_mask[pass];
    mask = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];
    switch(bits_per_pixel) {

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for(x = 0; x < width; x++) {
        for (x = 0; x < width; x++) {
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
@@ -114,8 +115,8 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
        break;
    case 2:
        src_x = 0;
        for(x = 0; x < width; x++) {
            int j2 = 2*(x&3);
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
@@ -128,7 +129,7 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
        break;
    case 4:
        src_x = 0;
        for(x = 0; x < width; x++) {
        for (x = 0; x < width; x++) {
            int j2 = 4*(x&1);
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
@@ -142,9 +143,9 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
        break;
    default:
        bpp = bits_per_pixel >> 3;
        d = dst;
        s = src;
        for(x = 0; x < width; x++) {
        d = dst;
        s = src;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                memcpy(d, s, bpp);
@@ -160,14 +161,14 @@ static void png_put_interlaced_row(uint8_t *dst, int width,
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
{
    int i;
    for(i = 0; i < w; i++) {
    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;

        a = dst[i - bpp];
        b = top[i];
        c = top[i - bpp];

        p = b - c;
        p = b - c;
        pc = a - c;

        pa = abs(p);
@@ -219,20 +220,20 @@ static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
{
    int i, p, r, g, b, a;

    switch(filter_type) {
    switch (filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        for(i = 0; i < bpp; i++) {
        for (i = 0; i < bpp; i++) {
            dst[i] = src[i];
        }
        if(bpp == 4) {
        if (bpp == 4) {
            p = *(int*)dst;
            for(; i < size; i+=bpp) {
                int s = *(int*)(src+i);
                p = ((s&0x7f7f7f7f) + (p&0x7f7f7f7f)) ^ ((s^p)&0x80808080);
                *(int*)(dst+i) = p;
            for (; i < size; i += bpp) {
                int s = *(int*)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int*)(dst + i) = p;
            }
        } else {
#define OP_SUB(x,s,l) x+s
@@ -243,7 +244,7 @@ static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
        dsp->add_bytes_l2(dst, src, last, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for(i = 0; i < bpp; i++) {
        for (i = 0; i < bpp; i++) {
            p = (last[i] >> 1);
            dst[i] = p + src[i];
        }
@@ -251,17 +252,17 @@ static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for(i = 0; i < bpp; i++) {
        for (i = 0; i < bpp; i++) {
            p = last[i];
            dst[i] = p + src[i];
        }
        if(bpp > 2 && size > 4) {
        if (bpp > 2 && size > 4) {
            // would write off the end of the array if we let it process the last pixel with bpp=3
            int w = bpp==4 ? size : size-3;
            dsp->add_paeth_prediction(dst+i, src+i, last+i, w-i, bpp);
            int w = bpp == 4 ? size : size - 3;
            dsp->add_paeth_prediction(dst + i, src + i, last + i, w - i, bpp);
            i = w;
        }
        ff_add_png_paeth_prediction(dst+i, src+i, last+i, size-i, bpp);
        ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
        break;
    }
}
@@ -322,7 +323,7 @@ static void png_handle_row(PNGDecContext *s)
        }
    } else {
        got_line = 0;
        for(;;) {
        for (;;) {
            ptr = s->image_buf + s->image_linesize * s->y;
            if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
                /* if we already read one row, it is time to stop to
@@ -341,7 +342,7 @@ static void png_handle_row(PNGDecContext *s)
            s->y++;
            if (s->y == s->height) {
                memset(s->last_row, 0, s->row_size);
                for(;;) {
                for (;;) {
                    if (s->pass == NB_PASSES - 1) {
                        s->state |= PNG_ALLIMAGE;
                        goto the_end;
@@ -367,7 +368,7 @@ static int png_decode_idat(PNGDecContext *s, int length)
{
    int ret;
    s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
    s->zstream.next_in = (unsigned char *)s->gb.buffer;
    s->zstream.next_in = (unsigned char *)s->gb.buffer;
    bytestream2_skip(&s->gb, length);

    /* decode one line if possible */
@@ -382,7 +383,7 @@ static int png_decode_idat(PNGDecContext *s, int length)
                png_handle_row(s);
            }
            s->zstream.avail_out = s->crow_size;
            s->zstream.next_out = s->crow_buf;
            s->zstream.next_out = s->crow_buf;
        }
    }
    return 0;
@@ -508,19 +509,19 @@ static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    PNGDecContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVFrame *picture = data;
    AVDictionary *metadata = NULL;
    uint8_t *crow_buf_base = NULL;
    AVFrame *p;
    AVDictionary *metadata = NULL;
    uint8_t *crow_buf_base = NULL;
    uint32_t tag, length;
    int64_t sig;
    int ret;

    FFSWAP(AVFrame *, s->current_picture, s->last_picture);
    avctx->coded_frame= s->current_picture;
    avctx->coded_frame = s->current_picture;
    p = s->current_picture;

    bytestream2_init(&s->gb, buf, buf_size);
@@ -533,19 +534,18 @@ static int decode_frame(AVCodecContext *avctx,
        return -1;
    }

    s->y=
    s->state=0;
// memset(s, 0, sizeof(PNGDecContext));
    s->y = s->state = 0;

    /* init the zlib */
    s->zstream.zalloc = ff_png_zalloc;
    s->zstream.zfree = ff_png_zfree;
    s->zstream.zfree = ff_png_zfree;
    s->zstream.opaque = NULL;
    ret = inflateInit(&s->zstream);
    if (ret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned %d\n", ret);
        return -1;
    }
    for(;;) {
    for (;;) {
        if (bytestream2_get_bytes_left(&s->gb) <= 0) {
            av_log(avctx, AV_LOG_ERROR, "No bytes left\n");
            goto fail;
@@ -563,14 +563,14 @@ static int decode_frame(AVCodecContext *avctx,
               ((tag >> 8) & 0xff),
               ((tag >> 16) & 0xff),
               ((tag >> 24) & 0xff), length);
        switch(tag) {
        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if (length != 13)
                goto fail;
            s->width = bytestream2_get_be32(&s->gb);
            s->height = bytestream2_get_be32(&s->gb);
            if(av_image_check_size(s->width, s->height, 0, avctx)){
                s->width= s->height= 0;
            if (av_image_check_size(s->width, s->height, 0, avctx)) {
                s->width = s->height = 0;
                av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
                goto fail;
            }
@@ -582,7 +582,8 @@ static int decode_frame(AVCodecContext *avctx,
            bytestream2_skip(&s->gb, 4); /* crc */
            s->state |= PNG_IHDR;
            if (avctx->debug & FF_DEBUG_PICT_INFO)
                av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
                av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
                       "compression_type=%d filter_type=%d interlace_type=%d\n",
                    s->width, s->height, s->bit_depth, s->color_type,
                    s->compression_type, s->filter_type, s->interlace_type);
            break;
@@ -605,13 +606,13 @@ static int decode_frame(AVCodecContext *avctx,
            }
            if (!(s->state & PNG_IDAT)) {
                /* init image info */
                avctx->width = s->width;
                avctx->width = s->width;
                avctx->height = s->height;

                s->channels = ff_png_get_nb_channels(s->color_type);
                s->channels = ff_png_get_nb_channels(s->color_type);
                s->bits_per_pixel = s->bit_depth * s->channels;
                s->bpp = (s->bits_per_pixel + 7) >> 3;
                s->row_size = (avctx->width * s->bits_per_pixel + 7) >> 3;
                s->bpp = (s->bits_per_pixel + 7) >> 3;
                s->row_size = (avctx->width * s->bits_per_pixel + 7) >> 3;

                if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                    s->color_type == PNG_COLOR_TYPE_RGB) {
@@ -645,16 +646,16 @@ static int decode_frame(AVCodecContext *avctx,
                           s->bit_depth, s->color_type);
                    goto fail;
                }
                if(p->data[0])
                if (p->data[0])
                    avctx->release_buffer(avctx, p);

                p->reference= 3;
                if(ff_get_buffer(avctx, p) < 0){
                p->reference = 3;
                if (ff_get_buffer(avctx, p) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                    goto fail;
                }
                p->pict_type= AV_PICTURE_TYPE_I;
                p->key_frame= 1;
                p->pict_type = AV_PICTURE_TYPE_I;
                p->key_frame = 1;
                p->interlaced_frame = !!s->interlace_type;

                /* compute the compressed row size */
@@ -669,7 +670,7 @@ static int decode_frame(AVCodecContext *avctx,
                }
                av_dlog(avctx, "row_size=%d crow_size =%d\n",
                        s->row_size, s->crow_size);
                s->image_buf = p->data[0];
                s->image_buf = p->data[0];
                s->image_linesize = p->linesize[0];
                /* copy the palette if needed */
                if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
@@ -690,9 +691,9 @@ static int decode_frame(AVCodecContext *avctx,
                    goto fail;

                /* we want crow_buf+1 to be 16-byte aligned */
                s->crow_buf = crow_buf_base + 15;
                s->crow_buf = crow_buf_base + 15;
                s->zstream.avail_out = s->crow_size;
                s->zstream.next_out = s->crow_buf;
                s->zstream.next_out = s->crow_buf;
            }
            s->state |= PNG_IDAT;
            if (png_decode_idat(s, length) < 0)
@@ -707,13 +708,13 @@ static int decode_frame(AVCodecContext *avctx,
                goto skip_tag;
            /* read the palette */
            n = length / 3;
            for(i=0;i<n;i++) {
            for (i = 0; i < n; i++) {
                r = bytestream2_get_byte(&s->gb);
                g = bytestream2_get_byte(&s->gb);
                b = bytestream2_get_byte(&s->gb);
                s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
            }
            for(;i<256;i++) {
            for (; i < 256; i++) {
                s->palette[i] = (0xFFU << 24);
            }
            s->state |= PNG_PLTE;
@@ -729,7 +730,7 @@ static int decode_frame(AVCodecContext *avctx,
                length > 256 ||
                !(s->state & PNG_PLTE))
                goto skip_tag;
            for(i=0;i<length;i++) {
            for (i = 0; i < length; i++) {
                v = bytestream2_get_byte(&s->gb);
                s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
            }
@@ -763,68 +764,68 @@ static int decode_frame(AVCodecContext *avctx,
    }
 exit_loop:

    if(s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE){
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE){
        int i, j;
        uint8_t *pd = s->current_picture->data[0];
        for(j=0; j < s->height; j++) {
            for(i=s->width/8-1; i>=0; i--) {
                pd[8*i+7]= pd[i] &1;
                pd[8*i+6]= (pd[i]>>1)&1;
                pd[8*i+5]= (pd[i]>>2)&1;
                pd[8*i+4]= (pd[i]>>3)&1;
                pd[8*i+3]= (pd[i]>>4)&1;
                pd[8*i+2]= (pd[i]>>5)&1;
                pd[8*i+1]= (pd[i]>>6)&1;
                pd[8*i+0]= pd[i]>>7;
        for (j = 0; j < s->height; j++) {
            for (i = s->width/8-1; i >= 0; i--) {
                pd[8*i + 7]= pd[i] & 1;
                pd[8*i + 6]= (pd[i]>>1) & 1;
                pd[8*i + 5]= (pd[i]>>2) & 1;
                pd[8*i + 4]= (pd[i]>>3) & 1;
                pd[8*i + 3]= (pd[i]>>4) & 1;
                pd[8*i + 2]= (pd[i]>>5) & 1;
                pd[8*i + 1]= (pd[i]>>6) & 1;
                pd[8*i + 0]= pd[i]>>7;
            }
            pd += s->image_linesize;
        }
    }
    if(s->bits_per_pixel == 2){
    if (s->bits_per_pixel == 2){
        int i, j;
        uint8_t *pd = s->current_picture->data[0];
        for(j=0; j < s->height; j++) {
            i = s->width/4;
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE){
                if ((s->width&3) >= 3) pd[4*i+2]= (pd[i]>>2)&3;
                if ((s->width&3) >= 2) pd[4*i+1]= (pd[i]>>4)&3;
                if ((s->width&3) >= 1) pd[4*i+0]= pd[i]>>6;
                for(i--; i>=0; i--) {
                    pd[4*i+3]= pd[i] &3;
                    pd[4*i+2]= (pd[i]>>2)&3;
                    pd[4*i+1]= (pd[i]>>4)&3;
                    pd[4*i+0]= pd[i]>>6;
                }
                if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
                if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
                if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]= pd[i] & 3;
                    pd[4*i + 2]= (pd[i]>>2) & 3;
                    pd[4*i + 1]= (pd[i]>>4) & 3;
                    pd[4*i + 0]= pd[i]>>6;
                }
            } else {
                if ((s->width&3) >= 3) pd[4*i+2]= ((pd[i]>>2)&3)*0x55;
                if ((s->width&3) >= 2) pd[4*i+1]= ((pd[i]>>4)&3)*0x55;
                if ((s->width&3) >= 1) pd[4*i+0]= ( pd[i]>>6 )*0x55;
                for(i--; i>=0; i--) {
                    pd[4*i+3]= ( pd[i] &3)*0x55;
                    pd[4*i+2]= ((pd[i]>>2)&3)*0x55;
                    pd[4*i+1]= ((pd[i]>>4)&3)*0x55;
                    pd[4*i+0]= ( pd[i]>>6 )*0x55;
                if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]= ( pd[i] & 3)*0x55;
                    pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                    pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                    pd[4*i + 0]= ( pd[i]>>6 )*0x55;
                }
            }
            pd += s->image_linesize;
        }
    }
    if(s->bits_per_pixel == 4){
    if (s->bits_per_pixel == 4){
        int i, j;
        uint8_t *pd = s->current_picture->data[0];
        for(j=0; j < s->height; j++) {
            i=s->width/2;
        for (j = 0; j < s->height; j++) {
            i = s->width/2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE){
                if (s->width&1) pd[2*i+0]= pd[i]>>4;
                for(i--; i>=0; i--) {
                    pd[2*i+1]= pd[i]&15;
                    pd[2*i+0]= pd[i]>>4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                }
            } else {
                if (s->width&1) pd[2*i+0]= (pd[i]>>4)*0x11;
                for(i--; i>=0; i--) {
                    pd[2*i+1]= (pd[i]&15)*0x11;
                    pd[2*i+0]= (pd[i]>>4)*0x11;
                if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += s->image_linesize;
@@ -832,29 +833,29 @@ static int decode_frame(AVCodecContext *avctx,
    }

    /* handle p-frames only if a predecessor frame is available */
    if(s->last_picture->data[0] != NULL) {
        if( !(avpkt->flags & AV_PKT_FLAG_KEY)
    if (s->last_picture->data[0] != NULL) {
        if ( !(avpkt->flags & AV_PKT_FLAG_KEY)
            && s->last_picture->width == s->current_picture->width
            && s->last_picture->height== s->current_picture->height
            && s->last_picture->format== s->current_picture->format
        ) {
            int i, j;
            uint8_t *pd = s->current_picture->data[0];
            uint8_t *pd = s->current_picture->data[0];
            uint8_t *pd_last = s->last_picture->data[0];

            for(j=0; j < s->height; j++) {
                for(i=0; i < s->width * s->bpp; i++) {
            for (j = 0; j < s->height; j++) {
                for (i = 0; i < s->width * s->bpp; i++) {
                    pd[i] += pd_last[i];
                }
                pd += s->image_linesize;
                pd += s->image_linesize;
                pd_last += s->image_linesize;
            }
        }
    }

    s->current_picture->metadata = metadata;
    metadata = NULL;
    *picture= *s->current_picture;
    metadata = NULL;
    *picture = *s->current_picture;
    *got_frame = 1;

    ret = bytestream2_tell(&s->gb);
@@ -876,7 +877,7 @@ static av_cold int png_dec_init(AVCodecContext *avctx)
    PNGDecContext *s = avctx->priv_data;

    s->current_picture = &s->picture1;
    s->last_picture = &s->picture2;
    s->last_picture = &s->picture2;
    avcodec_get_frame_defaults(&s->picture1);
    avcodec_get_frame_defaults(&s->picture2);