mirror of https://github.com/xenia-project/FFmpeg.git
synced 2024-11-27 21:40:34 +00:00
Merge remote-tracking branch 'qatar/master'
* qatar/master: (58 commits)
  amrnbdec: check frame size before decoding.
  cscd: use negative error values to indicate decode_init() failures.
  h264: prevent overreads in intra PCM decoding.
  FATE: do not decode audio in the nuv test.
  dxa: set audio stream time base using the sample rate
  psx-str: do not allow seeking by bytes
  asfdec: Do not set AVCodecContext.frame_size
  vqf: set packet parameters after av_new_packet()
  mpegaudiodec: use DSPUtil.butterflies_float().
  FATE: add mp3 test for sample that exhibited false overreads
  fate: add cdxl test for bit line plane arrangement
  vmnc: return error on decode_init() failure.
  libvorbis: add/update error messages
  libvorbis: use AVFifoBuffer for output packet buffer
  libvorbis: remove unneeded e_o_s check
  libvorbis: check return values for functions that can return errors
  libvorbis: use float input instead of s16
  libvorbis: do not flush libvorbis analysis if dsp state was not initialized
  libvorbis: use VBR by default, with default quality of 3
  libvorbis: fix use of minrate/maxrate AVOptions
  ...

Conflicts:
  Changelog
  doc/APIchanges
  libavcodec/avcodec.h
  libavcodec/dpxenc.c
  libavcodec/libvorbis.c
  libavcodec/vmnc.c
  libavformat/asfdec.c
  libavformat/id3v2enc.c
  libavformat/internal.h
  libavformat/mp3enc.c
  libavformat/utils.c
  libavformat/version.h
  libswscale/utils.c
  tests/fate/video.mak
  tests/ref/fate/nuv
  tests/ref/fate/prores-alpha
  tests/ref/lavf/ffm
  tests/ref/vsynth1/prores
  tests/ref/vsynth2/prores

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 79ae084e9b
@@ -8,6 +8,7 @@ version next:
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing


version 0.10:
@@ -13,6 +13,7 @@ libavutil: 2011-04-18

API changes, most recent first:

<<<<<<< HEAD
2012-02-21 - xxxxxxx - lavc 54.4.100
Add av_get_pcm_codec() function.

@@ -35,6 +36,13 @@ API changes, most recent first:
2012-01-24 - xxxxxxx - lavfi 2.60.100
Add avfilter_graph_dump.

||||||| merged common ancestors
=======
2012-xx-xx - xxxxxxx - lavf 54.2.0 - avformat.h
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
used for dealing with attached pictures/cover art.

>>>>>>> qatar/master
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
Add AVERROR_UNKNOWN
@@ -370,5 +370,39 @@ Wrap around segment index once it reaches @var{limit}.
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
@end example

@section mp3

The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported; the
@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is
not written by default, but may be enabled with the @code{write_id3v1} option.

For seekable output the muxer also writes a Xing frame at the beginning, which
contains the number of frames in the file. It is useful for computing the
duration of VBR files.

The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
are supplied to the muxer in the form of a video stream with a single packet. There
can be any number of those streams, each of which corresponds to a single APIC frame.
The stream metadata tags @var{title} and @var{comment} map to APIC
@var{description} and @var{picture type} respectively. See
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.

Note that the APIC frames must be written at the beginning, so the muxer will
buffer the audio frames until it gets all the pictures. It is therefore advised
to provide the pictures as soon as possible to avoid excessive buffering.

Examples:

Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
@example
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example

Attach a picture to an mp3:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
@end example

@c man end MUXERS
@ -114,55 +114,6 @@ static VLC vlc_spectral[11];
|
||||
|
||||
static const char overread_err[] = "Input buffer exhausted before END element found\n";
|
||||
|
||||
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
{
|
||||
// For PCE based channel configurations map the channels solely based on tags.
|
||||
if (!ac->m4ac.chan_config) {
|
||||
return ac->tag_che_map[type][elem_id];
|
||||
}
|
||||
// For indexed channel configurations map the channels solely based on position.
|
||||
switch (ac->m4ac.chan_config) {
|
||||
case 7:
|
||||
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
|
||||
}
|
||||
case 6:
|
||||
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
|
||||
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
|
||||
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
|
||||
if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
|
||||
}
|
||||
case 5:
|
||||
if (ac->tags_mapped == 2 && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
|
||||
}
|
||||
case 4:
|
||||
if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
|
||||
}
|
||||
case 3:
|
||||
case 2:
|
||||
if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
|
||||
} else if (ac->m4ac.chan_config == 2) {
|
||||
return NULL;
|
||||
}
|
||||
case 1:
|
||||
if (!ac->tags_mapped && type == TYPE_SCE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
|
||||
}
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int count_channels(uint8_t (*layout)[3], int tags)
|
||||
{
|
||||
int i, sum = 0;
|
||||
@ -454,6 +405,90 @@ static void flush(AVCodecContext *avctx)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up channel positions based on a default channel configuration
|
||||
* as specified in table 1.17.
|
||||
*
|
||||
* @return Returns error status. 0 - OK, !0 - error
|
||||
*/
|
||||
static av_cold int set_default_channel_config(AVCodecContext *avctx,
|
||||
uint8_t (*layout_map)[3],
|
||||
int *tags,
|
||||
int channel_config)
|
||||
{
|
||||
if (channel_config < 1 || channel_config > 7) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
|
||||
channel_config);
|
||||
return -1;
|
||||
}
|
||||
*tags = tags_per_config[channel_config];
|
||||
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
||||
{
|
||||
// For PCE based channel configurations map the channels solely based on tags.
|
||||
if (!ac->m4ac.chan_config) {
|
||||
return ac->tag_che_map[type][elem_id];
|
||||
}
|
||||
// Allow single CPE stereo files to be signalled with mono configuration.
|
||||
if (!ac->tags_mapped && type == TYPE_CPE && ac->m4ac.chan_config == 1) {
|
||||
uint8_t layout_map[MAX_ELEM_ID*4][3];
|
||||
int layout_map_tags;
|
||||
|
||||
if (set_default_channel_config(ac->avctx, layout_map, &layout_map_tags,
|
||||
2) < 0)
|
||||
return NULL;
|
||||
if (output_configure(ac, layout_map, layout_map_tags,
|
||||
2, OC_TRIAL_FRAME) < 0)
|
||||
return NULL;
|
||||
|
||||
ac->m4ac.chan_config = 2;
|
||||
}
|
||||
// For indexed channel configurations map the channels solely based on position.
|
||||
switch (ac->m4ac.chan_config) {
|
||||
case 7:
|
||||
if (ac->tags_mapped == 3 && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
|
||||
}
|
||||
case 6:
|
||||
/* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
|
||||
instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
|
||||
encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
|
||||
if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
|
||||
}
|
||||
case 5:
|
||||
if (ac->tags_mapped == 2 && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
|
||||
}
|
||||
case 4:
|
||||
if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
|
||||
}
|
||||
case 3:
|
||||
case 2:
|
||||
if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
|
||||
} else if (ac->m4ac.chan_config == 2) {
|
||||
return NULL;
|
||||
}
|
||||
case 1:
|
||||
if (!ac->tags_mapped && type == TYPE_SCE) {
|
||||
ac->tags_mapped++;
|
||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
|
||||
}
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
|
||||
*
|
||||
@ -550,27 +585,6 @@ static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
|
||||
return tags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up channel positions based on a default channel configuration
|
||||
* as specified in table 1.17.
|
||||
*
|
||||
* @return Returns error status. 0 - OK, !0 - error
|
||||
*/
|
||||
static av_cold int set_default_channel_config(AVCodecContext *avctx,
|
||||
uint8_t (*layout_map)[3],
|
||||
int *tags,
|
||||
int channel_config)
|
||||
{
|
||||
if (channel_config < 1 || channel_config > 7) {
|
||||
av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
|
||||
channel_config);
|
||||
return -1;
|
||||
}
|
||||
*tags = tags_per_config[channel_config];
|
||||
memcpy(layout_map, aac_channel_layout_map[channel_config-1], *tags * sizeof(*layout_map));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode GA "General Audio" specific configuration; reference: table 4.1.
|
||||
*
|
||||
|
@@ -2138,6 +2138,17 @@ static av_cold int validate_options(AC3EncodeContext *s)
s->bit_alloc.sr_code = i % 3;
s->bitstream_id = s->eac3 ? 16 : 8 + s->bit_alloc.sr_shift;

/* select a default bit rate if not set by the user */
if (!avctx->bit_rate) {
switch (s->fbw_channels) {
case 1: avctx->bit_rate = 96000; break;
case 2: avctx->bit_rate = 192000; break;
case 3: avctx->bit_rate = 320000; break;
case 4: avctx->bit_rate = 384000; break;
case 5: avctx->bit_rate = 448000; break;
}
}

/* validate bit rate */
if (s->eac3) {
int max_br, min_br, wpf, min_br_dist, min_br_code;
@@ -2186,15 +2197,20 @@ static av_cold int validate_options(AC3EncodeContext *s)
wpf--;
s->frame_size_min = 2 * wpf;
} else {
int best_br = 0, best_code = 0, best_diff = INT_MAX;
for (i = 0; i < 19; i++) {
if ((ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift)*1000 == avctx->bit_rate)
int br = (ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift) * 1000;
int diff = abs(br - avctx->bit_rate);
if (diff < best_diff) {
best_br = br;
best_code = i;
best_diff = diff;
}
if (!best_diff)
break;
}
if (i == 19) {
av_log(avctx, AV_LOG_ERROR, "invalid bit rate\n");
return AVERROR(EINVAL);
}
s->frame_size_code = i << 1;
avctx->bit_rate = best_br;
s->frame_size_code = best_code << 1;
s->frame_size_min = 2 * ff_ac3_frame_size_tab[s->frame_size_code][s->bit_alloc.sr_code];
s->num_blks_code = 0x3;
s->num_blocks = 6;
@@ -157,4 +157,5 @@ AVCodec ff_ac3_fixed_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class = &ac3enc_class,
.channel_layouts = ff_ac3_channel_layouts,
.defaults = ac3_defaults,
};
@@ -155,5 +155,6 @@ AVCodec ff_ac3_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
.priv_class = &ac3enc_class,
.channel_layouts = ff_ac3_channel_layouts,
.defaults = ac3_defaults,
};
#endif
@@ -20,6 +20,7 @@
*/

#include "libavutil/opt.h"
#include "internal.h"
#include "ac3.h"

#if AC3ENC_TYPE == AC3ENC_TYPE_AC3_FIXED
@@ -78,3 +79,8 @@ static const AVOption eac3_options[] = {
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "cpl_start_band"},
{NULL}
};

static const AVCodecDefault ac3_defaults[] = {
{ "b", "0" },
{ NULL }
};
@@ -200,6 +200,10 @@ static enum Mode unpack_bitstream(AMRContext *p, const uint8_t *buf,
p->bad_frame_indicator = !get_bits1(&gb); // quality bit
skip_bits(&gb, 2); // two padding bits

if (mode >= N_MODES || buf_size < frame_sizes_nb[mode] + 1) {
return NO_DATA;
}

if (mode < MODE_DTX)
ff_amr_bit_reorder((uint16_t *) &p->frame, sizeof(AMRNBFrame), buf + 1,
amr_unpacking_bitmaps_per_mode[mode]);
@@ -947,6 +951,10 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
buf_out = (float *)p->avframe.data[0];

p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
if (p->cur_frame_mode == NO_DATA) {
av_log(avctx, AV_LOG_ERROR, "Corrupt bitstream\n");
return AVERROR_INVALIDDATA;
}
if (p->cur_frame_mode == MODE_DTX) {
av_log_missing_feature(avctx, "dtx mode", 0);
av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
@ -582,17 +582,19 @@ typedef struct RcOverride{
|
||||
#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
|
||||
/* Fx : Flag for h263+ extra options */
|
||||
#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
|
||||
#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
|
||||
#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selectioon.
|
||||
#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
|
||||
#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
|
||||
#define CODEC_FLAG_CLOSED_GOP 0x80000000
|
||||
#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
|
||||
#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
|
||||
#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
|
||||
#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
|
||||
#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!!
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
|
||||
#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selectioon.
|
||||
#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
|
||||
#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
|
||||
#endif
|
||||
#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
|
||||
#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe
|
||||
|
||||
@ -1492,19 +1494,21 @@ typedef struct AVCodecContext {
|
||||
|
||||
int b_frame_strategy;
|
||||
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
/**
|
||||
* luma single coefficient elimination threshold
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
int luma_elim_threshold;
|
||||
attribute_deprecated int luma_elim_threshold;
|
||||
|
||||
/**
|
||||
* chroma single coeff elimination threshold
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
int chroma_elim_threshold;
|
||||
attribute_deprecated int chroma_elim_threshold;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* qscale offset between IP and B-frames
|
||||
@ -1735,13 +1739,15 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
int inter_quant_bias;
|
||||
|
||||
#if FF_API_COLOR_TABLE_ID
|
||||
/**
|
||||
* color table ID
|
||||
* - encoding: unused
|
||||
* - decoding: Which clrtable should be used for 8bit RGB images.
|
||||
* Tables have to be stored somewhere. FIXME
|
||||
*/
|
||||
int color_table_id;
|
||||
attribute_deprecated int color_table_id;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* slice flags
|
||||
@ -1799,19 +1805,19 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
int noise_reduction;
|
||||
|
||||
#if FF_API_INTER_THRESHOLD
|
||||
/**
|
||||
*
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
* @deprecated this field is unused
|
||||
*/
|
||||
int inter_threshold;
|
||||
attribute_deprecated int inter_threshold;
|
||||
#endif
|
||||
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
/**
|
||||
* quantizer noise shaping
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
* @deprecated use mpegvideo private options instead
|
||||
*/
|
||||
int quantizer_noise_shaping;
|
||||
attribute_deprecated int quantizer_noise_shaping;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Motion estimation threshold below which no motion estimation is
|
||||
|
@@ -228,7 +228,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
av_log(avctx, AV_LOG_ERROR,
"CamStudio codec error: invalid depth %i bpp\n",
avctx->bits_per_coded_sample);
return 1;
return AVERROR_INVALIDDATA;
}
c->bpp = avctx->bits_per_coded_sample;
avcodec_get_frame_defaults(&c->pic);
@@ -242,7 +242,7 @@ static av_cold int decode_init(AVCodecContext *avctx) {
c->decomp_buf = av_malloc(c->decomp_size + AV_LZO_OUTPUT_PADDING);
if (!c->decomp_buf) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return 1;
return AVERROR(ENOMEM);
}
return 0;
}
@@ -131,9 +131,8 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
memcpy (buf + 8, "V1.0", 4);
write32(buf + 20, 1); /* new image */
write32(buf + 24, HEADER_SIZE);
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
if (!(avctx->flags & CODEC_FLAG_BITEXACT))
memcpy (buf + 160, LIBAVCODEC_IDENT, FFMIN(sizeof(LIBAVCODEC_IDENT), 100));
}
write32(buf + 660, 0xFFFFFFFF); /* unencrypted */

/* Image information header */
@@ -258,5 +258,6 @@ AVCodec ff_eac3_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52 E-AC-3"),
.priv_class = &eac3enc_class,
.channel_layouts = ff_ac3_channel_layouts,
.defaults = ac3_defaults,
};
#endif
@@ -84,6 +84,8 @@ void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run, in
}
}

FF_MPV_GENERIC_CLASS(flv)

AVCodec ff_flv_encoder = {
.name = "flv",
.type = AVMEDIA_TYPE_VIDEO,
@@ -94,4 +96,5 @@ AVCodec ff_flv_encoder = {
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
.priv_class = &flv_class,
};
@@ -321,6 +321,8 @@ static void h261_encode_block(H261Context * h, DCTELEM * block, int n){
}
}

FF_MPV_GENERIC_CLASS(h261)

AVCodec ff_h261_encoder = {
.name = "h261",
.type = AVMEDIA_TYPE_VIDEO,
@@ -331,4 +333,5 @@ AVCodec ff_h261_encoder = {
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
.priv_class = &h261_class,
};
@@ -148,7 +148,7 @@ static inline int get_p_cbp(MpegEncContext * s,
int motion_x, int motion_y){
int cbp, i;

if(s->flags & CODEC_FLAG_CBP_RD){
if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
int best_cbpy_score= INT_MAX;
int best_cbpc_score= INT_MAX;
int cbpc = (-1), cbpy= (-1);
@@ -1998,6 +1998,8 @@ decode_intra_mb:
}

// The pixels are stored in the same order as levels in h->mb array.
if ((int) (h->cabac.bytestream_end - ptr) < mb_size)
return -1;
memcpy(h->mb, ptr, mb_size); ptr+=mb_size;

ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr);
@ -20,12 +20,13 @@
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Ogg Vorbis codec support via libvorbisenc.
|
||||
* Vorbis encoding support via libvorbisenc.
|
||||
* @author Mark Hills <mark@pogo.org.uk>
|
||||
*/
|
||||
|
||||
#include <vorbis/vorbisenc.h>
|
||||
|
||||
#include "libavutil/fifo.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
@ -35,32 +36,41 @@
|
||||
#undef NDEBUG
|
||||
#include <assert.h>
|
||||
|
||||
/* Number of samples the user should send in each call.
|
||||
* This value is used because it is the LCD of all possible frame sizes, so
|
||||
* an output packet will always start at the same point as one of the input
|
||||
* packets.
|
||||
*/
|
||||
#define OGGVORBIS_FRAME_SIZE 64
|
||||
|
||||
#define BUFFER_SIZE (1024 * 64)
|
||||
|
||||
typedef struct OggVorbisContext {
|
||||
AVClass *av_class;
|
||||
vorbis_info vi;
|
||||
vorbis_dsp_state vd;
|
||||
vorbis_block vb;
|
||||
uint8_t buffer[BUFFER_SIZE];
|
||||
int buffer_index;
|
||||
int eof;
|
||||
|
||||
/* decoder */
|
||||
vorbis_comment vc;
|
||||
ogg_packet op;
|
||||
|
||||
double iblock;
|
||||
AVClass *av_class; /**< class for AVOptions */
|
||||
vorbis_info vi; /**< vorbis_info used during init */
|
||||
vorbis_dsp_state vd; /**< DSP state used for analysis */
|
||||
vorbis_block vb; /**< vorbis_block used for analysis */
|
||||
AVFifoBuffer *pkt_fifo; /**< output packet buffer */
|
||||
int eof; /**< end-of-file flag */
|
||||
int dsp_initialized; /**< vd has been initialized */
|
||||
vorbis_comment vc; /**< VorbisComment info */
|
||||
ogg_packet op; /**< ogg packet */
|
||||
double iblock; /**< impulse block bias option */
|
||||
} OggVorbisContext;
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -15, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVCodecDefault defaults[] = {
|
||||
{ "b", "0" },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
|
||||
|
||||
|
||||
static int vorbis_error_to_averror(int ov_err)
|
||||
{
|
||||
switch (ov_err) {
|
||||
@ -71,27 +81,34 @@ static int vorbis_error_to_averror(int ov_err)
|
||||
}
|
||||
}
|
||||
|
||||
static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext)
|
||||
static av_cold int oggvorbis_init_encoder(vorbis_info *vi,
|
||||
AVCodecContext *avctx)
|
||||
{
|
||||
OggVorbisContext *context = avccontext->priv_data;
|
||||
OggVorbisContext *s = avctx->priv_data;
|
||||
double cfreq;
|
||||
int ret;
|
||||
|
||||
if (avccontext->flags & CODEC_FLAG_QSCALE) {
|
||||
/* variable bitrate */
|
||||
float q = avccontext->global_quality / (float)FF_QP2LAMBDA;
|
||||
if ((ret = vorbis_encode_setup_vbr(vi, avccontext->channels,
|
||||
avccontext->sample_rate,
|
||||
if (avctx->flags & CODEC_FLAG_QSCALE || !avctx->bit_rate) {
|
||||
/* variable bitrate
|
||||
* NOTE: we use the oggenc range of -1 to 10 for global_quality for
|
||||
* user convenience, but libvorbis uses -0.1 to 1.0.
|
||||
*/
|
||||
float q = avctx->global_quality / (float)FF_QP2LAMBDA;
|
||||
/* default to 3 if the user did not set quality or bitrate */
|
||||
if (!(avctx->flags & CODEC_FLAG_QSCALE))
|
||||
q = 3.0;
|
||||
if ((ret = vorbis_encode_setup_vbr(vi, avctx->channels,
|
||||
avctx->sample_rate,
|
||||
q / 10.0)))
|
||||
goto error;
|
||||
} else {
|
||||
int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1;
|
||||
int maxrate = avccontext->rc_min_rate > 0 ? avccontext->rc_max_rate : -1;
|
||||
int minrate = avctx->rc_min_rate > 0 ? avctx->rc_min_rate : -1;
|
||||
int maxrate = avctx->rc_max_rate > 0 ? avctx->rc_max_rate : -1;
|
||||
|
||||
/* constant bitrate */
|
||||
if ((ret = vorbis_encode_setup_managed(vi, avccontext->channels,
|
||||
avccontext->sample_rate, minrate,
|
||||
avccontext->bit_rate, maxrate)))
|
||||
/* average bitrate */
|
||||
if ((ret = vorbis_encode_setup_managed(vi, avctx->channels,
|
||||
avctx->sample_rate, maxrate,
|
||||
avctx->bit_rate, minrate)))
|
||||
goto error;
|
||||
|
||||
/* variable bitrate by estimate, disable slow rate management */
|
||||
@ -101,43 +118,44 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco
|
||||
}
|
||||
|
||||
/* cutoff frequency */
|
||||
if (avccontext->cutoff > 0) {
|
||||
cfreq = avccontext->cutoff / 1000.0;
|
||||
if (avctx->cutoff > 0) {
|
||||
cfreq = avctx->cutoff / 1000.0;
|
||||
if ((ret = vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)))
|
||||
goto error; /* should not happen */
|
||||
}
|
||||
|
||||
if (context->iblock) {
|
||||
if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock)))
|
||||
/* impulse block bias */
|
||||
if (s->iblock) {
|
||||
if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &s->iblock)))
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (avccontext->channels == 3 &&
|
||||
avccontext->channel_layout != (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) ||
|
||||
avccontext->channels == 4 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_2_2 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_QUAD ||
|
||||
avccontext->channels == 5 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_5POINT0 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_5POINT0_BACK ||
|
||||
avccontext->channels == 6 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_5POINT1 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_5POINT1_BACK ||
|
||||
avccontext->channels == 7 &&
|
||||
avccontext->channel_layout != (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) ||
|
||||
avccontext->channels == 8 &&
|
||||
avccontext->channel_layout != AV_CH_LAYOUT_7POINT1) {
|
||||
if (avccontext->channel_layout) {
|
||||
if (avctx->channels == 3 &&
|
||||
avctx->channel_layout != (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) ||
|
||||
avctx->channels == 4 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_2_2 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_QUAD ||
|
||||
avctx->channels == 5 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_5POINT0 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_5POINT0_BACK ||
|
||||
avctx->channels == 6 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_5POINT1 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_5POINT1_BACK ||
|
||||
avctx->channels == 7 &&
|
||||
avctx->channel_layout != (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) ||
|
||||
avctx->channels == 8 &&
|
||||
avctx->channel_layout != AV_CH_LAYOUT_7POINT1) {
|
||||
if (avctx->channel_layout) {
|
||||
char name[32];
|
||||
av_get_channel_layout_string(name, sizeof(name), avccontext->channels,
|
||||
avccontext->channel_layout);
|
||||
av_log(avccontext, AV_LOG_ERROR, "%s not supported by Vorbis: "
|
||||
av_get_channel_layout_string(name, sizeof(name), avctx->channels,
|
||||
avctx->channel_layout);
|
||||
av_log(avctx, AV_LOG_ERROR, "%s not supported by Vorbis: "
|
||||
"output stream will have incorrect "
|
||||
"channel layout.\n", name);
|
||||
} else {
|
||||
av_log(avccontext, AV_LOG_WARNING, "No channel layout specified. The encoder "
|
||||
av_log(avctx, AV_LOG_WARNING, "No channel layout specified. The encoder "
|
||||
"will use Vorbis channel layout for "
|
||||
"%d channels.\n", avccontext->channels);
|
||||
"%d channels.\n", avctx->channels);
|
||||
}
|
||||
}
|
||||
|
||||
@ -155,59 +173,64 @@ static int xiph_len(int l)
|
||||
return 1 + l / 255 + l;
|
||||
}
|
||||
|
||||
static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext)
|
||||
static av_cold int oggvorbis_encode_close(AVCodecContext *avctx)
|
||||
{
|
||||
OggVorbisContext *context = avccontext->priv_data;
|
||||
/* ogg_packet op ; */
|
||||
OggVorbisContext *s = avctx->priv_data;
|
||||
|
||||
vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */
|
||||
/* notify vorbisenc this is EOF */
|
||||
if (s->dsp_initialized)
|
||||
vorbis_analysis_wrote(&s->vd, 0);
|
||||
|
||||
vorbis_block_clear(&context->vb);
|
||||
vorbis_dsp_clear(&context->vd);
|
||||
vorbis_info_clear(&context->vi);
|
||||
vorbis_block_clear(&s->vb);
|
||||
vorbis_dsp_clear(&s->vd);
|
||||
vorbis_info_clear(&s->vi);
|
||||
|
||||
av_freep(&avccontext->coded_frame);
|
||||
av_freep(&avccontext->extradata);
|
||||
av_fifo_free(s->pkt_fifo);
|
||||
av_freep(&avctx->coded_frame);
|
||||
av_freep(&avctx->extradata);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext)
|
||||
static av_cold int oggvorbis_encode_init(AVCodecContext *avctx)
|
||||
{
|
||||
OggVorbisContext *context = avccontext->priv_data;
|
||||
OggVorbisContext *s = avctx->priv_data;
|
||||
ogg_packet header, header_comm, header_code;
|
||||
uint8_t *p;
|
||||
unsigned int offset;
|
||||
int ret;
|
||||
|
||||
vorbis_info_init(&context->vi);
|
||||
if ((ret = oggvorbis_init_encoder(&context->vi, avccontext))) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n");
|
||||
vorbis_info_init(&s->vi);
|
||||
if ((ret = oggvorbis_init_encoder(&s->vi, avctx))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "encoder setup failed\n");
|
||||
goto error;
|
||||
}
|
||||
if ((ret = vorbis_analysis_init(&context->vd, &context->vi))) {
|
||||
if ((ret = vorbis_analysis_init(&s->vd, &s->vi))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "analysis init failed\n");
|
||||
ret = vorbis_error_to_averror(ret);
|
||||
goto error;
|
||||
}
|
||||
if ((ret = vorbis_block_init(&context->vd, &context->vb))) {
|
||||
s->dsp_initialized = 1;
|
||||
if ((ret = vorbis_block_init(&s->vd, &s->vb))) {
|
||||
av_log(avctx, AV_LOG_ERROR, "dsp init failed\n");
|
||||
ret = vorbis_error_to_averror(ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
vorbis_comment_init(&context->vc);
|
||||
vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT);
|
||||
vorbis_comment_init(&s->vc);
|
||||
vorbis_comment_add_tag(&s->vc, "encoder", LIBAVCODEC_IDENT);
|
||||
|
||||
if ((ret = vorbis_analysis_headerout(&context->vd, &context->vc, &header,
|
||||
&header_comm, &header_code))) {
|
||||
if ((ret = vorbis_analysis_headerout(&s->vd, &s->vc, &header, &header_comm,
|
||||
&header_code))) {
|
||||
ret = vorbis_error_to_averror(ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
avccontext->extradata_size =
|
||||
1 + xiph_len(header.bytes) + xiph_len(header_comm.bytes) +
|
||||
header_code.bytes;
|
||||
p = avccontext->extradata =
|
||||
av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
avctx->extradata_size = 1 + xiph_len(header.bytes) +
|
||||
xiph_len(header_comm.bytes) +
|
||||
header_code.bytes;
|
||||
p = avctx->extradata = av_malloc(avctx->extradata_size +
|
||||
FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!p) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
@ -222,100 +245,107 @@ static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext)
|
||||
offset += header_comm.bytes;
|
||||
memcpy(&p[offset], header_code.packet, header_code.bytes);
|
||||
offset += header_code.bytes;
|
||||
assert(offset == avccontext->extradata_size);
|
||||
assert(offset == avctx->extradata_size);
|
||||
|
||||
#if 0
|
||||
vorbis_block_clear(&context->vb);
|
||||
vorbis_dsp_clear(&context->vd);
|
||||
vorbis_info_clear(&context->vi);
|
||||
#endif
|
||||
vorbis_comment_clear(&context->vc);
|
||||
vorbis_comment_clear(&s->vc);
|
||||
|
||||
avccontext->frame_size = OGGVORBIS_FRAME_SIZE;
|
||||
avctx->frame_size = OGGVORBIS_FRAME_SIZE;
|
||||
|
||||
avccontext->coded_frame = avcodec_alloc_frame();
|
||||
if (!avccontext->coded_frame) {
|
||||
s->pkt_fifo = av_fifo_alloc(BUFFER_SIZE);
|
||||
if (!s->pkt_fifo) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
}
|
||||
|
||||
avctx->coded_frame = avcodec_alloc_frame();
|
||||
if (!avctx->coded_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
error:
|
||||
oggvorbis_encode_close(avccontext);
|
||||
oggvorbis_encode_close(avctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int oggvorbis_encode_frame(AVCodecContext *avccontext,
|
||||
unsigned char *packets,
|
||||
static int oggvorbis_encode_frame(AVCodecContext *avctx, unsigned char *packets,
|
||||
int buf_size, void *data)
|
||||
{
|
||||
OggVorbisContext *context = avccontext->priv_data;
|
||||
OggVorbisContext *s = avctx->priv_data;
|
||||
ogg_packet op;
|
||||
signed short *audio = data;
|
||||
int l;
|
||||
float *audio = data;
|
||||
int pkt_size, ret;
|
||||
|
||||
/* send samples to libvorbis */
|
||||
if (data) {
|
||||
const int samples = avccontext->frame_size;
|
||||
const int samples = avctx->frame_size;
|
||||
float **buffer;
|
||||
int c, channels = context->vi.channels;
|
||||
int c, channels = s->vi.channels;
|
||||
|
||||
buffer = vorbis_analysis_buffer(&context->vd, samples);
|
||||
buffer = vorbis_analysis_buffer(&s->vd, samples);
|
||||
for (c = 0; c < channels; c++) {
|
||||
int i;
|
||||
int co = (channels > 8) ? c :
|
||||
ff_vorbis_encoding_channel_layout_offsets[channels - 1][c];
|
||||
for (l = 0; l < samples; l++)
|
||||
buffer[c][l] = audio[l * channels + co] / 32768.f;
|
||||
for (i = 0; i < samples; i++)
|
||||
buffer[c][i] = audio[i * channels + co];
|
||||
}
|
||||
if ((ret = vorbis_analysis_wrote(&s->vd, samples)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n");
|
||||
return vorbis_error_to_averror(ret);
|
||||
}
|
||||
vorbis_analysis_wrote(&context->vd, samples);
|
||||
} else {
|
||||
if (!context->eof)
|
||||
vorbis_analysis_wrote(&context->vd, 0);
|
||||
context->eof = 1;
|
||||
if (!s->eof)
|
||||
if ((ret = vorbis_analysis_wrote(&s->vd, 0)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error in vorbis_analysis_wrote()\n");
|
||||
return vorbis_error_to_averror(ret);
|
||||
}
|
||||
s->eof = 1;
|
||||
}
|
||||
|
||||
while (vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
|
||||
vorbis_analysis(&context->vb, NULL);
|
||||
vorbis_bitrate_addblock(&context->vb);
|
||||
/* retrieve available packets from libvorbis */
|
||||
while ((ret = vorbis_analysis_blockout(&s->vd, &s->vb)) == 1) {
|
||||
if ((ret = vorbis_analysis(&s->vb, NULL)) < 0)
|
||||
break;
|
||||
if ((ret = vorbis_bitrate_addblock(&s->vb)) < 0)
|
||||
break;
|
||||
|
||||
while (vorbis_bitrate_flushpacket(&context->vd, &op)) {
|
||||
/* i'd love to say the following line is a hack, but sadly it's
|
||||
* not, apparently the end of stream decision is in libogg. */
|
||||
if (op.bytes == 1 && op.e_o_s)
|
||||
continue;
|
||||
if (context->buffer_index + sizeof(ogg_packet) + op.bytes > BUFFER_SIZE) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "libvorbis: buffer overflow.\n");
|
||||
return AVERROR(EINVAL);
|
||||
/* add any available packets to the output packet buffer */
|
||||
while ((ret = vorbis_bitrate_flushpacket(&s->vd, &op)) == 1) {
|
||||
if (av_fifo_space(s->pkt_fifo) < sizeof(ogg_packet) + op.bytes) {
|
||||
av_log(avctx, AV_LOG_ERROR, "packet buffer is too small\n");
|
||||
return AVERROR_BUG;
|
||||
}
|
||||
memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet));
|
||||
context->buffer_index += sizeof(ogg_packet);
|
||||
memcpy(context->buffer + context->buffer_index, op.packet, op.bytes);
|
||||
context->buffer_index += op.bytes;
|
||||
// av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes);
|
||||
av_fifo_generic_write(s->pkt_fifo, &op, sizeof(ogg_packet), NULL);
|
||||
av_fifo_generic_write(s->pkt_fifo, op.packet, op.bytes, NULL);
|
||||
}
|
||||
if (ret < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error getting available packets\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (ret < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error getting available packets\n");
|
||||
return vorbis_error_to_averror(ret);
|
||||
}
|
||||
|
||||
l = 0;
|
||||
if (context->buffer_index) {
|
||||
ogg_packet *op2 = (ogg_packet *)context->buffer;
|
||||
op2->packet = context->buffer + sizeof(ogg_packet);
|
||||
|
||||
l = op2->bytes;
|
||||
avccontext->coded_frame->pts = ff_samples_to_time_base(avccontext,
|
||||
op2->granulepos);
|
||||
//FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate
|
||||
|
||||
if (l > buf_size) {
|
||||
av_log(avccontext, AV_LOG_ERROR, "libvorbis: buffer overflow.\n");
|
||||
/* output then next packet from the output buffer, if available */
|
||||
pkt_size = 0;
|
||||
if (av_fifo_size(s->pkt_fifo) >= sizeof(ogg_packet)) {
|
||||
av_fifo_generic_read(s->pkt_fifo, &op, sizeof(ogg_packet), NULL);
|
||||
pkt_size = op.bytes;
|
||||
// FIXME: we should use the user-supplied pts and duration
|
||||
avctx->coded_frame->pts = ff_samples_to_time_base(avctx,
|
||||
op.granulepos);
|
||||
if (pkt_size > buf_size) {
|
||||
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
memcpy(packets, op2->packet, l);
|
||||
context->buffer_index -= l + sizeof(ogg_packet);
|
||||
memmove(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index);
|
||||
// av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l);
|
||||
av_fifo_generic_read(s->pkt_fifo, packets, pkt_size, NULL);
|
||||
}
|
||||
|
||||
return l;
|
||||
return pkt_size;
|
||||
}
|
||||
|
||||
AVCodec ff_libvorbis_encoder = {
|
||||
@ -327,7 +357,9 @@ AVCodec ff_libvorbis_encoder = {
|
||||
.encode = oggvorbis_encode_frame,
|
||||
.close = oggvorbis_encode_close,
|
||||
.capabilities = CODEC_CAP_DELAY,
|
||||
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
|
||||
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
|
||||
AV_SAMPLE_FMT_NONE },
|
||||
.long_name = NULL_IF_CONFIG_SMALL("libvorbis Vorbis"),
|
||||
.priv_class = &class,
|
||||
.defaults = defaults,
|
||||
};
|
||||
|
@@ -939,6 +939,7 @@ static void mpeg1_encode_block(MpegEncContext *s,

static const AVOption mpeg1_options[] = {
COMMON_OPTS
FF_MPV_COMMON_OPTS
{ NULL },
};

@@ -946,6 +947,7 @@ static const AVOption mpeg2_options[] = {
COMMON_OPTS
{ "non_linear_quant", "Use nonlinear quantizer.", OFFSET(q_scale_type), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
{ "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
FF_MPV_COMMON_OPTS
{ NULL },
};
@@ -430,7 +430,7 @@ static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
{
int cbp = 0, i;

if (s->flags & CODEC_FLAG_CBP_RD) {
if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
int score = 0;
const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
@@ -1330,6 +1330,7 @@ void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
static const AVOption options[] = {
{ "data_partitioning", "Use data partitioning.", OFFSET(data_partitioning), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
{ "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
FF_MPV_COMMON_OPTS
{ NULL },
};
@ -31,6 +31,7 @@
|
||||
#include "get_bits.h"
|
||||
#include "mathops.h"
|
||||
#include "mpegaudiodsp.h"
|
||||
#include "dsputil.h"
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
@ -82,6 +83,7 @@ typedef struct MPADecodeContext {
|
||||
int err_recognition;
|
||||
AVCodecContext* avctx;
|
||||
MPADSPContext mpadsp;
|
||||
DSPContext dsp;
|
||||
AVFrame frame;
|
||||
} MPADecodeContext;
|
||||
|
||||
@ -434,6 +436,7 @@ static av_cold int decode_init(AVCodecContext * avctx)
|
||||
s->avctx = avctx;
|
||||
|
||||
ff_mpadsp_init(&s->mpadsp);
|
||||
ff_dsputil_init(&s->dsp, avctx);
|
||||
|
||||
avctx->sample_fmt= OUT_FMT;
|
||||
s->err_recognition = avctx->err_recognition;
|
||||
@ -1155,6 +1158,9 @@ found2:
|
||||
/* ms stereo ONLY */
|
||||
/* NOTE: the 1/sqrt(2) normalization factor is included in the
|
||||
global gain */
|
||||
#if CONFIG_FLOAT
|
||||
s-> dsp.butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576);
|
||||
#else
|
||||
tab0 = g0->sb_hybrid;
|
||||
tab1 = g1->sb_hybrid;
|
||||
for (i = 0; i < 576; i++) {
|
||||
@ -1163,6 +1169,7 @@ found2:
|
||||
tab0[i] = tmp0 + tmp1;
|
||||
tab1[i] = tmp0 - tmp1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,6 +38,8 @@
|
||||
#include "rl.h"
|
||||
#include "libavutil/timecode.h"
|
||||
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded
|
||||
|
||||
enum OutputFormat {
|
||||
@ -695,6 +697,9 @@ typedef struct MpegEncContext {
|
||||
int (*dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
|
||||
int (*fast_dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow);
|
||||
void (*denoise_dct)(struct MpegEncContext *s, DCTELEM *block);
|
||||
|
||||
int mpv_flags; ///< flags set by private options
|
||||
int quantizer_noise_shaping;
|
||||
} MpegEncContext;
|
||||
|
||||
#define REBASE_PICTURE(pic, new_ctx, old_ctx) (pic ? \
|
||||
@ -702,6 +707,36 @@ typedef struct MpegEncContext {
|
||||
&new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
|
||||
: NULL)
|
||||
|
||||
/* mpegvideo_enc common options */
|
||||
#define FF_MPV_FLAG_SKIP_RD 0x0001
|
||||
#define FF_MPV_FLAG_STRICT_GOP 0x0002
|
||||
#define FF_MPV_FLAG_QP_RD 0x0004
|
||||
#define FF_MPV_FLAG_CBP_RD 0x0008
|
||||
|
||||
#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x)
|
||||
#define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
|
||||
#define FF_MPV_COMMON_OPTS \
|
||||
{ "mpv_flags", "Flags common for all mpegvideo-based encoders.", FF_MPV_OFFSET(mpv_flags), AV_OPT_TYPE_FLAGS, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "mpv_flags" },\
|
||||
{ "skip_rd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_SKIP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
|
||||
{ "strict_gop", "Strictly enforce gop size", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_STRICT_GOP }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
|
||||
{ "qp_rd", "Use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_QP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
|
||||
{ "cbp_rd", "use rate distortion optimization for CBP", 0, AV_OPT_TYPE_CONST, { FF_MPV_FLAG_CBP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
|
||||
{ "luma_elim_threshold", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)",\
|
||||
FF_MPV_OFFSET(luma_elim_threshold), AV_OPT_TYPE_INT, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
|
||||
{ "chroma_elim_threshold", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)",\
|
||||
FF_MPV_OFFSET(chroma_elim_threshold), AV_OPT_TYPE_INT, { 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
|
||||
{ "quantizer_noise_shaping", NULL, FF_MPV_OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, { 0 }, 0, INT_MAX, FF_MPV_OPT_FLAGS },
|
||||
|
||||
extern const AVOption ff_mpv_generic_options[];
|
||||
|
||||
#define FF_MPV_GENERIC_CLASS(name) \
|
||||
static const AVClass name ## _class = {\
|
||||
.class_name = #name " encoder",\
|
||||
.item_name = av_default_item_name,\
|
||||
.option = ff_mpv_generic_options,\
|
||||
.version = LIBAVUTIL_VERSION_INT,\
|
||||
};
|
||||
|
||||
void ff_MPV_decode_defaults(MpegEncContext *s);
|
||||
int ff_MPV_common_init(MpegEncContext *s);
|
||||
void ff_MPV_common_end(MpegEncContext *s);
|
||||
|
@ -63,6 +63,11 @@ static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int
|
||||
static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
|
||||
static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
|
||||
|
||||
const AVOption ff_mpv_generic_options[] = {
|
||||
FF_MPV_COMMON_OPTS
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
|
||||
uint16_t (*qmat16)[2][64],
|
||||
const uint16_t *quant_matrix,
|
||||
@ -352,8 +357,12 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
|
||||
s->flags2 = avctx->flags2;
|
||||
s->max_b_frames = avctx->max_b_frames;
|
||||
s->codec_id = avctx->codec->id;
|
||||
s->luma_elim_threshold = avctx->luma_elim_threshold;
|
||||
s->chroma_elim_threshold = avctx->chroma_elim_threshold;
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
if (avctx->luma_elim_threshold)
|
||||
s->luma_elim_threshold = avctx->luma_elim_threshold;
|
||||
if (avctx->chroma_elim_threshold)
|
||||
s->chroma_elim_threshold = avctx->chroma_elim_threshold;
|
||||
#endif
|
||||
s->strict_std_compliance = avctx->strict_std_compliance;
|
||||
s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
|
||||
s->mpeg_quant = avctx->mpeg_quant;
|
||||
@ -373,13 +382,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
|
||||
/* Fixed QSCALE */
|
||||
s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
|
||||
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
if (s->flags & CODEC_FLAG_QP_RD)
|
||||
s->mpv_flags |= FF_MPV_FLAG_QP_RD;
|
||||
#endif
|
||||
|
||||
s->adaptive_quant = (s->avctx->lumi_masking ||
|
||||
s->avctx->dark_masking ||
|
||||
s->avctx->temporal_cplx_masking ||
|
||||
s->avctx->spatial_cplx_masking ||
|
||||
s->avctx->p_masking ||
|
||||
s->avctx->border_masking ||
|
||||
(s->flags & CODEC_FLAG_QP_RD)) &&
|
||||
(s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
|
||||
!s->fixed_qscale;
|
||||
|
||||
s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
|
||||
@ -488,12 +502,17 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis) {
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
if (s->flags & CODEC_FLAG_CBP_RD)
|
||||
s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
|
||||
#endif
|
||||
|
||||
if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
|
||||
av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((s->flags & CODEC_FLAG_QP_RD) &&
|
||||
if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
|
||||
s->avctx->mb_decision != FF_MB_DECISION_RD) {
|
||||
av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
|
||||
return -1;
|
||||
@ -610,6 +629,15 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
|
||||
}
|
||||
s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
|
||||
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
|
||||
s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
|
||||
if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
|
||||
s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
|
||||
if (avctx->quantizer_noise_shaping)
|
||||
s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
|
||||
#endif
|
||||
|
||||
switch (avctx->codec->id) {
|
||||
case CODEC_ID_MPEG1VIDEO:
|
||||
s->out_format = FMT_MPEG1;
|
||||
@ -1301,7 +1329,7 @@ static int select_input_picture(MpegEncContext *s)
|
||||
}
|
||||
|
||||
if (s->picture_in_gop_number + b_frames >= s->gop_size) {
|
||||
if ((s->flags2 & CODEC_FLAG2_STRICT_GOP) &&
|
||||
if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
|
||||
s->gop_size > s->picture_in_gop_number) {
|
||||
b_frames = s->gop_size - s->picture_in_gop_number - 1;
|
||||
} else {
|
||||
@ -1726,7 +1754,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
|
||||
s->lambda = s->lambda_table[mb_xy];
|
||||
update_qscale(s);
|
||||
|
||||
if (!(s->flags & CODEC_FLAG_QP_RD)) {
|
||||
if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
|
||||
s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
|
||||
s->dquant = s->qscale - last_qp;
|
||||
|
||||
@ -1746,7 +1774,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
|
||||
}
|
||||
}
|
||||
ff_set_qscale(s, last_qp + s->dquant);
|
||||
} else if (s->flags & CODEC_FLAG_QP_RD)
|
||||
} else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
|
||||
ff_set_qscale(s, s->qscale + s->dquant);
|
||||
|
||||
wrap_y = s->linesize;
|
||||
@ -1934,7 +1962,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
|
||||
}
|
||||
}
|
||||
|
||||
if (s->avctx->quantizer_noise_shaping) {
|
||||
if (s->quantizer_noise_shaping) {
|
||||
if (!skip_dct[0])
|
||||
get_visual_weight(weight[0], ptr_y , wrap_y);
|
||||
if (!skip_dct[1])
|
||||
@ -1975,7 +2003,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
|
||||
} else
|
||||
s->block_last_index[i] = -1;
|
||||
}
|
||||
if (s->avctx->quantizer_noise_shaping) {
|
||||
if (s->quantizer_noise_shaping) {
|
||||
for (i = 0; i < mb_block_count; i++) {
|
||||
if (!skip_dct[i]) {
|
||||
s->block_last_index[i] =
|
||||
@ -1992,7 +2020,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
|
||||
for (i = 4; i < mb_block_count; i++)
|
||||
dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
|
||||
|
||||
if (s->flags & CODEC_FLAG_CBP_RD) {
|
||||
if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
|
||||
for (i = 0; i < mb_block_count; i++) {
|
||||
if (s->block_last_index[i] == -1)
|
||||
s->coded_score[i] = INT_MAX / 256;
|
||||
@ -2513,7 +2541,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
s->mb_skipped=0;
|
||||
s->dquant=0; //only for QP_RD
|
||||
|
||||
if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD
|
||||
if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
|
||||
int next_block=0;
|
||||
int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
|
||||
|
||||
@ -2650,7 +2678,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
}
|
||||
}
|
||||
|
||||
if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
|
||||
if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
|
||||
if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
|
||||
const int last_qp= backup_s.qscale;
|
||||
int qpi, qp, dc[6];
|
||||
@ -2715,7 +2743,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
|
||||
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
|
||||
&dmin, &next_block, 0, 0);
|
||||
}
|
||||
if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
|
||||
if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
|
||||
int coded=0;
|
||||
for(i=0; i<6; i++)
|
||||
coded |= s->block_last_index[i];
|
||||
@ -3755,7 +3783,7 @@ STOP_TIMER("init rem[]")
|
||||
#ifdef REFINE_STATS
|
||||
{START_TIMER
|
||||
#endif
|
||||
analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3;
|
||||
analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
|
||||
|
||||
if(analyze_gradient){
|
||||
#ifdef REFINE_STATS
|
||||
@ -3813,7 +3841,7 @@ STOP_TIMER("dct")}
|
||||
const int level= block[j];
|
||||
int change, old_coeff;
|
||||
|
||||
if(s->avctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
|
||||
if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
|
||||
break;
|
||||
|
||||
if(level){
|
||||
@ -3831,7 +3859,7 @@ STOP_TIMER("dct")}
|
||||
int score, new_coeff, unquant_change;
|
||||
|
||||
score=0;
|
||||
if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
|
||||
if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
|
||||
continue;
|
||||
|
||||
if(new_level){
|
||||
@ -4089,6 +4117,7 @@ int ff_dct_quantize_c(MpegEncContext *s,
|
||||
static const AVOption h263_options[] = {
|
||||
{ "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
|
||||
{ "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
|
||||
FF_MPV_COMMON_OPTS
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
@ -4117,6 +4146,7 @@ static const AVOption h263p_options[] = {
|
||||
{ "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
|
||||
{ "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE },
|
||||
{ "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { 0 }, 0, 1, VE},
|
||||
FF_MPV_COMMON_OPTS
|
||||
{ NULL },
|
||||
};
|
||||
static const AVClass h263p_class = {
|
||||
@ -4140,6 +4170,8 @@ AVCodec ff_h263p_encoder = {
|
||||
.priv_class = &h263p_class,
|
||||
};
|
||||
|
||||
FF_MPV_GENERIC_CLASS(msmpeg4v2)
|
||||
|
||||
AVCodec ff_msmpeg4v2_encoder = {
|
||||
.name = "msmpeg4v2",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@ -4150,8 +4182,11 @@ AVCodec ff_msmpeg4v2_encoder = {
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
|
||||
.priv_class = &msmpeg4v2_class,
|
||||
};
|
||||
|
||||
FF_MPV_GENERIC_CLASS(msmpeg4v3)
|
||||
|
||||
AVCodec ff_msmpeg4v3_encoder = {
|
||||
.name = "msmpeg4",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@ -4162,8 +4197,11 @@ AVCodec ff_msmpeg4v3_encoder = {
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
|
||||
.priv_class = &msmpeg4v3_class,
|
||||
};
|
||||
|
||||
FF_MPV_GENERIC_CLASS(wmv1)
|
||||
|
||||
AVCodec ff_wmv1_encoder = {
|
||||
.name = "wmv1",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
@ -4174,4 +4212,5 @@ AVCodec ff_wmv1_encoder = {
|
||||
.close = ff_MPV_encode_end,
|
||||
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
|
||||
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
|
||||
.priv_class = &wmv1_class,
|
||||
};
|
||||
|
@ -98,12 +98,16 @@ static const AVOption options[]={
|
||||
{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
|
||||
{"bitexact", "use only bitexact stuff (except (i)dct)", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
|
||||
{"aic", "h263 advanced intra coding / mpeg4 ac prediction", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
{"cbp", "use rate distortion optimization for cbp", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
{"qprd", "use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
{"cbp", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CBP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
{"qprd", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_QP_RD }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
#endif
|
||||
{"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
{"cgop", "closed gop", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
|
||||
{"fast", "allow non spec compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
{"sgop", "strictly enforce gop size", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
{"sgop", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_STRICT_GOP }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
#endif
|
||||
{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
{"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
|
||||
@ -167,8 +171,10 @@ static const AVOption options[]={
|
||||
{"dc_clip", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_DC_CLIP }, INT_MIN, INT_MAX, V|D, "bug"},
|
||||
{"ms", "workaround various bugs in microsofts broken decoders", 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_MS }, INT_MIN, INT_MAX, V|D, "bug"},
|
||||
{"trunc", "trancated frames", 0, AV_OPT_TYPE_CONST, {.dbl = FF_BUG_TRUNCATED}, INT_MIN, INT_MAX, V|D, "bug"},
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
{"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
#endif
|
||||
{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
|
||||
{"very", "strictly conform to a older more strict version of the spec or reference software", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_VERY_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
|
||||
{"strict", "strictly conform to all the things in the spec no matter what consequences", 0, AV_OPT_TYPE_CONST, {.dbl = FF_COMPLIANCE_STRICT }, INT_MIN, INT_MAX, V|D|E, "strict"},
|
||||
@ -194,8 +200,8 @@ static const AVOption options[]={
|
||||
{"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
|
||||
{"rc_eq", "set rate control equation", OFFSET(rc_eq), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, V|E},
|
||||
{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"maxrate", "set max bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
|
||||
{"minrate", "set min bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
|
||||
{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
|
||||
{"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), AV_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, V|E},
|
||||
{"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
|
||||
@ -296,7 +302,9 @@ static const AVOption options[]={
|
||||
{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"ibias", "intra quant bias", OFFSET(intra_quant_bias), AV_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
|
||||
{"pbias", "inter quant bias", OFFSET(inter_quant_bias), AV_OPT_TYPE_INT, {.dbl = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, V|E},
|
||||
#if FF_API_COLOR_TABLE_ID
|
||||
{"color_table_id", NULL, OFFSET(color_table_id), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
|
||||
#endif
|
||||
{"global_quality", NULL, OFFSET(global_quality), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
|
||||
{"coder", NULL, OFFSET(coder_type), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E, "coder"},
|
||||
{"vlc", "variable length coder / huffman coder", 0, AV_OPT_TYPE_CONST, {.dbl = FF_CODER_TYPE_VLC }, INT_MIN, INT_MAX, V|E, "coder"},
|
||||
@ -317,10 +325,14 @@ static const AVOption options[]={
|
||||
{"lmax", "max lagrange factor (VBR)", OFFSET(lmax), AV_OPT_TYPE_INT, {.dbl = 31*FF_QP2LAMBDA }, 0, INT_MAX, V|E},
|
||||
{"nr", "noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
#if FF_API_INTER_THRESHOLD
|
||||
{"inter_threshold", NULL, OFFSET(inter_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
#endif
|
||||
{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.dbl = DEFAULT}, 0, UINT_MAX, V|A|E|D, "flags2"},
|
||||
{"error", NULL, OFFSET(error_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
{"qns", "deprecated, use mpegvideo private options instead", OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
#endif
|
||||
{"threads", NULL, OFFSET(thread_count), AV_OPT_TYPE_INT, {.dbl = 1 }, 0, INT_MAX, V|E|D, "threads"},
|
||||
{"auto", "detect a good number of threads", 0, AV_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, V|E|D, "threads"},
|
||||
{"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
@ -366,7 +378,9 @@ static const AVOption options[]={
|
||||
{"refs", "reference frames to consider for motion compensation", OFFSET(refs), AV_OPT_TYPE_INT, {.dbl = 1 }, INT_MIN, INT_MAX, V|E},
|
||||
{"chromaoffset", "chroma qp offset from luma", OFFSET(chromaoffset), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
|
||||
{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
|
||||
{"skiprd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
#if FF_API_MPV_GLOBAL_OPTS
|
||||
{"skiprd", "Deprecated, use mpegvideo private options instead", 0, AV_OPT_TYPE_CONST, {.dbl = CODEC_FLAG2_SKIP_RD }, INT_MIN, INT_MAX, V|E, "flags2"},
|
||||
#endif
|
||||
{"sc_factor", "multiplied by qscale for each frame and added to scene_change_score", OFFSET(scenechange_factor), AV_OPT_TYPE_INT, {.dbl = 6 }, 0, INT_MAX, V|E},
|
||||
{"mv0_threshold", NULL, OFFSET(mv0_threshold), AV_OPT_TYPE_INT, {.dbl = 256 }, 0, INT_MAX, V|E},
|
||||
{"b_sensitivity", "adjusts sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), AV_OPT_TYPE_INT, {.dbl = 40 }, 1, INT_MAX, V|E},
|
||||
|
@ -165,6 +165,10 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
|
||||
ctx->picture.top_field_first = ctx->frame_type & 1;
|
||||
}
|
||||
|
||||
avctx->color_primaries = buf[14];
|
||||
avctx->color_trc = buf[15];
|
||||
avctx->colorspace = buf[16];
|
||||
|
||||
ctx->alpha_info = buf[17] & 0xf;
|
||||
if (ctx->alpha_info)
|
||||
av_log_missing_feature(avctx, "alpha channel", 0);
|
||||
@ -411,7 +415,7 @@ static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
|
||||
int data_size, uint16_t *out_ptr,
|
||||
int linesize, int mbs_per_slice,
|
||||
int blocks_per_mb, int plane_size_factor,
|
||||
const int16_t *qmat)
|
||||
const int16_t *qmat, int is_chroma)
|
||||
{
|
||||
GetBitContext gb;
|
||||
DCTELEM *block_ptr;
|
||||
@ -431,18 +435,33 @@ static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
|
||||
/* inverse quantization, inverse transform and output */
|
||||
block_ptr = td->blocks;
|
||||
|
||||
for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
|
||||
ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
|
||||
if (!is_chroma) {
|
||||
for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
|
||||
ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
}
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
}
|
||||
}
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
|
||||
} else {
|
||||
for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
|
||||
ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
|
||||
block_ptr += 64;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -523,7 +542,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
|
||||
(uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
|
||||
(mb_x_pos << 5)), y_linesize,
|
||||
mbs_per_slice, 4, slice_width_factor + 2,
|
||||
td->qmat_luma_scaled);
|
||||
td->qmat_luma_scaled, 0);
|
||||
|
||||
/* decode U chroma plane */
|
||||
decode_slice_plane(ctx, td, buf + hdr_size + y_data_size, u_data_size,
|
||||
@ -531,7 +550,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
|
||||
(mb_x_pos << ctx->mb_chroma_factor)),
|
||||
u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
|
||||
slice_width_factor + ctx->chroma_factor - 1,
|
||||
td->qmat_chroma_scaled);
|
||||
td->qmat_chroma_scaled, 1);
|
||||
|
||||
/* decode V chroma plane */
|
||||
decode_slice_plane(ctx, td, buf + hdr_size + y_data_size + u_data_size,
|
||||
@ -540,7 +559,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
|
||||
(mb_x_pos << ctx->mb_chroma_factor)),
|
||||
v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
|
||||
slice_width_factor + ctx->chroma_factor - 1,
|
||||
td->qmat_chroma_scaled);
|
||||
td->qmat_chroma_scaled, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -171,7 +171,7 @@ typedef struct ProresContext {
|
||||
static void get_slice_data(ProresContext *ctx, const uint16_t *src,
|
||||
int linesize, int x, int y, int w, int h,
|
||||
DCTELEM *blocks,
|
||||
int mbs_per_slice, int blocks_per_mb)
|
||||
int mbs_per_slice, int blocks_per_mb, int is_chroma)
|
||||
{
|
||||
const uint16_t *esrc;
|
||||
const int mb_width = 4 * blocks_per_mb;
|
||||
@ -189,37 +189,50 @@ static void get_slice_data(ProresContext *ctx, const uint16_t *src,
|
||||
elinesize = linesize;
|
||||
} else {
|
||||
int bw, bh, pix;
|
||||
const int estride = 16 / sizeof(*ctx->emu_buf);
|
||||
|
||||
esrc = ctx->emu_buf;
|
||||
elinesize = 16;
|
||||
elinesize = 16 * sizeof(*ctx->emu_buf);
|
||||
|
||||
bw = FFMIN(w - x, mb_width);
|
||||
bh = FFMIN(h - y, 16);
|
||||
|
||||
for (j = 0; j < bh; j++) {
|
||||
memcpy(ctx->emu_buf + j * estride, src + j * linesize,
|
||||
memcpy(ctx->emu_buf + j * 16,
|
||||
(const uint8_t*)src + j * linesize,
|
||||
bw * sizeof(*src));
|
||||
pix = ctx->emu_buf[j * estride + bw - 1];
|
||||
pix = ctx->emu_buf[j * 16 + bw - 1];
|
||||
for (k = bw; k < mb_width; k++)
|
||||
ctx->emu_buf[j * estride + k] = pix;
|
||||
ctx->emu_buf[j * 16 + k] = pix;
|
||||
}
|
||||
for (; j < 16; j++)
|
||||
memcpy(ctx->emu_buf + j * estride,
|
||||
ctx->emu_buf + (bh - 1) * estride,
|
||||
memcpy(ctx->emu_buf + j * 16,
|
||||
ctx->emu_buf + (bh - 1) * 16,
|
||||
mb_width * sizeof(*ctx->emu_buf));
|
||||
}
|
||||
ctx->dsp.fdct(esrc, elinesize, blocks);
|
||||
blocks += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.fdct(src + 8, linesize, blocks);
|
||||
if (!is_chroma) {
|
||||
ctx->dsp.fdct(esrc, elinesize, blocks);
|
||||
blocks += 64;
|
||||
}
|
||||
ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
|
||||
blocks += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.fdct(src + 8, linesize, blocks);
|
||||
blocks += 64;
|
||||
}
|
||||
ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
|
||||
blocks += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
|
||||
blocks += 64;
|
||||
}
|
||||
} else {
|
||||
ctx->dsp.fdct(esrc, elinesize, blocks);
|
||||
blocks += 64;
|
||||
ctx->dsp.fdct(src + linesize * 4, linesize, blocks);
|
||||
blocks += 64;
|
||||
if (blocks_per_mb > 2) {
|
||||
ctx->dsp.fdct(src + 8, linesize, blocks);
|
||||
blocks += 64;
|
||||
ctx->dsp.fdct(src + linesize * 4 + 8, linesize, blocks);
|
||||
blocks += 64;
|
||||
}
|
||||
}
|
||||
|
||||
x += mb_width;
|
||||
@ -383,7 +396,7 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
|
||||
|
||||
get_slice_data(ctx, src, pic->linesize[i], xp, yp,
|
||||
pwidth, avctx->height, ctx->blocks[0],
|
||||
mbs_per_slice, num_cblocks);
|
||||
mbs_per_slice, num_cblocks, is_chroma);
|
||||
sizes[i] = encode_slice_plane(ctx, pb, src, pic->linesize[i],
|
||||
mbs_per_slice, ctx->blocks[0],
|
||||
num_cblocks, plane_factor,
|
||||
@ -539,7 +552,7 @@ static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
|
||||
|
||||
get_slice_data(ctx, src, pic->linesize[i], xp, yp,
|
||||
pwidth, avctx->height, ctx->blocks[i],
|
||||
mbs_per_slice, num_cblocks[i]);
|
||||
mbs_per_slice, num_cblocks[i], is_chroma[i]);
|
||||
}
|
||||
|
||||
for (q = min_quant; q < max_quant + 2; q++) {
|
||||
@ -676,9 +689,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
bytestream_put_be16 (&buf, avctx->height);
|
||||
bytestream_put_byte (&buf, ctx->chroma_factor << 6); // frame flags
|
||||
bytestream_put_byte (&buf, 0); // reserved
|
||||
bytestream_put_byte (&buf, 0); // primaries
|
||||
bytestream_put_byte (&buf, 0); // transfer function
|
||||
bytestream_put_byte (&buf, 6); // colour matrix - ITU-R BT.601-4
|
||||
bytestream_put_byte (&buf, avctx->color_primaries);
|
||||
bytestream_put_byte (&buf, avctx->color_trc);
|
||||
bytestream_put_byte (&buf, avctx->colorspace);
|
||||
bytestream_put_byte (&buf, 0x40); // source format and alpha information
|
||||
bytestream_put_byte (&buf, 0); // reserved
|
||||
bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present
|
||||
|
@ -424,7 +424,7 @@ static av_cold int qtrle_decode_init(AVCodecContext *avctx)
    default:
        av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n",
                avctx->bits_per_coded_sample);
        break;
        return AVERROR_INVALIDDATA;
    }

    avcodec_get_frame_defaults(&s->frame);

@ -183,6 +183,8 @@ static void rpza_decode_stream(RpzaContext *s)
        color4[1] |= ((11 * ta + 21 * tb) >> 5);
        color4[2] |= ((21 * ta + 11 * tb) >> 5);

        if (s->size - stream_ptr < n_blocks * 4)
            return;
        while (n_blocks--) {
            block_ptr = row_ptr + pixel_ptr;
            for (pixel_y = 0; pixel_y < 4; pixel_y++) {
@ -200,6 +202,8 @@ static void rpza_decode_stream(RpzaContext *s)

        /* Fill block with 16 colors */
        case 0x00:
            if (s->size - stream_ptr < 16)
                return;
            block_ptr = row_ptr + pixel_ptr;
            for (pixel_y = 0; pixel_y < 4; pixel_y++) {
                for (pixel_x = 0; pixel_x < 4; pixel_x++){

@ -56,6 +56,8 @@ void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
        put_bits(&s->pb, 3, 0); /* ignored */
    }

FF_MPV_GENERIC_CLASS(rv10)

AVCodec ff_rv10_encoder = {
    .name = "rv10",
    .type = AVMEDIA_TYPE_VIDEO,
@ -66,4 +68,5 @@ AVCodec ff_rv10_encoder = {
    .close = ff_MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
    .priv_class = &rv10_class,
};

@ -57,6 +57,8 @@ void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number){
        }
    }

FF_MPV_GENERIC_CLASS(rv20)

AVCodec ff_rv20_encoder = {
    .name = "rv20",
    .type = AVMEDIA_TYPE_VIDEO,
@ -67,4 +69,5 @@ AVCodec ff_rv20_encoder = {
    .close = ff_MPV_encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
    .priv_class = &rv20_class,
};

@ -60,5 +60,14 @@
#ifndef FF_API_OLD_ENCODE_VIDEO
#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_MPV_GLOBAL_OPTS
#define FF_API_MPV_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_COLOR_TABLE_ID
#define FF_API_COLOR_TABLE_ID (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_INTER_THRESHOLD
#define FF_API_INTER_THRESHOLD (LIBAVCODEC_VERSION_MAJOR < 55)
#endif

#endif /* AVCODEC_VERSION_H */

@ -484,7 +484,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
        return AVERROR_PATCHWELCOME;
        return AVERROR_INVALIDDATA;
    }

    return 0;

@ -57,6 +57,11 @@ static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
    }
    rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
    cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
    if (!rows || !cols) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n",
               cols << 4, rows << 4);
        return 0;
    }
    vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
    vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
    vp56_rac_gets(c, 2);

@ -77,6 +77,10 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
        cols = buf[3]; /* number of stored macroblock cols */
        /* buf[4] is number of displayed macroblock rows */
        /* buf[5] is number of displayed macroblock cols */
        if (!rows || !cols) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4);
            return 0;
        }

        if (!s->macroblocks || /* first frame */
            16*cols != s->avctx->coded_width ||
@ -97,7 +101,7 @@ static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size,
            vrt_shift = 5;
        s->sub_version = sub_version;
    } else {
        if (!s->sub_version)
        if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
            return 0;

        if (separated_coeff || !s->filter_header) {

@ -26,7 +26,6 @@
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavcodec/mpegaudio.h"
|
||||
#include "avformat.h"
|
||||
#include "internal.h"
|
||||
#include "avio_internal.h"
|
||||
@ -199,6 +198,8 @@ static int asf_read_file_properties(AVFormatContext *s, int64_t size)
|
||||
asf->hdr.flags = avio_rl32(pb);
|
||||
asf->hdr.min_pktsize = avio_rl32(pb);
|
||||
asf->hdr.max_pktsize = avio_rl32(pb);
|
||||
if (asf->hdr.min_pktsize >= (1U<<29))
|
||||
return AVERROR_INVALIDDATA;
|
||||
asf->hdr.max_bitrate = avio_rl32(pb);
|
||||
s->packet_size = asf->hdr.max_pktsize;
|
||||
|
||||
@ -317,25 +318,6 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
|
||||
|| asf_st->ds_packet_size % asf_st->ds_chunk_size)
|
||||
asf_st->ds_span = 0; // disable descrambling
|
||||
}
|
||||
switch (st->codec->codec_id) {
|
||||
case CODEC_ID_MP3:
|
||||
st->codec->frame_size = MPA_FRAME_SIZE;
|
||||
break;
|
||||
case CODEC_ID_PCM_S16LE:
|
||||
case CODEC_ID_PCM_S16BE:
|
||||
case CODEC_ID_PCM_U16LE:
|
||||
case CODEC_ID_PCM_U16BE:
|
||||
case CODEC_ID_PCM_S8:
|
||||
case CODEC_ID_PCM_U8:
|
||||
case CODEC_ID_PCM_ALAW:
|
||||
case CODEC_ID_PCM_MULAW:
|
||||
st->codec->frame_size = 1;
|
||||
break;
|
||||
default:
|
||||
/* This is probably wrong, but it prevents a crash later */
|
||||
st->codec->frame_size = 1;
|
||||
break;
|
||||
}
|
||||
} else if (type == AVMEDIA_TYPE_VIDEO &&
|
||||
size - (avio_tell(pb) - pos1 + 24) >= 51) {
|
||||
avio_rl32(pb);
|
||||
@ -612,7 +594,9 @@ static int asf_read_header(AVFormatContext *s)
|
||||
if (gsize < 24)
|
||||
return -1;
|
||||
if (!ff_guidcmp(&g, &ff_asf_file_header)) {
|
||||
asf_read_file_properties(s, gsize);
|
||||
int ret = asf_read_file_properties(s, gsize);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else if (!ff_guidcmp(&g, &ff_asf_stream_header)) {
|
||||
asf_read_stream_properties(s, gsize);
|
||||
} else if (!ff_guidcmp(&g, &ff_asf_comment_header)) {
|
||||
|
@ -541,6 +541,13 @@ typedef struct AVIndexEntry {
#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */
#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */
#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */
/**
 * The stream is stored in the file as an attached picture/"cover art" (e.g.
 * APIC frame in ID3v2). The single packet associated with it will be returned
 * among the first few packets read from the file unless seeking takes place.
 * It can also be accessed at any time in AVStream.attached_pic.
 */
#define AV_DISPOSITION_ATTACHED_PIC 0x0400

/**
 * Stream structure.
@ -615,6 +622,15 @@ typedef struct AVStream {
     */
    AVRational avg_frame_rate;

    /**
     * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet
     * will contain the attached picture.
     *
     * decoding: set by libavformat, must not be modified by the caller.
     * encoding: unused
     */
    AVPacket attached_pic;

    /*****************************************************************
     * All fields below this line are not part of the public API. They
     * may not be used outside of libavformat and can be changed and

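The new disposition flag and AVStream.attached_pic let callers get at cover art directly instead of fishing the single picture packet out of the packet stream. A minimal application-side sketch, assuming a context already opened with avformat_open_input(); the dump_cover() helper name and the error handling are illustrative only, not part of this change:

    #include <stdio.h>
    #include <libavformat/avformat.h>

    /* Illustrative sketch: find the first attached picture in an opened
     * context and write its raw bytes (e.g. a PNG or JPEG) to a file. */
    static int dump_cover(AVFormatContext *fmt, const char *filename)
    {
        int i;
        for (i = 0; i < fmt->nb_streams; i++) {
            AVStream *st = fmt->streams[i];
            if (st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
                FILE *f = fopen(filename, "wb");
                if (!f)
                    return -1;
                /* attached_pic is filled by libavformat and must not be modified */
                fwrite(st->attached_pic.data, 1, st->attached_pic.size, f);
                fclose(f);
                return 0;
            }
        }
        return -1; /* no attached picture present */
    }
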
@ -107,6 +107,8 @@ static int dxa_read_header(AVFormatContext *s)
    ret = ff_get_wav_header(pb, ast->codec, fsize);
    if (ret < 0)
        return ret;
    if (ast->codec->sample_rate > 0)
        avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
    // find 'data' chunk
    while(avio_tell(pb) < c->vidpos && !url_feof(pb)){
        tag = avio_rl32(pb);

@ -338,8 +338,6 @@ static int ffm_read_header(AVFormatContext *s)
    codec->dct_algo = avio_rb32(pb);
    codec->strict_std_compliance = avio_rb32(pb);
    codec->max_b_frames = avio_rb32(pb);
    codec->luma_elim_threshold = avio_rb32(pb);
    codec->chroma_elim_threshold = avio_rb32(pb);
    codec->mpeg_quant = avio_rb32(pb);
    codec->intra_dc_precision = avio_rb32(pb);
    codec->me_method = avio_rb32(pb);

@ -144,8 +144,6 @@ static int ffm_write_header(AVFormatContext *s)
    avio_wb32(pb, codec->dct_algo);
    avio_wb32(pb, codec->strict_std_compliance);
    avio_wb32(pb, codec->max_b_frames);
    avio_wb32(pb, codec->luma_elim_threshold);
    avio_wb32(pb, codec->chroma_elim_threshold);
    avio_wb32(pb, codec->mpeg_quant);
    avio_wb32(pb, codec->intra_dc_precision);
    avio_wb32(pb, codec->me_method);

@ -38,6 +38,7 @@
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "avio_internal.h"
|
||||
#include "internal.h"
|
||||
|
||||
const AVMetadataConv ff_id3v2_34_metadata_conv[] = {
|
||||
{ "TALB", "album"},
|
||||
@ -99,6 +100,38 @@ const char ff_id3v2_3_tags[][4] = {
|
||||
{ 0 },
|
||||
};
|
||||
|
||||
const char *ff_id3v2_picture_types[21] = {
|
||||
"Other",
|
||||
"32x32 pixels 'file icon'",
|
||||
"Other file icon",
|
||||
"Cover (front)",
|
||||
"Cover (back)",
|
||||
"Leaflet page",
|
||||
"Media (e.g. label side of CD)",
|
||||
"Lead artist/lead performer/soloist",
|
||||
"Artist/performer",
|
||||
"Conductor",
|
||||
"Band/Orchestra",
|
||||
"Composer",
|
||||
"Lyricist/text writer",
|
||||
"Recording Location",
|
||||
"During recording",
|
||||
"During performance",
|
||||
"Movie/video screen capture",
|
||||
"A bright coloured fish",
|
||||
"Illustration",
|
||||
"Band/artist logotype",
|
||||
"Publisher/Studio logotype",
|
||||
};
|
||||
|
||||
const CodecMime ff_id3v2_mime_tags[] = {
|
||||
{"image/gif" , CODEC_ID_GIF},
|
||||
{"image/jpeg", CODEC_ID_MJPEG},
|
||||
{"image/png" , CODEC_ID_PNG},
|
||||
{"image/tiff", CODEC_ID_TIFF},
|
||||
{"", CODEC_ID_NONE},
|
||||
};
|
||||
|
||||
int ff_id3v2_match(const uint8_t *buf, const char * magic)
|
||||
{
|
||||
return buf[0] == magic[0] &&
|
||||
@ -394,6 +427,84 @@ finish:
|
||||
av_dict_set(m, "date", date, 0);
|
||||
}
|
||||
|
||||
static void free_apic(void *obj)
|
||||
{
|
||||
ID3v2ExtraMetaAPIC *apic = obj;
|
||||
av_freep(&apic->data);
|
||||
av_freep(&apic->description);
|
||||
av_freep(&apic);
|
||||
}
|
||||
|
||||
static void read_apic(AVFormatContext *s, AVIOContext *pb, int taglen, char *tag, ID3v2ExtraMeta **extra_meta)
|
||||
{
|
||||
int enc, pic_type;
|
||||
char mimetype[64];
|
||||
const CodecMime *mime = ff_id3v2_mime_tags;
|
||||
enum CodecID id = CODEC_ID_NONE;
|
||||
ID3v2ExtraMetaAPIC *apic = NULL;
|
||||
ID3v2ExtraMeta *new_extra = NULL;
|
||||
int64_t end = avio_tell(pb) + taglen;
|
||||
|
||||
if (taglen <= 4)
|
||||
goto fail;
|
||||
|
||||
new_extra = av_mallocz(sizeof(*new_extra));
|
||||
apic = av_mallocz(sizeof(*apic));
|
||||
if (!new_extra || !apic)
|
||||
goto fail;
|
||||
|
||||
enc = avio_r8(pb);
|
||||
taglen--;
|
||||
|
||||
/* mimetype */
|
||||
taglen -= avio_get_str(pb, taglen, mimetype, sizeof(mimetype));
|
||||
while (mime->id != CODEC_ID_NONE) {
|
||||
if (!strncmp(mime->str, mimetype, sizeof(mimetype))) {
|
||||
id = mime->id;
|
||||
break;
|
||||
}
|
||||
mime++;
|
||||
}
|
||||
if (id == CODEC_ID_NONE) {
|
||||
av_log(s, AV_LOG_WARNING, "Unknown attached picture mimetype: %s, skipping.\n", mimetype);
|
||||
goto fail;
|
||||
}
|
||||
apic->id = id;
|
||||
|
||||
/* picture type */
|
||||
pic_type = avio_r8(pb);
|
||||
taglen--;
|
||||
if (pic_type < 0 || pic_type >= FF_ARRAY_ELEMS(ff_id3v2_picture_types)) {
|
||||
av_log(s, AV_LOG_WARNING, "Unknown attached picture type %d.\n", pic_type);
|
||||
pic_type = 0;
|
||||
}
|
||||
apic->type = ff_id3v2_picture_types[pic_type];
|
||||
|
||||
/* description and picture data */
|
||||
if (decode_str(s, pb, enc, &apic->description, &taglen) < 0) {
|
||||
av_log(s, AV_LOG_ERROR, "Error decoding attached picture description.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
apic->len = taglen;
|
||||
apic->data = av_malloc(taglen);
|
||||
if (!apic->data || avio_read(pb, apic->data, taglen) != taglen)
|
||||
goto fail;
|
||||
|
||||
new_extra->tag = "APIC";
|
||||
new_extra->data = apic;
|
||||
new_extra->next = *extra_meta;
|
||||
*extra_meta = new_extra;
|
||||
|
||||
return;
|
||||
|
||||
fail:
|
||||
if (apic)
|
||||
free_apic(apic);
|
||||
av_freep(&new_extra);
|
||||
avio_seek(pb, end, SEEK_SET);
|
||||
}
|
||||
|
||||
typedef struct ID3v2EMFunc {
|
||||
const char *tag3;
|
||||
const char *tag4;
|
||||
@ -403,6 +514,7 @@ typedef struct ID3v2EMFunc {
|
||||
|
||||
static const ID3v2EMFunc id3v2_extra_meta_funcs[] = {
|
||||
{ "GEO", "GEOB", read_geobtag, free_geobtag },
|
||||
{ "PIC", "APIC", read_apic, free_apic },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
@ -620,7 +732,7 @@ seek:
|
||||
return;
|
||||
}
|
||||
|
||||
void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta)
|
||||
void ff_id3v2_read(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta)
|
||||
{
|
||||
int len, ret;
|
||||
uint8_t buf[ID3v2_HEADER_SIZE];
|
||||
@ -651,11 +763,6 @@ void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **e
|
||||
merge_date(&s->metadata);
|
||||
}
|
||||
|
||||
void ff_id3v2_read(AVFormatContext *s, const char *magic)
|
||||
{
|
||||
ff_id3v2_read_all(s, magic, NULL);
|
||||
}
|
||||
|
||||
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
|
||||
{
|
||||
ID3v2ExtraMeta *current = *extra_meta, *next;
|
||||
@ -669,3 +776,37 @@ void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta)
|
||||
current = next;
|
||||
}
|
||||
}
|
||||
|
||||
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta)
|
||||
{
|
||||
ID3v2ExtraMeta *cur;
|
||||
|
||||
for (cur = *extra_meta; cur; cur = cur->next) {
|
||||
ID3v2ExtraMetaAPIC *apic;
|
||||
AVStream *st;
|
||||
|
||||
if (strcmp(cur->tag, "APIC"))
|
||||
continue;
|
||||
apic = cur->data;
|
||||
|
||||
if (!(st = avformat_new_stream(s, NULL)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
st->disposition |= AV_DISPOSITION_ATTACHED_PIC;
|
||||
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
st->codec->codec_id = apic->id;
|
||||
av_dict_set(&st->metadata, "title", apic->description, 0);
|
||||
av_dict_set(&st->metadata, "comment", apic->type, 0);
|
||||
|
||||
av_init_packet(&st->attached_pic);
|
||||
st->attached_pic.data = apic->data;
|
||||
st->attached_pic.size = apic->len;
|
||||
st->attached_pic.destruct = av_destruct_packet;
|
||||
st->attached_pic.stream_index = st->index;
|
||||
|
||||
apic->data = NULL;
|
||||
apic->len = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
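For demuxers that want attached pictures exposed as streams, the reading path introduced here is used later in this same change (omadec, libavformat/utils.c): read the tag with an extra-metadata list, turn APIC entries into streams, then free the list. A sketch of that call order inside a hypothetical read_header(), shown only to make the sequence explicit:

    #include "avformat.h"
    #include "id3v2.h"

    /* Sketch only: how a demuxer picks up ID3v2 attached pictures
     * with the extra-metadata API introduced in this change. */
    static int example_read_header(AVFormatContext *s)
    {
        ID3v2ExtraMeta *extra_meta = NULL;
        int ret;

        /* text frames go to s->metadata, APIC/GEOB frames to extra_meta */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &extra_meta);

        /* one AVStream with AV_DISPOSITION_ATTACHED_PIC per APIC frame */
        ret = ff_id3v2_parse_apic(s, &extra_meta);
        ff_id3v2_free_extra_meta(&extra_meta);
        if (ret < 0)
            return ret;

        /* ... format-specific parsing continues here ... */
        return 0;
    }
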
@ -24,6 +24,7 @@
|
||||
|
||||
#include <stdint.h>
|
||||
#include "avformat.h"
|
||||
#include "internal.h"
|
||||
#include "metadata.h"
|
||||
|
||||
#define ID3v2_HEADER_SIZE 10
|
||||
@ -45,6 +46,12 @@ enum ID3v2Encoding {
|
||||
ID3v2_ENCODING_UTF8 = 3,
|
||||
};
|
||||
|
||||
typedef struct ID3v2EncContext {
|
||||
int version; ///< ID3v2 minor version, either 3 or 4
|
||||
int64_t size_pos; ///< offset of the tag total size
|
||||
int len; ///< size of the tag written so far
|
||||
} ID3v2EncContext;
|
||||
|
||||
typedef struct ID3v2ExtraMeta {
|
||||
const char *tag;
|
||||
void *data;
|
||||
@ -59,6 +66,14 @@ typedef struct ID3v2ExtraMetaGEOB {
|
||||
uint8_t *data;
|
||||
} ID3v2ExtraMetaGEOB;
|
||||
|
||||
typedef struct ID3v2ExtraMetaAPIC {
|
||||
uint8_t *data;
|
||||
int len;
|
||||
const char *type;
|
||||
uint8_t *description;
|
||||
enum CodecID id;
|
||||
} ID3v2ExtraMetaAPIC;
|
||||
|
||||
/**
|
||||
* Detect ID3v2 Header.
|
||||
* @param buf must be ID3v2_HEADER_SIZE byte long
|
||||
@ -75,24 +90,40 @@ int ff_id3v2_match(const uint8_t *buf, const char *magic);
|
||||
int ff_id3v2_tag_len(const uint8_t *buf);
|
||||
|
||||
/**
|
||||
* Read an ID3v2 tag (text tags only)
|
||||
*/
|
||||
void ff_id3v2_read(AVFormatContext *s, const char *magic);
|
||||
|
||||
/**
|
||||
* Read an ID3v2 tag, including supported extra metadata (currently only GEOB)
|
||||
* Read an ID3v2 tag, including supported extra metadata
|
||||
* @param extra_meta If not NULL, extra metadata is parsed into a list of
|
||||
* ID3v2ExtraMeta structs and *extra_meta points to the head of the list
|
||||
*/
|
||||
void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta);
|
||||
void ff_id3v2_read(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta);
|
||||
|
||||
/**
|
||||
* Write an ID3v2 tag.
|
||||
* Initialize an ID3v2 tag.
|
||||
*/
|
||||
void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
|
||||
const char *magic);
|
||||
|
||||
/**
|
||||
* Convert and write all global metadata from s into an ID3v2 tag.
|
||||
*/
|
||||
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3);
|
||||
|
||||
/**
|
||||
* Write an attached picture from pkt into an ID3v2 tag.
|
||||
*/
|
||||
int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt);
|
||||
|
||||
/**
|
||||
* Finalize an opened ID3v2 tag.
|
||||
*/
|
||||
void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb);
|
||||
|
||||
/**
|
||||
* Write an ID3v2 tag containing all global metadata from s.
|
||||
* @param id3v2_version Subversion of ID3v2; supported values are 3 and 4
|
||||
* @param magic magic bytes to identify the header
|
||||
* If in doubt, use ID3v2_DEFAULT_MAGIC.
|
||||
*/
|
||||
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *magic);
|
||||
int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version, const char *magic);
|
||||
|
||||
/**
|
||||
* Free memory allocated parsing special (non-text) metadata.
|
||||
@ -100,6 +131,12 @@ int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *mag
|
||||
*/
|
||||
void ff_id3v2_free_extra_meta(ID3v2ExtraMeta **extra_meta);
|
||||
|
||||
/**
|
||||
* Create a stream for each APIC (attached picture) extracted from the
|
||||
* ID3v2 header.
|
||||
*/
|
||||
int ff_id3v2_parse_apic(AVFormatContext *s, ID3v2ExtraMeta **extra_meta);
|
||||
|
||||
extern const AVMetadataConv ff_id3v2_34_metadata_conv[];
|
||||
extern const AVMetadataConv ff_id3v2_4_metadata_conv[];
|
||||
|
||||
@ -120,4 +157,8 @@ extern const char ff_id3v2_4_tags[][4];
|
||||
*/
|
||||
extern const char ff_id3v2_3_tags[][4];
|
||||
|
||||
extern const CodecMime ff_id3v2_mime_tags[];
|
||||
|
||||
extern const char *ff_id3v2_picture_types[21];
|
||||
|
||||
#endif /* AVFORMAT_ID3V2_H */
|
||||
|
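The writer side is now split so that frames can be appended after the tag has been started, which is what the MP3 muxer further down relies on. A sketch of the call order, assuming a single attached-picture packet pic_pkt is available at header-writing time (the function name is illustrative only):

    #include "avformat.h"
    #include "id3v2.h"

    /* Sketch only: write an ID3v2.4 tag holding the global metadata of s
     * plus one attached picture, using the split start/metadata/apic/finish API. */
    static int example_write_id3v2(AVFormatContext *s, AVPacket *pic_pkt)
    {
        ID3v2EncContext id3 = { 0 };
        int ret;

        ff_id3v2_start(&id3, s->pb, 4, ID3v2_DEFAULT_MAGIC);
        if ((ret = ff_id3v2_write_metadata(s, &id3)) < 0)
            return ret;
        if (pic_pkt && (ret = ff_id3v2_write_apic(s, &id3, pic_pkt)) < 0)
            return ret;
        ff_id3v2_finish(&id3, s->pb); /* patch the total size reserved by start */
        return 0;
    }

Muxers that only need global metadata keep the one-shot path via ff_id3v2_write_simple(), as the OpenMG muxer below does.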
@ -19,6 +19,8 @@
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
@ -26,12 +28,12 @@
|
||||
#include "avio.h"
|
||||
#include "id3v2.h"
|
||||
|
||||
static void id3v2_put_size(AVFormatContext *s, int size)
|
||||
static void id3v2_put_size(AVIOContext *pb, int size)
|
||||
{
|
||||
avio_w8(s->pb, size >> 21 & 0x7f);
|
||||
avio_w8(s->pb, size >> 14 & 0x7f);
|
||||
avio_w8(s->pb, size >> 7 & 0x7f);
|
||||
avio_w8(s->pb, size & 0x7f);
|
||||
avio_w8(pb, size >> 21 & 0x7f);
|
||||
avio_w8(pb, size >> 14 & 0x7f);
|
||||
avio_w8(pb, size >> 7 & 0x7f);
|
||||
avio_w8(pb, size & 0x7f);
|
||||
}
|
||||
|
||||
static int string_is_ascii(const uint8_t *str)
|
||||
@ -40,17 +42,30 @@ static int string_is_ascii(const uint8_t *str)
|
||||
return !*str;
|
||||
}
|
||||
|
||||
static void id3v2_encode_string(AVIOContext *pb, const uint8_t *str,
|
||||
enum ID3v2Encoding enc)
|
||||
{
|
||||
int (*put)(AVIOContext*, const char*);
|
||||
|
||||
if (enc == ID3v2_ENCODING_UTF16BOM) {
|
||||
avio_wl16(pb, 0xFEFF); /* BOM */
|
||||
put = avio_put_str16le;
|
||||
} else
|
||||
put = avio_put_str;
|
||||
|
||||
put(pb, str);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a text frame with one (normal frames) or two (TXXX frames) strings
|
||||
* according to encoding (only UTF-8 or UTF-16+BOM supported).
|
||||
* @return number of bytes written or a negative error code.
|
||||
*/
|
||||
static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2,
|
||||
static int id3v2_put_ttag(ID3v2EncContext *id3, AVIOContext *avioc, const char *str1, const char *str2,
|
||||
uint32_t tag, enum ID3v2Encoding enc)
|
||||
{
|
||||
int len;
|
||||
uint8_t *pb;
|
||||
int (*put)(AVIOContext*, const char*);
|
||||
AVIOContext *dyn_buf;
|
||||
if (avio_open_dyn_buf(&dyn_buf) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
@ -62,28 +77,26 @@ static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2
|
||||
enc = ID3v2_ENCODING_ISO8859;
|
||||
|
||||
avio_w8(dyn_buf, enc);
|
||||
if (enc == ID3v2_ENCODING_UTF16BOM) {
|
||||
avio_wl16(dyn_buf, 0xFEFF); /* BOM */
|
||||
put = avio_put_str16le;
|
||||
} else
|
||||
put = avio_put_str;
|
||||
|
||||
put(dyn_buf, str1);
|
||||
id3v2_encode_string(dyn_buf, str1, enc);
|
||||
if (str2)
|
||||
put(dyn_buf, str2);
|
||||
id3v2_encode_string(dyn_buf, str2, enc);
|
||||
len = avio_close_dyn_buf(dyn_buf, &pb);
|
||||
|
||||
avio_wb32(s->pb, tag);
|
||||
id3v2_put_size(s, len);
|
||||
avio_wb16(s->pb, 0);
|
||||
avio_write(s->pb, pb, len);
|
||||
avio_wb32(avioc, tag);
|
||||
/* ID3v2.3 frame size is not synchsafe */
|
||||
if (id3->version == 3)
|
||||
avio_wb32(avioc, len);
|
||||
else
|
||||
id3v2_put_size(avioc, len);
|
||||
avio_wb16(avioc, 0);
|
||||
avio_write(avioc, pb, len);
|
||||
|
||||
av_freep(&pb);
|
||||
return len + ID3v2_HEADER_SIZE;
|
||||
}
|
||||
|
||||
static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const char table[][4],
|
||||
enum ID3v2Encoding enc)
|
||||
static int id3v2_check_write_tag(ID3v2EncContext *id3, AVIOContext *pb, AVDictionaryEntry *t,
|
||||
const char table[][4], enum ID3v2Encoding enc)
|
||||
{
|
||||
uint32_t tag;
|
||||
int i;
|
||||
@ -93,7 +106,7 @@ static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const
|
||||
tag = AV_RB32(t->key);
|
||||
for (i = 0; *table[i]; i++)
|
||||
if (tag == AV_RB32(table[i]))
|
||||
return id3v2_put_ttag(s, t->value, NULL, tag, enc);
|
||||
return id3v2_put_ttag(id3, pb, t->value, NULL, tag, enc);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -135,52 +148,137 @@ static void id3v2_3_metadata_split_date(AVDictionary **pm)
|
||||
*pm = dst;
|
||||
}
|
||||
|
||||
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version,
|
||||
const char *magic)
|
||||
void ff_id3v2_start(ID3v2EncContext *id3, AVIOContext *pb, int id3v2_version,
|
||||
const char *magic)
|
||||
{
|
||||
int64_t size_pos, cur_pos;
|
||||
AVDictionaryEntry *t = NULL;
|
||||
id3->version = id3v2_version;
|
||||
|
||||
int totlen = 0, enc = id3v2_version == 3 ? ID3v2_ENCODING_UTF16BOM :
|
||||
ID3v2_ENCODING_UTF8;
|
||||
|
||||
|
||||
avio_wb32(s->pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
|
||||
avio_w8(s->pb, 0);
|
||||
avio_w8(s->pb, 0); /* flags */
|
||||
avio_wb32(pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
|
||||
avio_w8(pb, 0);
|
||||
avio_w8(pb, 0); /* flags */
|
||||
|
||||
/* reserve space for size */
|
||||
size_pos = avio_tell(s->pb);
|
||||
avio_wb32(s->pb, 0);
|
||||
id3->size_pos = avio_tell(pb);
|
||||
avio_wb32(pb, 0);
|
||||
}
|
||||
|
||||
int ff_id3v2_write_metadata(AVFormatContext *s, ID3v2EncContext *id3)
|
||||
{
|
||||
AVDictionaryEntry *t = NULL;
|
||||
int enc = id3->version == 3 ? ID3v2_ENCODING_UTF16BOM :
|
||||
ID3v2_ENCODING_UTF8;
|
||||
|
||||
ff_metadata_conv(&s->metadata, ff_id3v2_34_metadata_conv, NULL);
|
||||
if (id3v2_version == 3)
|
||||
if (id3->version == 3)
|
||||
id3v2_3_metadata_split_date(&s->metadata);
|
||||
else if (id3v2_version == 4)
|
||||
else if (id3->version == 4)
|
||||
ff_metadata_conv(&s->metadata, ff_id3v2_4_metadata_conv, NULL);
|
||||
|
||||
while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
|
||||
int ret;
|
||||
|
||||
if ((ret = id3v2_check_write_tag(s, t, ff_id3v2_tags, enc)) > 0) {
|
||||
totlen += ret;
|
||||
if ((ret = id3v2_check_write_tag(id3, s->pb, t, ff_id3v2_tags, enc)) > 0) {
|
||||
id3->len += ret;
|
||||
continue;
|
||||
}
|
||||
if ((ret = id3v2_check_write_tag(s, t, id3v2_version == 3 ?
|
||||
if ((ret = id3v2_check_write_tag(id3, s->pb, t, id3->version == 3 ?
|
||||
ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) {
|
||||
totlen += ret;
|
||||
id3->len += ret;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* unknown tag, write as TXXX frame */
|
||||
if ((ret = id3v2_put_ttag(s, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
|
||||
if ((ret = id3v2_put_ttag(id3, s->pb, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
|
||||
return ret;
|
||||
totlen += ret;
|
||||
id3->len += ret;
|
||||
}
|
||||
|
||||
cur_pos = avio_tell(s->pb);
|
||||
avio_seek(s->pb, size_pos, SEEK_SET);
|
||||
id3v2_put_size(s, totlen);
|
||||
avio_seek(s->pb, cur_pos, SEEK_SET);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt)
|
||||
{
|
||||
AVStream *st = s->streams[pkt->stream_index];
|
||||
AVDictionaryEntry *e;
|
||||
|
||||
AVIOContext *dyn_buf;
|
||||
uint8_t *buf;
|
||||
const CodecMime *mime = ff_id3v2_mime_tags;
|
||||
const char *mimetype = NULL, *desc = "";
|
||||
int enc = id3->version == 3 ? ID3v2_ENCODING_UTF16BOM :
|
||||
ID3v2_ENCODING_UTF8;
|
||||
int i, len, type = 0;
|
||||
|
||||
/* get the mimetype*/
|
||||
while (mime->id != CODEC_ID_NONE) {
|
||||
if (mime->id == st->codec->codec_id) {
|
||||
mimetype = mime->str;
|
||||
break;
|
||||
}
|
||||
mime++;
|
||||
}
|
||||
if (!mimetype) {
|
||||
av_log(s, AV_LOG_ERROR, "No mimetype is known for stream %d, cannot "
|
||||
"write an attached picture.\n", st->index);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* get the picture type */
|
||||
e = av_dict_get(st->metadata, "comment", NULL, 0);
|
||||
for (i = 0; e && i < FF_ARRAY_ELEMS(ff_id3v2_picture_types); i++) {
|
||||
if (strstr(ff_id3v2_picture_types[i], e->value) == ff_id3v2_picture_types[i]) {
|
||||
type = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* get the description */
|
||||
if ((e = av_dict_get(st->metadata, "title", NULL, 0)))
|
||||
desc = e->value;
|
||||
|
||||
/* start writing */
|
||||
if (avio_open_dyn_buf(&dyn_buf) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
avio_w8(dyn_buf, enc);
|
||||
avio_put_str(dyn_buf, mimetype);
|
||||
avio_w8(dyn_buf, type);
|
||||
id3v2_encode_string(dyn_buf, desc, enc);
|
||||
avio_write(dyn_buf, pkt->data, pkt->size);
|
||||
len = avio_close_dyn_buf(dyn_buf, &buf);
|
||||
|
||||
avio_wb32(s->pb, MKBETAG('A', 'P', 'I', 'C'));
|
||||
if (id3->version == 3)
|
||||
avio_wb32(s->pb, len);
|
||||
else
|
||||
id3v2_put_size(s->pb, len);
|
||||
avio_wb16(s->pb, 0);
|
||||
avio_write(s->pb, buf, len);
|
||||
av_freep(&buf);
|
||||
|
||||
id3->len += len + ID3v2_HEADER_SIZE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ff_id3v2_finish(ID3v2EncContext *id3, AVIOContext *pb)
|
||||
{
|
||||
int64_t cur_pos = avio_tell(pb);
|
||||
avio_seek(pb, id3->size_pos, SEEK_SET);
|
||||
id3v2_put_size(pb, id3->len);
|
||||
avio_seek(pb, cur_pos, SEEK_SET);
|
||||
}
|
||||
|
||||
int ff_id3v2_write_simple(struct AVFormatContext *s, int id3v2_version,
|
||||
const char *magic)
|
||||
{
|
||||
ID3v2EncContext id3 = { 0 };
|
||||
int ret;
|
||||
|
||||
ff_id3v2_start(&id3, s->pb, id3v2_version, magic);
|
||||
if ((ret = ff_id3v2_write_metadata(s, &id3)) < 0)
|
||||
return ret;
|
||||
ff_id3v2_finish(&id3, s->pb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
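id3v2_put_size() stores sizes as ID3v2 "synchsafe" integers: 28 significant bits spread over four bytes with the top bit of every byte kept clear, so the stored size can never contain a 0xFF sync byte. ID3v2.3 frame sizes, by contrast, are plain big-endian 32-bit values, which is why the frame-size writes above branch on id3->version. A small standalone illustration of the encoding (not part of the change itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Encode a value (< 2^28) as a 4-byte ID3v2 synchsafe integer,
     * matching what id3v2_put_size() writes byte by byte. */
    static void synchsafe_put(uint8_t out[4], uint32_t size)
    {
        out[0] = size >> 21 & 0x7f;
        out[1] = size >> 14 & 0x7f;
        out[2] = size >>  7 & 0x7f;
        out[3] = size       & 0x7f;
    }

    int main(void)
    {
        uint8_t b[4];
        synchsafe_put(b, 1000); /* 1000 = 0x3E8 */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); /* 00 00 07 68 */
        return 0;
    }
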
@ -37,6 +37,11 @@ typedef struct AVCodecTag {
    unsigned int tag;
} AVCodecTag;

typedef struct CodecMime{
    char str[32];
    enum CodecID id;
} CodecMime;

#ifdef __GNUC__
#define dynarray_add(tab, nb_ptr, elem)\
do {\

@ -24,6 +24,7 @@

#include "libavcodec/avcodec.h"
#include "metadata.h"
#include "internal.h"

/* EBML version supported */
#define EBML_VERSION 1
@ -250,11 +251,6 @@ typedef struct CodecTags{
    enum CodecID id;
}CodecTags;

typedef struct CodecMime{
    char str[32];
    enum CodecID id;
}CodecMime;

/* max. depth in the EBML tree structure */
#define EBML_MAX_DEPTH 16

@ -1223,6 +1223,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
|
||||
if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
|
||||
unsigned int color_depth, len;
|
||||
int color_greyscale;
|
||||
int color_table_id;
|
||||
|
||||
st->codec->codec_id = id;
|
||||
avio_rb16(pb); /* version */
|
||||
@ -1250,9 +1251,9 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
|
||||
st->codec->codec_tag=MKTAG('I', '4', '2', '0');
|
||||
|
||||
st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */
|
||||
st->codec->color_table_id = avio_rb16(pb); /* colortable id */
|
||||
color_table_id = avio_rb16(pb); /* colortable id */
|
||||
av_dlog(c->fc, "depth %d, ctab id %d\n",
|
||||
st->codec->bits_per_coded_sample, st->codec->color_table_id);
|
||||
st->codec->bits_per_coded_sample, color_table_id);
|
||||
/* figure out the palette situation */
|
||||
color_depth = st->codec->bits_per_coded_sample & 0x1F;
|
||||
color_greyscale = st->codec->bits_per_coded_sample & 0x20;
|
||||
@ -1282,7 +1283,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
|
||||
if (color_index < 0)
|
||||
color_index = 0;
|
||||
}
|
||||
} else if (st->codec->color_table_id) {
|
||||
} else if (color_table_id) {
|
||||
const uint8_t *color_table;
|
||||
/* if flag bit 3 is set, use the default palette */
|
||||
color_count = 1 << color_depth;
|
||||
|
@ -84,6 +84,7 @@ static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf)
|
||||
|
||||
typedef struct MP3Context {
|
||||
const AVClass *class;
|
||||
ID3v2EncContext id3;
|
||||
int id3v2_version;
|
||||
int write_id3v1;
|
||||
int64_t frames_offset;
|
||||
@ -93,61 +94,16 @@ typedef struct MP3Context {
|
||||
uint32_t seen;
|
||||
uint32_t pos;
|
||||
uint64_t bag[VBR_NUM_BAGS];
|
||||
|
||||
/* index of the audio stream */
|
||||
int audio_stream_idx;
|
||||
/* number of attached pictures we still need to write */
|
||||
int pics_to_write;
|
||||
|
||||
/* audio packets are queued here until we get all the attached pictures */
|
||||
AVPacketList *queue, *queue_end;
|
||||
} MP3Context;
|
||||
|
||||
static int mp2_write_trailer(struct AVFormatContext *s)
|
||||
{
|
||||
uint8_t buf[ID3v1_TAG_SIZE];
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
|
||||
/* write the id3v1 tag */
|
||||
if (mp3 && mp3->write_id3v1 && id3v1_create_tag(s, buf) > 0) {
|
||||
avio_write(s->pb, buf, ID3v1_TAG_SIZE);
|
||||
}
|
||||
|
||||
/* write number of frames */
|
||||
if (mp3 && mp3->frames_offset) {
|
||||
avio_seek(s->pb, mp3->frames_offset, SEEK_SET);
|
||||
avio_wb32(s->pb, s->streams[0]->nb_frames);
|
||||
avio_seek(s->pb, 0, SEEK_END);
|
||||
}
|
||||
|
||||
avio_flush(s->pb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if CONFIG_MP2_MUXER
|
||||
AVOutputFormat ff_mp2_muxer = {
|
||||
.name = "mp2",
|
||||
.long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2"),
|
||||
.mime_type = "audio/x-mpeg",
|
||||
.extensions = "mp2,m2a",
|
||||
.audio_codec = CODEC_ID_MP2,
|
||||
.video_codec = CODEC_ID_NONE,
|
||||
.write_packet = ff_raw_write_packet,
|
||||
.write_trailer = mp2_write_trailer,
|
||||
.flags = AVFMT_NOTIMESTAMPS,
|
||||
};
|
||||
#endif
|
||||
|
||||
#if CONFIG_MP3_MUXER
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
|
||||
offsetof(MP3Context, id3v2_version), AV_OPT_TYPE_INT, {.dbl = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
|
||||
{ "write_id3v1", "Enable ID3v1 writing. ID3v1 tags are written in UTF-8 which may not be supported by most software.",
|
||||
offsetof(MP3Context, write_id3v1), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
static const AVClass mp3_muxer_class = {
|
||||
.class_name = "MP3 muxer",
|
||||
.item_name = av_default_item_name,
|
||||
.option = options,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
};
|
||||
|
||||
static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
|
||||
|
||||
/*
|
||||
@ -155,8 +111,8 @@ static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
|
||||
*/
|
||||
static int mp3_write_xing(AVFormatContext *s)
|
||||
{
|
||||
AVCodecContext *codec = s->streams[0]->codec;
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
|
||||
int bitrate_idx;
|
||||
int best_bitrate_idx = -1;
|
||||
int best_bitrate_error= INT_MAX;
|
||||
@ -166,6 +122,9 @@ static int mp3_write_xing(AVFormatContext *s)
|
||||
int srate_idx, i, channels;
|
||||
int needed;
|
||||
|
||||
if (!s->pb->seekable)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++)
|
||||
if (avpriv_mpa_freq_tab[i] == codec->sample_rate) {
|
||||
srate_idx = i;
|
||||
@ -295,26 +254,7 @@ static void mp3_fix_xing(AVFormatContext *s)
|
||||
avio_seek(s->pb, 0, SEEK_END);
|
||||
}
|
||||
|
||||
/**
|
||||
* Write an ID3v2 header at beginning of stream
|
||||
*/
|
||||
|
||||
static int mp3_write_header(struct AVFormatContext *s)
|
||||
{
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
int ret;
|
||||
|
||||
ret = ff_id3v2_write(s, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (s->pb->seekable)
|
||||
mp3_write_xing(s);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
static int mp3_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
|
||||
{
|
||||
if (! pkt || ! pkt->data || pkt->size < 4)
|
||||
return ff_raw_write_packet(s, pkt);
|
||||
@ -350,6 +290,175 @@ static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
}
|
||||
}
|
||||
|
||||
static int mp3_queue_flush(AVFormatContext *s)
|
||||
{
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
AVPacketList *pktl;
|
||||
int ret = 0, write = 1;
|
||||
|
||||
ff_id3v2_finish(&mp3->id3, s->pb);
|
||||
mp3_write_xing(s);
|
||||
|
||||
while ((pktl = mp3->queue)) {
|
||||
if (write && (ret = mp3_write_packet_internal(s, &pktl->pkt)) < 0)
|
||||
write = 0;
|
||||
av_free_packet(&pktl->pkt);
|
||||
mp3->queue = pktl->next;
|
||||
av_freep(&pktl);
|
||||
}
|
||||
mp3->queue_end = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mp2_write_trailer(struct AVFormatContext *s)
|
||||
{
|
||||
uint8_t buf[ID3v1_TAG_SIZE];
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
|
||||
if (mp3 && mp3->pics_to_write) {
|
||||
av_log(s, AV_LOG_WARNING, "No packets were sent for some of the "
|
||||
"attached pictures.\n");
|
||||
mp3_queue_flush(s);
|
||||
}
|
||||
|
||||
/* write the id3v1 tag */
|
||||
if (mp3 && mp3->write_id3v1 && id3v1_create_tag(s, buf) > 0) {
|
||||
avio_write(s->pb, buf, ID3v1_TAG_SIZE);
|
||||
}
|
||||
|
||||
/* write number of frames */
|
||||
if (mp3 && mp3->frames_offset) {
|
||||
avio_seek(s->pb, mp3->frames_offset, SEEK_SET);
|
||||
avio_wb32(s->pb, s->streams[mp3->audio_stream_idx]->nb_frames);
|
||||
avio_seek(s->pb, 0, SEEK_END);
|
||||
}
|
||||
|
||||
avio_flush(s->pb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if CONFIG_MP2_MUXER
|
||||
AVOutputFormat ff_mp2_muxer = {
|
||||
.name = "mp2",
|
||||
.long_name = NULL_IF_CONFIG_SMALL("MPEG audio layer 2"),
|
||||
.mime_type = "audio/x-mpeg",
|
||||
.extensions = "mp2,m2a",
|
||||
.audio_codec = CODEC_ID_MP2,
|
||||
.video_codec = CODEC_ID_NONE,
|
||||
.write_packet = ff_raw_write_packet,
|
||||
.write_trailer = mp2_write_trailer,
|
||||
.flags = AVFMT_NOTIMESTAMPS,
|
||||
};
|
||||
#endif
|
||||
|
||||
#if CONFIG_MP3_MUXER
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "id3v2_version", "Select ID3v2 version to write. Currently 3 and 4 are supported.",
|
||||
offsetof(MP3Context, id3v2_version), AV_OPT_TYPE_INT, {.dbl = 4}, 3, 4, AV_OPT_FLAG_ENCODING_PARAM},
|
||||
{ "write_id3v1", "Enable ID3v1 writing. ID3v1 tags are written in UTF-8 which may not be supported by most software.",
|
||||
offsetof(MP3Context, write_id3v1), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
static const AVClass mp3_muxer_class = {
|
||||
.class_name = "MP3 muxer",
|
||||
.item_name = av_default_item_name,
|
||||
.option = options,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
};
|
||||
|
||||
static int mp3_write_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
{
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
|
||||
if (pkt->stream_index == mp3->audio_stream_idx) {
|
||||
if (mp3->pics_to_write) {
|
||||
/* buffer audio packets until we get all the pictures */
|
||||
AVPacketList *pktl = av_mallocz(sizeof(*pktl));
|
||||
if (!pktl)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
pktl->pkt = *pkt;
|
||||
pkt->destruct = NULL;
|
||||
|
||||
if (mp3->queue_end)
|
||||
mp3->queue_end->next = pktl;
|
||||
else
|
||||
mp3->queue = pktl;
|
||||
mp3->queue_end = pktl;
|
||||
} else
|
||||
return mp3_write_packet_internal(s, pkt);
|
||||
} else {
|
||||
int ret;
|
||||
|
||||
/* warn only once for each stream */
|
||||
if (s->streams[pkt->stream_index]->nb_frames == 1) {
|
||||
av_log(s, AV_LOG_WARNING, "Got more than one picture in stream %d,"
|
||||
" ignoring.\n", pkt->stream_index);
|
||||
}
|
||||
if (!mp3->pics_to_write || s->streams[pkt->stream_index]->nb_frames >= 1)
|
||||
return 0;
|
||||
|
||||
if ((ret = ff_id3v2_write_apic(s, &mp3->id3, pkt)) < 0)
|
||||
return ret;
|
||||
mp3->pics_to_write--;
|
||||
|
||||
/* flush the buffered audio packets */
|
||||
if (!mp3->pics_to_write &&
|
||||
(ret = mp3_queue_flush(s)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write an ID3v2 header at beginning of stream
|
||||
*/
|
||||
|
||||
static int mp3_write_header(struct AVFormatContext *s)
|
||||
{
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
int ret, i;
|
||||
|
||||
/* check the streams -- we want exactly one audio and arbitrary number of
|
||||
* video (attached pictures) */
|
||||
mp3->audio_stream_idx = -1;
|
||||
for (i = 0; i < s->nb_streams; i++) {
|
||||
AVStream *st = s->streams[i];
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
if (mp3->audio_stream_idx >= 0 || st->codec->codec_id != CODEC_ID_MP3) {
|
||||
av_log(s, AV_LOG_ERROR, "Invalid audio stream. Exactly one MP3 "
|
||||
"audio stream is required.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
mp3->audio_stream_idx = i;
|
||||
} else if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
|
||||
av_log(s, AV_LOG_ERROR, "Only audio streams and pictures are allowed in MP3.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
}
|
||||
if (mp3->audio_stream_idx < 0) {
|
||||
av_log(s, AV_LOG_ERROR, "No audio stream present.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
mp3->pics_to_write = s->nb_streams - 1;
|
||||
|
||||
ff_id3v2_start(&mp3->id3, s->pb, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
|
||||
ret = ff_id3v2_write_metadata(s, &mp3->id3);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (!mp3->pics_to_write) {
|
||||
ff_id3v2_finish(&mp3->id3, s->pb);
|
||||
mp3_write_xing(s);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mp3_write_trailer(AVFormatContext *s)
|
||||
{
|
||||
MP3Context *mp3 = s->priv_data;
|
||||
@ -371,7 +480,7 @@ AVOutputFormat ff_mp3_muxer = {
|
||||
.extensions = "mp3",
|
||||
.priv_data_size = sizeof(MP3Context),
|
||||
.audio_codec = CODEC_ID_MP3,
|
||||
.video_codec = CODEC_ID_NONE,
|
||||
.video_codec = CODEC_ID_PNG,
|
||||
.write_header = mp3_write_header,
|
||||
.write_packet = mp3_write_packet,
|
||||
.write_trailer = mp3_write_trailer,
|
||||
|
@ -270,7 +270,7 @@ static int oma_read_header(AVFormatContext *s)
    ID3v2ExtraMeta *extra_meta = NULL;
    OMAContext *oc = s->priv_data;

    ff_id3v2_read_all(s, ID3v2_EA3_MAGIC, &extra_meta);
    ff_id3v2_read(s, ID3v2_EA3_MAGIC, &extra_meta);
    ret = avio_read(s->pb, buf, EA3_HEADER_SIZE);
    if (ret < EA3_HEADER_SIZE)
        return -1;

@ -49,7 +49,7 @@ static av_cold int oma_write_header(AVFormatContext *s)
    }

    /* Metadata; OpenMG does not support ID3v2.4 */
    ff_id3v2_write(s, 3, ID3v2_EA3_MAGIC);
    ff_id3v2_write_simple(s, 3, ID3v2_EA3_MAGIC);

    ffio_wfourcc(s->pb, "EA3\0");
    avio_w8(s->pb, EA3_HEADER_SIZE >> 7);
@ -299,4 +299,5 @@ AVInputFormat ff_str_demuxer = {
    .read_header = str_read_header,
    .read_packet = str_read_packet,
    .read_close = str_read_close,
    .flags = AVFMT_NO_BYTE_SEEK,
};
@ -112,7 +112,7 @@ static int rtp_write_header(AVFormatContext *s1)

    if (s->max_packet_size) {
        if (s1->pb->max_packet_size)
            s->max_packet_size = FFMIN(s->max_payload_size,
            s->max_packet_size = FFMIN(s->max_packet_size,
                                       s1->pb->max_packet_size);
    } else
        s->max_packet_size = s1->pb->max_packet_size;
@ -113,7 +113,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id = ff_codec_get_id(swf_codec_tags, avio_r8(pb));
        avpriv_set_pts_info(vst, 16, 256, swf->frame_rate);
        vst->codec->time_base = (AVRational){ 256, swf->frame_rate };
        len -= 8;
    } else if (tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2) {
        /* streaming found */
@ -186,7 +185,6 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
            vst->codec->codec_id = CODEC_ID_MJPEG;
            avpriv_set_pts_info(vst, 64, 256, swf->frame_rate);
            vst->codec->time_base = (AVRational){ 256, swf->frame_rate };
            st = vst;
        }
        avio_rl16(pb); /* BITMAP_ID */
@ -545,11 +545,29 @@ static int init_input(AVFormatContext *s, const char *filename, AVDictionary **o
    return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
}

static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    if (!pktl)
        return NULL;

    if (*packet_buffer)
        (*plast_pktl)->next = pktl;
    else
        *packet_buffer = pktl;

    /* add the packet in the buffered packet list */
    *plast_pktl = pktl;
    pktl->pkt= *pkt;
    return &pktl->pkt;
}

int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
{
    AVFormatContext *s = *ps;
    int ret = 0;
    int i, ret = 0;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
@ -592,12 +610,25 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    if (s->pb)
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
            goto fail;

    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
        goto fail;
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);

    /* queue attached pictures */
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            AVPacket copy = s->streams[i]->attached_pic;
            copy.destruct = NULL;
            add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
        }

    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);
@ -611,6 +642,7 @@ int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputForma
    return 0;

fail:
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    av_dict_free(&tmp);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_close(s->pb);
@ -621,23 +653,6 @@ fail:

/*******************************************************/

static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    if (!pktl)
        return NULL;

    if (*packet_buffer)
        (*plast_pktl)->next = pktl;
    else
        *packet_buffer = pktl;

    /* add the packet in the buffered packet list */
    *plast_pktl = pktl;
    pktl->pkt= *pkt;
    return &pktl->pkt;
}

int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
@ -2722,6 +2737,8 @@ void avformat_free_context(AVFormatContext *s)
            av_parser_close(st->parser);
            av_free_packet(&st->cur_pkt);
        }
        if (st->attached_pic.data)
            av_free_packet(&st->attached_pic);
        av_dict_free(&st->metadata);
        av_freep(&st->index_entries);
        av_freep(&st->codec->extradata);
@ -30,7 +30,7 @@
#include "libavutil/avutil.h"

#define LIBAVFORMAT_VERSION_MAJOR 54
#define LIBAVFORMAT_VERSION_MINOR 1
#define LIBAVFORMAT_VERSION_MINOR 2
#define LIBAVFORMAT_VERSION_MICRO 100

#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
@ -220,12 +220,12 @@ static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt)
    int ret;
    int size = (c->frame_bit_len - c->remaining_bits + 7)>>3;

    pkt->pos = avio_tell(s->pb);
    pkt->stream_index = 0;

    if (av_new_packet(pkt, size+2) < 0)
        return AVERROR(EIO);

    pkt->pos = avio_tell(s->pb);
    pkt->stream_index = 0;

    pkt->data[0] = 8 - c->remaining_bits; // Number of bits to skip
    pkt->data[1] = c->last_frame_bits;
    ret = avio_read(s->pb, pkt->data+2, size);
@ -1046,7 +1046,7 @@ int sws_init_context(SwsContext *c, SwsFilter *srcFilter, SwsFilter *dstFilter)
    c->vLumBufSize= c->vLumFilterSize;
    c->vChrBufSize= c->vChrFilterSize;
    for (i=0; i<dstH; i++) {
        int chrI= (int64_t)i*c->chrDstH / dstH;
        int chrI = (int64_t) i * c->chrDstH / dstH;
        int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
                             ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
@ -38,13 +38,13 @@ fi

if [ -n "$do_mpeg2_ivlc_qprd" ]; then
# mpeg2 encoding intra vlc qprd
do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +qprd+mv0 -intra_vlc 1 -cmp 2 -subcmp 2 -mbd rd -vcodec mpeg2video -f mpeg2video"
do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -trellis 1 -flags +mv0 -mpv_flags +qp_rd -intra_vlc 1 -cmp 2 -subcmp 2 -mbd rd -vcodec mpeg2video -f mpeg2video"
do_video_decoding
fi

if [ -n "$do_mpeg2_422" ]; then
#mpeg2 4:2:2 encoding
do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +qprd+mv0+ildct+ilme -intra_vlc 1 -mbd rd -vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video"
do_video_encoding mpeg2_422.mpg "-vb 1000k -bf 2 -trellis 1 -flags +mv0+ildct+ilme -mpv_flags +qp_rd -intra_vlc 1 -mbd rd -vcodec mpeg2video -pix_fmt yuv422p -f mpeg2video"
do_video_decoding
fi

@ -143,7 +143,7 @@ do_video_decoding
fi

if [ -n "$do_mpeg4_qprd" ]; then
do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+qprd+mv0 -cmp 2 -subcmp 2 -mbd rd -an -vcodec mpeg4"
do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -trellis 1 -flags +mv4+mv0 -mpv_flags +qp_rd -cmp 2 -subcmp 2 -mbd rd -an -vcodec mpeg4"
do_video_decoding
fi
@ -10,5 +10,8 @@ fate-cdxl-pal8: CMD = framecrc -i $(SAMPLES)/cdxl/maku.cdxl -pix_fmt rgb24 -fram
FATE_CDXL += fate-cdxl-pal8-small
fate-cdxl-pal8-small: CMD = framecrc -i $(SAMPLES)/cdxl/fruit.cdxl -an -pix_fmt rgb24 -frames:v 46

FATE_CDXL += fate-cdxl-bitline-ham6
fate-cdxl-bitline-ham6: CMD = framecrc -i $(SAMPLES)/cdxl/bitline.cdxl -frames:v 10

FATE_TESTS += $(FATE_CDXL)
fate-cdxl: $(FATE_CDXL)
@ -33,6 +33,11 @@ fate-mp3-float-conf-si_block: CMD = pcm -acodec mp3float -i $(SAMPLES)/mp3-confo
fate-mp3-float-conf-si_block: CMP = stddev
fate-mp3-float-conf-si_block: REF = $(SAMPLES)/mp3-conformance/si_block.pcm

FATE_MP3 += fate-mp3-float-extra_overread
fate-mp3-float-extra_overread: CMD = pcm -c:a mp3float -i $(SAMPLES)/mpegaudio/extra_overread.mp3
fate-mp3-float-extra_overread: CMP = stddev
fate-mp3-float-extra_overread: REF = $(SAMPLES)/mpegaudio/extra_overread.pcm

FATE_TESTS += $(FATE_MP3)
fate-mp3: $(FATE_MP3)
$(FATE_MP3): CMP = stddev
@ -143,7 +143,7 @@ FATE_VIDEO += fate-mpeg2-field-enc
fate-mpeg2-field-enc: CMD = framecrc -flags +bitexact -dct fastint -idct simple -i $(SAMPLES)/mpeg2/mpeg2_field_encoding.ts -an

FATE_VIDEO += fate-nuv
fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv
fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv -an

FATE_VIDEO += fate-qpeg
fate-qpeg: CMD = framecrc -i $(SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24
tests/ref/fate/cdxl-bitline-ham6 (new file, 11 lines)
@ -0,0 +1,11 @@
#tb 0: 12/601
0, 0, 0, 1, 63180, 0xcda82c16
0, 1, 1, 1, 63180, 0xa6097bf9
0, 2, 2, 1, 63180, 0x4c2fb091
0, 3, 3, 1, 63180, 0xc597db00
0, 4, 4, 1, 63180, 0xfa581ccd
0, 5, 5, 1, 63180, 0x3e51498f
0, 6, 6, 1, 63180, 0xe3495396
0, 7, 7, 1, 63180, 0x425f5f02
0, 8, 8, 1, 63180, 0x6077465f
0, 9, 9, 1, 63180, 0x923ba29c
@ -1,29 +1,10 @@
#tb 0: 100/2997
#tb 1: 1/44100
1, 0, 0, 1024, 4096, 0x00000000
1, 1024, 1024, 1024, 4096, 0x4dfae7a6
1, 2048, 2048, 1024, 4096, 0x3fd9f5c6
1, 3072, 3072, 1024, 4096, 0x7b86e310
1, 4096, 4096, 1024, 4096, 0x611cece5
1, 5120, 5120, 1024, 4096, 0xb7d8e872
0, 4, 4, 1, 460800, 0x54aedafe
1, 6144, 6144, 1024, 4096, 0x072ef72b
1, 7168, 7168, 1024, 4096, 0xb3560144
0, 5, 5, 1, 460800, 0xb7aa8b56
1, 8192, 8192, 1024, 4096, 0x0a3d119e
0, 6, 6, 1, 460800, 0x283ea3b5
1, 9216, 9216, 1024, 4096, 0xbe391aa4
1, 10240, 10240, 1024, 4096, 0x28f7c6e5
0, 7, 7, 1, 460800, 0x283ea3b5
1, 11264, 11264, 1024, 4096, 0xca9d9df2
0, 8, 8, 1, 460800, 0x10e577de
1, 12288, 12288, 1024, 4096, 0x5c6b95a9
0, 9, 9, 1, 460800, 0x4e091ee2
1, 13312, 13312, 1024, 4096, 0x0bdfc0bf
1, 14336, 14336, 1024, 4096, 0xd95a9277
0, 10, 10, 1, 460800, 0x2ea88828
1, 15360, 15360, 1024, 4096, 0xae2bef2c
0, 11, 11, 1, 460800, 0x4b7f4df0
1, 16384, 16384, 1024, 4096, 0xbf031e83
1, 17408, 17408, 1024, 4096, 0x4c83e2d1
0, 12, 12, 1, 460800, 0xa57f20d0
@ -1,3 +1,3 @@
4ef091d638bb20b8eaef5b3a0d6f97b7 *./tests/data/lavf/lavf.ffm
8ce2ea9a73a1187647df7bf3c8e1b8fd *./tests/data/lavf/lavf.ffm
376832 ./tests/data/lavf/lavf.ffm
./tests/data/lavf/lavf.ffm CRC=0xf361ed74
@ -1,4 +1,4 @@
40e7637e04991dbe9a23fe109f95bfc8 *./tests/data/vsynth1/prores_kostya.mov
f8fe98b7f9bb66857c81dbca409a9037 *./tests/data/vsynth1/prores_kostya.mov
3858901 ./tests/data/vsynth1/prores_kostya.mov
0a4153637d0cc0a88a8bcbf04cfaf8c6 *./tests/data/prores_kostya.vsynth1.out.yuv
stddev: 3.17 PSNR: 38.09 MAXDIFF: 39 bytes: 7603200/ 7603200
@ -1,4 +1,4 @@
ed8b8a94da049518af8f95c5da736e57 *./tests/data/vsynth2/prores_kostya.mov
26adb18726c08dde23bc4bee2eb591e2 *./tests/data/vsynth2/prores_kostya.mov
3884586 ./tests/data/vsynth2/prores_kostya.mov
ca2f6c1162635dedfa468c90f1fdc0ef *./tests/data/prores_kostya.vsynth2.out.yuv
stddev: 0.92 PSNR: 48.77 MAXDIFF: 10 bytes: 7603200/ 7603200