Mirror of https://github.com/jellyfin/jellyfin-ffmpeg.git, synced 2024-11-26 23:50:30 +00:00
New upstream version 6.0.1
Signed-off-by: nyanmisaka <nst799610810@gmail.com>
parent 571a39b356
commit 1719f91170
CREDITS (4 lines changed)
@@ -1,6 +1,6 @@
-See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
+See the Git history of the project (https://git.ffmpeg.org/ffmpeg) to
 get the names of people who have contributed to FFmpeg.
 
 To check the log, you can type the command "git log" in the FFmpeg
 source directory, or browse the online repository at
-http://source.ffmpeg.org.
+https://git.ffmpeg.org/ffmpeg
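
As a quick illustration of the commands CREDITS refers to (the clone directory name "ffmpeg" is only an example):

  git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg
  cd ffmpeg
  git log          # shows the contribution history CREDITS points to
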
Changelog (181 lines changed)
@@ -1,6 +1,187 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.
 
+version 6.0.1:
+ avcodec/4xm: Check for cfrm exhaustion
+ avformat/mov: Disallow FTYP after streams
+ doc/html: fix styling issue with Texinfo 7.0
+ doc/html: support texinfo 7.0
+ Changelog: update
+ avformat/lafdec: Check for 0 parameters
+ avformat/lafdec: Check for 0 parameters
+ avfilter/buffersink: fix order of operation with = and <0
+ avfilter/framesync: fix order of operation with = and <0
+ tools/target_dec_fuzzer: Adjust threshold for CSCD
+ avcodec/dovi_rpu: Use 64 bit in get_us/se_coeff()
+ avformat/mov: Check that is_still_picture_avif has no trak based streams
+ avformat/matroskadec: Fix declaration-after-statement warnings
+ Update for FFmpeg 6.0.1
+ fftools/ffmpeg_mux_init: Restrict disabling automatic copying of metadata
+ avformat/rtsp: Use rtsp_st->stream_index
+ avformat/rtsp: Use rtsp_st->stream_index
+ avutil/tx_template: fix integer ovberflwo in fft3()
+ avcodec/jpeg2000dec: Check image offset
+ avformat/mxfdec: Check klv offset
+ libavutil/ppc/cpu.c: check that AT_HWCAP2 is defined
+ avcodec/h2645_parse: Avoid EAGAIN
+ avcodec/xvididct: Make c* unsigned to avoid undefined overflows
+ avcodec/bonk: Fix undefined overflow in predictor_calc_error()
+ avformat/tmv: Check video chunk size
+ avcodec/h264_parser: saturate dts a bit
+ avformat/asfdec_f: Saturate presentation time in marker
+ avformat/xwma: sanity check bits_per_coded_sample
+ avformat/matroskadec: Check prebuffered_ns for overflow
+ avformat/wavdec: Check left avio_tell for overflow
+ avformat/tta: Better totalframes check
+ avformat/rpl: Check for number_of_chunks overflow
+ avformat/mov: compute absolute dts difference without overflow in mov_find_next_sample()
+ avformat/jacosubdec: Check timeres
+ avformat/jacosubdec: avoid signed integer overflows in get_shift()
+ avformat/jacosubdec: Factorize code in get_shift() a bit
+ avformat/sbgdec: Check for negative duration or un-representable end pts
+ avcodec/escape124: Do not return random numbers
+ avcodec/apedec: Fix an integer overflow in predictor_update_filter()
+ tools/target_dec_fuzzer: Adjust wmapro threshold
+ avcodec/wavarc: Allocate AV_INPUT_BUFFER_PADDING_SIZE
+ avcodec/wavarc: Fix integer overflwo in do_stereo()
+ avutil/tx_template: Fix some signed integer overflows in DECL_FFT5()
+ avcodec/aacdec_template: Better avoidance of signed integer overflow in imdct_and_windowing_eld()
+ tools/target_dec_fuzzer: Adjust threshold for MVHA
+ avformat/avs: Check if return code is representable
+ avcodec/flacdec: Fix integer overflow in "33bit" DECODER_SUBFRAME_FIXED_WIDE()
+ avcodec/flacdec: Fix overflow in "33bit" decorrelate
+ avcodec/lcldec: Make PNG filter addressing match the code afterwards
+ avformat/westwood_vqa: Check chunk size
+ avformat/sbgdec: Check for period overflow
+ avformat/concatdec: Check in/outpoint for overflow
+ avformat/mov: Check avif_info
+ avformat/mxfdec: Remove this_partition
+ avcodec/xvididct: Fix integer overflow in idct_row()
+ avcodec/celp_math: avoid overflow in shift
+ tools/target_dec_fuzzer: Adjust threshold for rtv1
+ avformat/hls: reduce default max reload to 3
+ avformat/format: Stop reading data at EOF during probing
+ avcodec/bonk: Fix integer overflow in predictor_calc_error()
+ avcodec/jpeg2000dec: jpeg2000 has its own lowres option
+ avcodec/huffyuvdec: avoid undefined behavior with get_vlc2() failure
+ avcodec/cscd: Fix "CamStudio Lossless Codec 1.0" gzip files
+ avcodec/cscd: Check for CamStudio Lossless Codec 1.0 behavior in end check of LZO files
+ avcodec/mpeg4videodec: consider lowres in dest_pcm[]
+ avcodec/hevcdec: Fix undefined memcpy()
+ avcodec/mpeg4videodec: more unsigned in amv computation
+ avcodec/tta: fix signed overflow in decorrelate
+ avcodec/apedec: remove unused variable
+ avcodec/apedec: Fix 48khz 24bit below insane level
+ avcodec/apedec: Fix CRC for 24bps and bigendian
+ avcodec/wavarc: Check that nb_samples is not negative
+ avcodec/wavarc: Check shift
+ avcodec/xvididct: Fix integer overflow in idct_row()
+ avformat/avr: Check sample rate
+ avformat/imf_cpl: Replace NULL content_title_utf8 by ""
+ avformat/imf_cpl: xmlNodeListGetString() can return NULL
+ avcodec/aacdec_template: Fix undefined signed interger operations
+ avcodec/wavarc: Fix k limit
+ avcodec/rka: Fix integer overflow in decode_filter()
+ avformat/rka: bps < 8 is invalid
+ avcodec/pcm: allow Changing parameters
+ avutil/tx_template: extend to 2M
+ avcodec/jpeg2000dec: Check for reduction factor and image offset
+ avutil/softfloat: Basic documentation for av_sincos_sf()
+ avutil/softfloat: fix av_sincos_sf()
+ tools/target_dec_fuzzer: Adjust threshold for speex
+ avcodec/utils: fix 2 integer overflows in get_audio_frame_duration()
+ avcodec/hevcdec: Avoid null pointer dereferences in MC
+ avcodec/takdsp: Fix integer overflows
+ avcodec/mpegvideo_dec: consider interlaced lowres 4:2:0 chroma in edge emulation check better
+ avcodec/rka: use unsigned for buf0 additions
+ avcodec/rka: Avoid undefined left shift
+ avcodec: Ignoring errors is only possible before the input end
+ avformat/jpegxl_probe: Forward error codes
+ avformat/jpegxl_probe: check length instead of blindly reading
+ avformat/jpegxl_probe: Remove intermediate macro obfuscation around get_bits*()
+ avcodec/noise_bsf: Check for wrapped frames
+ avformat/oggparsetheora: clip duration within 64bit
+ avcodec/rka: avoid undefined multiply in cmode==0
+ avcodec/rka: use 64bit for srate_pad computation
+ avcodec/bonk: Avoid undefined integer overflow in predictor_calc_error()
+ avformat/wavdec: Check that smv block fits in available space
+ avcodec/adpcm: Fix integer overflow in intermediate in ADPCM_XMD
+ avcodec/dpcm: fix undefined interger overflow in wady
+ avcodec/tiff: add a zero DNG_LINEARIZATION_TABLE check
+ avcodec/tak: Check remaining bits in ff_tak_decode_frame_header()
+ avcodec/sonic: Fix two undefined integer overflows
+ avcodec/utils: the IFF_ILBM implementation assumes that there are a multiple of 16 allocated
+ avcodec/flacdec: Fix signed integre overflow
+ avcodec/exr: Cleanup befor return
+ avcodec/pngdec: Do not pass AVFrame into global header decode
+ avcodec/pngdec: remove AVFrame argument from decode_iccp_chunk()
+ avcodec/wavarc: Check order before using it to write the list
+ avcodec/bonk: decode multiple passes in intlist_read() at once
+ avcodec/vorbisdec: Check codebook float values to be finite
+ avcodec/g2meet: Replace fake allocation avoidance for framebuf
+ avutil/tx_priv: Use unsigned in BF() to avoid signed overflows
+ avcodec/lcldec: More space for rgb24
+ avcodec/lcldec: Support 4:1:1 and 4:2:2 with odd width
+ libavcodec/lcldec: width and height should not be unsigned
+ avformat/imf: fix invalid resource handling
+ avcodec/escape124: Check that blocks are allocated before use
+ avcodec/rka: Fix signed integer overflow in decode_filter()
+ avcodec/huffyuvdec: Fix undefined behavior with shift
+ avcodec/j2kenc: Replace RGB24 special case by generic test
+ avcodec/j2kenc: Replace BGR48 / GRAY16 test by test for number of bits
+ avcodec/j2kenc: simplify pixel format setup
+ avcodec/j2kenc: Fix funky bpno errors on decoding
+ avcodec/j2kenc: remove misleading pred value
+ avcodec/j2kenc: fix 5/3 DWT identifer
+ avcodec/vp3: Check width to avoid assertion failure
+ avcodec/g729postfilter: Limit shift in long term filter
+ avcodec/wavarc: Fix several integer overflows
+ avcodec/tests/snowenc: Fix 2nd test
+ avcodec/tests/snowenc: return a failure if DWT/IDWT mismatches
+ avcodec/snowenc: Fix visual weight calculation
+ avcodec/tests/snowenc: unbreak DWT tests
+ avcodec/mpeg12dec: Check input size
+ avcodec/escape124: Fix some return codes
+ avcodec/escape124: fix signdness of end of input check
+ Use https for repository links
+ avcodec/nvdec_hevc: fail to initialize on unsupported profiles
+ fftools/ffmpeg_enc: apply -top to individual encoded frames
+ avcodec/on2avc: use correct fft sizes
+ avcodec/on2avc: use the matching AVTX context for the 512 sized iMDCT
+ examples: fix build of mux and resample_audio
+ avcodec/nvenc: stop using deprecated rc modes with SDK 12.1
+ configure: use non-deprecated nvenc GUID for conftest
+ avcodec/x86/mathops: clip constants used with shift instructions within inline assembly
+ avfilter/vsrc_ddagrab: calculate pointer position on rotated screens
+ avfilter/vsrc_ddagrab: account for mouse-only frames during probing
+ avcodec/aac_ac3_parser: add preprocessor checks for codec specific code
+ avcodec/nvenc: handle frame durations and AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
+ Revert "lavc/nvenc: handle frame durations and AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE"
+ Revert "avcodec/nvenc: fix b-frame DTS behavior with fractional framerates"
+ avcodec/vdpau_mpeg4: fix order of quant matrix coefficients
+ avcodec/vdpau_mpeg12: fix order of quant matrix coefficients
+ avcodec/nvdec_mpeg4: fix order of quant matrix coefficients
+ avcodec/nvdec_mpeg2: fix order of quant matrix coefficients
+ fftools/ffmpeg_filter: fix leak of AVIOContext in read_binary()
+ fftools/ffmpeg: avoid possible invalid reads with short -tag values
+ avcodec/mp_cmp: reject invalid comparison function values
+ avcodec/aacpsy: clip global_quality within the psy_vbr_map array boundaries
+ avutil/wchar_filename: propagate MultiByteToWideChar() and WideCharToMultiByte() failures
+ avformat/concatf: check if any nodes were allocated
+ avcodec/nvenc: fix b-frame DTS behavior with fractional framerates
+ avcodec/vorbisdec: export skip_samples instead of dropping frames
+ fftools/ffmpeg_mux_init: avoid invalid reads in forced keyframe parsing
+ lavfi/vf_vpp_qsv: set the right timestamp for AVERROR_EOF
+ avfilter/vf_untile: swap the chroma shift values used for plane offsets
+ lavc/decode: stop mangling last_pkt_props->opaque
+ avcodec/nvenc: avoid failing b_ref_mode check when unset
+ lavu/vulkan: fix handle type for 32-bit targets
+ vulkan: Fix win/i386 calling convention
+ avfilter/graphparser: fix filter instance name when an id is provided
+ avcodec/aacps_tablegen: fix build error after avutil bump
+ avcodec/nvenc: fix potential NULL pointer dereference
+
+
 version 6.0:
 - Radiance HDR image support
 - ddagrab (Desktop Duplication) video capture filter
configure (2 lines changed)
@@ -7042,7 +7042,7 @@ enabled nvenc &&
     test_cc -I$source_path <<EOF || disable nvenc
 #include <ffnvcodec/nvEncodeAPI.h>
 NV_ENCODE_API_FUNCTION_LIST flist;
-void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } }; }
+void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_CODEC_H264_GUID } }; }
 int main(void) { return 0; }
 EOF
 
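
For reference, a rough stand-alone equivalent of this configure probe (assuming nv-codec-headers is installed and ships the ffnvcodec pkg-config file, and that conftest.c holds the five lines shown above):

  cc $(pkg-config --cflags ffnvcodec) -c conftest.c -o /dev/null && echo "nvenc conftest passes"
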
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER = 6.0
+PROJECT_NUMBER = 6.0.1
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -3,9 +3,9 @@
 The FFmpeg developers.
 
 For details about the authorship, see the Git history of the project
-(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+(https://git.ffmpeg.org/ffmpeg), e.g. by typing the command
 @command{git log} in the FFmpeg source directory, or browsing the
-online repository at @url{http://source.ffmpeg.org}.
+online repository at @url{https://git.ffmpeg.org/ffmpeg}.
 
 Maintainers for the specific components are listed in the file
 @file{MAINTAINERS} in the source code tree.
doc/bootstrap.min.css (2 lines changed; file diff suppressed because one or more lines are too long)
@@ -43,8 +43,8 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
 # the following examples make explicit use of the math library
 avcodec: LDLIBS += -lm
 encode_audio: LDLIBS += -lm
-muxing: LDLIBS += -lm
-resampling_audio: LDLIBS += -lm
+mux: LDLIBS += -lm
+resample_audio: LDLIBS += -lm
 
 .phony: all clean-test clean
 
@@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it.
 @section Cloning the source tree
 
 @example
-git clone git://source.ffmpeg.org/ffmpeg <target>
+git clone https://git.ffmpeg.org/ffmpeg.git <target>
 @end example
 
 This will put the FFmpeg sources into the directory @var{<target>}.
doc/t2h.pm (106 lines changed)
@@ -20,8 +20,45 @@
 # License along with FFmpeg; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 
+# Texinfo 7.0 changed the syntax of various functions.
+# Provide a shim for older versions.
+sub ff_set_from_init_file($$) {
+    my $key = shift;
+    my $value = shift;
+    if (exists &{'texinfo_set_from_init_file'}) {
+        texinfo_set_from_init_file($key, $value);
+    } else {
+        set_from_init_file($key, $value);
+    }
+}
+
+sub ff_get_conf($) {
+    my $key = shift;
+    if (exists &{'texinfo_get_conf'}) {
+        texinfo_get_conf($key);
+    } else {
+        get_conf($key);
+    }
+}
+
+sub get_formatting_function($$) {
+    my $obj = shift;
+    my $func = shift;
+
+    my $sub = $obj->can('formatting_function');
+    if ($sub) {
+        return $obj->formatting_function($func);
+    } else {
+        return $obj->{$func};
+    }
+}
+
+# determine texinfo version
+my $program_version_num = version->declare(ff_get_conf('PACKAGE_VERSION'))->numify;
+my $program_version_6_8 = $program_version_num >= 6.008000;
+
 # no navigation elements
-set_from_init_file('HEADERS', 0);
+ff_set_from_init_file('HEADERS', 0);
 
 sub ffmpeg_heading_command($$$$$)
 {
@@ -55,7 +92,7 @@ sub ffmpeg_heading_command($$$$$)
         $element = $command->{'parent'};
     }
     if ($element) {
-        $result .= &{$self->{'format_element_header'}}($self, $cmdname,
+        $result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname,
                                                        $command, $element);
     }
 
@@ -112,7 +149,11 @@ sub ffmpeg_heading_command($$$$$)
             $cmdname
                 = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
         }
-        $result .= &{$self->{'format_heading_text'}}(
+        # format_heading_text expects an array of headings for texinfo >= 7.0
+        if ($program_version_num >= 7.000000) {
+            $heading = [$heading];
+        }
+        $result .= &{get_formatting_function($self,'format_heading_text')}(
                     $self, $cmdname, $heading,
                     $heading_level +
                     $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
@@ -126,23 +167,19 @@ foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
     texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
 }
 
-# determine if texinfo is at least version 6.8
-my $program_version_num = version->declare(get_conf('PACKAGE_VERSION'))->numify;
-my $program_version_6_8 = $program_version_num >= 6.008000;
-
 # print the TOC where @contents is used
 if ($program_version_6_8) {
-    set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline');
+    ff_set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline');
 } else {
-    set_from_init_file('INLINE_CONTENTS', 1);
+    ff_set_from_init_file('INLINE_CONTENTS', 1);
 }
 
 # make chapters <h2>
-set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
+ff_set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
 
 # Do not add <hr>
-set_from_init_file('DEFAULT_RULE', '');
-set_from_init_file('BIG_RULE', '');
+ff_set_from_init_file('DEFAULT_RULE', '');
+ff_set_from_init_file('BIG_RULE', '');
 
 # Customized file beginning
 sub ffmpeg_begin_file($$$)
@@ -159,7 +196,18 @@ sub ffmpeg_begin_file($$$)
     my ($title, $description, $encoding, $date, $css_lines,
         $doctype, $bodytext, $copying_comment, $after_body_open,
         $extra_head, $program_and_version, $program_homepage,
-        $program, $generator) = $self->_file_header_informations($command);
+        $program, $generator);
+    if ($program_version_num >= 7.000000) {
+        ($title, $description, $encoding, $date, $css_lines,
+         $doctype, $bodytext, $copying_comment, $after_body_open,
+         $extra_head, $program_and_version, $program_homepage,
+         $program, $generator) = $self->_file_header_information($command);
+    } else {
+        ($title, $description, $encoding, $date, $css_lines,
+         $doctype, $bodytext, $copying_comment, $after_body_open,
+         $extra_head, $program_and_version, $program_homepage,
+         $program, $generator) = $self->_file_header_informations($command);
+    }
 
     my $links = $self->_get_links ($filename, $element);
 
@@ -223,7 +271,7 @@ if ($program_version_6_8) {
     sub ffmpeg_end_file($)
     {
         my $self = shift;
-        my $program_string = &{$self->{'format_program_string'}}($self);
+        my $program_string = &{get_formatting_function($self,'format_program_string')}($self);
         my $program_text = <<EOT;
 <p style="font-size: small;">
 $program_string
@@ -244,7 +292,7 @@ if ($program_version_6_8) {
 
 # Dummy title command
 # Ignore title. Title is handled through ffmpeg_begin_file().
-set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
+ff_set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
 sub ffmpeg_title($$$$)
 {
     return '';
@@ -262,8 +310,14 @@ sub ffmpeg_float($$$$$)
     my $args = shift;
     my $content = shift;
 
-    my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
-                                                                    $command);
+    my ($caption, $prepended);
+    if ($program_version_num >= 7.000000) {
+        ($caption, $prepended) = Texinfo::Convert::Converter::float_name_caption($self,
+                                                                                 $command);
+    } else {
+        ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
+                                                                     $command);
+    }
     my $caption_text = '';
     my $prepended_text;
     my $prepended_save = '';
@@ -335,8 +389,13 @@ sub ffmpeg_float($$$$$)
                                              $caption->{'args'}->[0], 'float caption');
     }
     if ($prepended_text.$caption_text ne '') {
-        $prepended_text = $self->_attribute_class('div','float-caption'). '>'
-                          . $prepended_text;
+        if ($program_version_num >= 7.000000) {
+            $prepended_text = $self->html_attribute_class('div',['float-caption']). '>'
+                              . $prepended_text;
+        } else {
+            $prepended_text = $self->_attribute_class('div','float-caption'). '>'
+                              . $prepended_text;
+        }
         $caption_text .= '</div>';
     }
     my $html_class = '';
@@ -349,8 +408,13 @@ sub ffmpeg_float($$$$$)
         $prepended_text = '';
         $caption_text = '';
     }
-    return $self->_attribute_class('div', $html_class). '>' . "\n" .
-        $prepended_text . $caption_text . $content . '</div>';
+    if ($program_version_num >= 7.000000) {
+        return $self->html_attribute_class('div', [$html_class]). '>' . "\n" .
+            $prepended_text . $caption_text . $content . '</div>';
+    } else {
+        return $self->_attribute_class('div', $html_class). '>' . "\n" .
+            $prepended_text . $caption_text . $content . '</div>';
+    }
 }
 
 texinfo_register_command_formatting('float',
@@ -1337,6 +1337,9 @@ static void do_video_out(OutputFile *of,
         in_picture->quality = enc->global_quality;
         in_picture->pict_type = forced_kf_apply(ost, &ost->kf, enc->time_base, in_picture, i);
 
+        if (ost->top_field_first >= 0)
+            in_picture->top_field_first = !!ost->top_field_first;
+
         ret = submit_encode_frame(of, ost, in_picture);
         if (ret == AVERROR_EOF)
             break;
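
A hedged command-line sketch of what this hunk enables, honouring -top for each encoded frame (file names and codec choice are placeholders):

  ffmpeg -i interlaced.mxf -c:v mpeg2video -flags +ildct+ilme -top 1 out.mpg
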
@@ -628,8 +628,12 @@ static void add_input_streams(const OptionsContext *o, Demuxer *d)
         MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st);
         if (codec_tag) {
             uint32_t tag = strtol(codec_tag, &next, 0);
-            if (*next)
-                tag = AV_RL32(codec_tag);
+            if (*next) {
+                uint8_t buf[4] = { 0 };
+                memcpy(buf, codec_tag, FFMIN(sizeof(buf), strlen(codec_tag)));
+                tag = AV_RL32(buf);
+            }
+
             st->codecpar->codec_tag = tag;
         }
 
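
The zero-padded buffer above matters for -tag values shorter than four characters; a sketch of such an invocation (input/output names are placeholders):

  ffmpeg -tag:v XV -i input.avi -c copy output.avi
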
@@ -352,11 +352,13 @@ static int read_binary(const char *path, uint8_t **data, int *len)
 
     *len = fsize;
 
-    return 0;
+    ret = 0;
 fail:
     avio_close(io);
-    av_freep(data);
-    *len = 0;
+    if (ret < 0) {
+        av_freep(data);
+        *len = 0;
+    }
     return ret;
 }
 
@@ -606,8 +606,11 @@ static OutputStream *new_output_stream(Muxer *mux, const OptionsContext *o,
     MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
     if (codec_tag) {
         uint32_t tag = strtol(codec_tag, &next, 0);
-        if (*next)
-            tag = AV_RL32(codec_tag);
+        if (*next) {
+            uint8_t buf[4] = { 0 };
+            memcpy(buf, codec_tag, FFMIN(sizeof(buf), strlen(codec_tag)));
+            tag = AV_RL32(buf);
+        }
         ost->st->codecpar->codec_tag = tag;
         if (ost->enc_ctx)
             ost->enc_ctx->codec_tag = tag;
@@ -1819,11 +1822,11 @@ static int copy_metadata(Muxer *mux, AVFormatContext *ic,
     parse_meta_type(mux, inspec, &type_in, &idx_in, &istream_spec);
     parse_meta_type(mux, outspec, &type_out, &idx_out, &ostream_spec);
 
-    if (type_in == 'g' || type_out == 'g')
+    if (type_in == 'g' || type_out == 'g' || (!*outspec && !ic))
         *metadata_global_manual = 1;
-    if (type_in == 's' || type_out == 's')
+    if (type_in == 's' || type_out == 's' || (!*outspec && !ic))
        *metadata_streams_manual = 1;
-    if (type_in == 'c' || type_out == 'c')
+    if (type_in == 'c' || type_out == 'c' || (!*outspec && !ic))
        *metadata_chapters_manual = 1;
 
     /* ic is NULL when just disabling automatic mappings */
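
The added "(!*outspec && !ic)" case corresponds to explicitly disabling automatic metadata copying, e.g. (file names are placeholders):

  ffmpeg -i input.mkv -map_metadata -1 -c copy output.mkv
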
@@ -2063,7 +2066,7 @@ static void parse_forced_key_frames(KeyframeForceCtx *kf, const Muxer *mux,
         if (next)
             *next++ = 0;
 
-        if (!memcmp(p, "chapters", 8)) {
+        if (strstr(p, "chapters") == p) {
             AVChapter * const *ch = mux->fc->chapters;
             unsigned int nb_ch = mux->fc->nb_chapters;
             int j;
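
The strstr() form keeps the documented "chapters" syntax of -force_key_frames working while avoiding the fixed-length memcmp read, e.g. (placeholder file names):

  ffmpeg -i input.mp4 -c:v libx264 -force_key_frames chapters-0.1 output.mp4
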
@ -887,6 +887,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
|
||||
}
|
||||
|
||||
if (i >= CFRAME_BUFFER_COUNT) {
|
||||
if (free_index < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
i = free_index;
|
||||
f->cfrm[i].id = id;
|
||||
}
|
||||
|
@ -95,6 +95,7 @@ get_next:
|
||||
duration in seconds is still correct (as is the number of bits in
|
||||
the frame). */
|
||||
if (avctx->codec_id != AV_CODEC_ID_AAC) {
|
||||
#if CONFIG_AC3_PARSER
|
||||
AC3HeaderInfo hdr, *phrd = &hdr;
|
||||
int offset = ff_ac3_find_syncword(buf, buf_size);
|
||||
|
||||
@ -146,7 +147,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||
if (hdr.bitstream_mode == 0x7 && hdr.channels > 1)
|
||||
avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
|
||||
bit_rate = hdr.bit_rate;
|
||||
#endif
|
||||
} else {
|
||||
#if CONFIG_AAC_PARSER
|
||||
AACADTSHeaderInfo hdr, *phrd = &hdr;
|
||||
int ret = avpriv_adts_header_parse(&phrd, buf, buf_size);
|
||||
|
||||
@ -154,6 +157,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||
return i;
|
||||
|
||||
bit_rate = hdr.bit_rate;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Calculate the average bit rate */
|
||||
|
@ -2856,8 +2856,8 @@ static void imdct_and_windowing_eld(AACContext *ac, SingleChannelElement *sce)
|
||||
ac->mdct512_fn(ac->mdct512, buf, in, sizeof(INTFLOAT));
|
||||
|
||||
for (i = 0; i < n; i+=2) {
|
||||
buf[i + 0] = -(USE_FIXED + 1)*buf[i + 0];
|
||||
buf[i + 1] = (USE_FIXED + 1)*buf[i + 1];
|
||||
buf[i + 0] = -(UINTFLOAT)(USE_FIXED + 1)*buf[i + 0];
|
||||
buf[i + 1] = (UINTFLOAT)(USE_FIXED + 1)*buf[i + 1];
|
||||
}
|
||||
// Like with the regular IMDCT at this point we still have the middle half
|
||||
// of a transform but with even symmetry on the left and odd symmetry on
|
||||
|
@ -34,7 +34,7 @@
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/libm.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/mem_internal.h"
|
||||
#define NR_ALLPASS_BANDS20 30
|
||||
#define NR_ALLPASS_BANDS34 50
|
||||
#define PS_AP_LINKS 3
|
||||
|
@ -267,7 +267,7 @@ static av_cold void lame_window_init(AacPsyContext *ctx, AVCodecContext *avctx)
|
||||
AacPsyChannel *pch = &ctx->ch[i];
|
||||
|
||||
if (avctx->flags & AV_CODEC_FLAG_QSCALE)
|
||||
pch->attack_threshold = psy_vbr_map[avctx->global_quality / FF_QP2LAMBDA].st_lrm;
|
||||
pch->attack_threshold = psy_vbr_map[av_clip(avctx->global_quality / FF_QP2LAMBDA, 0, 10)].st_lrm;
|
||||
else
|
||||
pch->attack_threshold = lame_calc_attack_threshold(avctx->bit_rate / avctx->ch_layout.nb_channels / 1000);
|
||||
|
||||
|
@ -1579,11 +1579,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
nibble[0] = sign_extend(byte & 15, 4);
|
||||
nibble[1] = sign_extend(byte >> 4, 4);
|
||||
|
||||
out[2+n*2] = (nibble[0]*(scale<<14) + (history[0]*29336) - (history[1]*13136)) >> 14;
|
||||
out[2+n*2] = nibble[0]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
|
||||
history[1] = history[0];
|
||||
history[0] = out[2+n*2];
|
||||
|
||||
out[2+n*2+1] = (nibble[1]*(scale<<14) + (history[0]*29336) - (history[1]*13136)) >> 14;
|
||||
out[2+n*2+1] = nibble[1]*scale + ((history[0]*3667 - history[1]*1642) >> 11);
|
||||
history[1] = history[0];
|
||||
history[0] = out[2+n*2+1];
|
||||
}
|
||||
|
@ -145,8 +145,6 @@ typedef struct APEPredictor64 {
|
||||
uint64_t coeffsA[2][4]; ///< adaption coefficients
|
||||
uint64_t coeffsB[2][5]; ///< adaption coefficients
|
||||
int64_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE];
|
||||
|
||||
unsigned int sample_pos;
|
||||
} APEPredictor64;
|
||||
|
||||
/** Decoder context */
|
||||
@ -860,8 +858,6 @@ static void init_predictor_decoder(APEContext *ctx)
|
||||
p64->lastA[0] = p64->lastA[1] = 0;
|
||||
|
||||
p->sample_pos = 0;
|
||||
|
||||
p64->sample_pos = 0;
|
||||
}
|
||||
|
||||
/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
|
||||
@ -1170,7 +1166,8 @@ static void predictor_decode_mono_3930(APEContext *ctx, int count)
|
||||
static av_always_inline int predictor_update_filter(APEPredictor64 *p,
|
||||
const int decoded, const int filter,
|
||||
const int delayA, const int delayB,
|
||||
const int adaptA, const int adaptB)
|
||||
const int adaptA, const int adaptB,
|
||||
int compression_level)
|
||||
{
|
||||
int64_t predictionA, predictionB;
|
||||
int32_t sign;
|
||||
@ -1198,7 +1195,13 @@ static av_always_inline int predictor_update_filter(APEPredictor64 *p,
|
||||
p->buf[delayB - 3] * p->coeffsB[filter][3] +
|
||||
p->buf[delayB - 4] * p->coeffsB[filter][4];
|
||||
|
||||
p->lastA[filter] = decoded + ((int64_t)((uint64_t)predictionA + (predictionB >> 1)) >> 10);
|
||||
if (compression_level < COMPRESSION_LEVEL_INSANE) {
|
||||
predictionA = (int32_t)predictionA;
|
||||
predictionB = (int32_t)predictionB;
|
||||
p->lastA[filter] = (int32_t)(decoded + (unsigned)((int32_t)(predictionA + (predictionB >> 1)) >> 10));
|
||||
} else {
|
||||
p->lastA[filter] = decoded + ((int64_t)((uint64_t)predictionA + (predictionB >> 1)) >> 10);
|
||||
}
|
||||
p->filterA[filter] = p->lastA[filter] + ((int64_t)(p->filterA[filter] * 31ULL) >> 5);
|
||||
|
||||
sign = APESIGN(decoded);
|
||||
@ -1226,10 +1229,12 @@ static void predictor_decode_stereo_3950(APEContext *ctx, int count)
|
||||
while (count--) {
|
||||
/* Predictor Y */
|
||||
*decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB,
|
||||
YADAPTCOEFFSA, YADAPTCOEFFSB);
|
||||
YADAPTCOEFFSA, YADAPTCOEFFSB,
|
||||
ctx->compression_level);
|
||||
decoded0++;
|
||||
*decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB,
|
||||
XADAPTCOEFFSA, XADAPTCOEFFSB);
|
||||
XADAPTCOEFFSA, XADAPTCOEFFSB,
|
||||
ctx->compression_level);
|
||||
decoded1++;
|
||||
|
||||
/* Combined */
|
||||
@ -1611,13 +1616,24 @@ static int ape_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
s->samples -= blockstodecode;
|
||||
|
||||
if (avctx->err_recognition & AV_EF_CRCCHECK &&
|
||||
s->fileversion >= 3900 && s->bps < 24) {
|
||||
s->fileversion >= 3900) {
|
||||
uint32_t crc = s->CRC_state;
|
||||
const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
|
||||
int stride = s->bps == 24 ? 4 : (s->bps>>3);
|
||||
int offset = s->bps == 24;
|
||||
int bytes = s->bps >> 3;
|
||||
|
||||
for (i = 0; i < blockstodecode; i++) {
|
||||
for (ch = 0; ch < s->channels; ch++) {
|
||||
uint8_t *smp = frame->data[ch] + (i*(s->bps >> 3));
|
||||
crc = av_crc(crc_tab, crc, smp, s->bps >> 3);
|
||||
#if HAVE_BIGENDIAN
|
||||
uint8_t *smp_native = frame->data[ch] + i*stride;
|
||||
uint8_t smp[4];
|
||||
for(int j = 0; j<stride; j++)
|
||||
smp[j] = smp_native[stride-j-1];
|
||||
#else
|
||||
uint8_t *smp = frame->data[ch] + i*stride;
|
||||
#endif
|
||||
crc = av_crc(crc_tab, crc, smp+offset, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -155,6 +155,7 @@ static int intlist_read(BonkContext *s, int *buf, int entries, int base_2_part)
|
||||
int n_zeros = 0, step = 256, dominant = 0;
|
||||
int pos = 0, level = 0;
|
||||
BitCount *bits = s->bits;
|
||||
int passes = 1;
|
||||
|
||||
memset(buf, 0, entries * sizeof(*buf));
|
||||
if (base_2_part) {
|
||||
@ -216,24 +217,28 @@ static int intlist_read(BonkContext *s, int *buf, int entries, int base_2_part)
|
||||
x = 0;
|
||||
n_zeros = 0;
|
||||
for (i = 0; n_zeros < entries; i++) {
|
||||
if (x >= max_x)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (pos >= entries) {
|
||||
pos = 0;
|
||||
level += 1 << low_bits;
|
||||
level += passes << low_bits;
|
||||
passes = 1;
|
||||
if (bits[x].bit && bits[x].count > entries - n_zeros)
|
||||
passes = bits[x].count / (entries - n_zeros);
|
||||
}
|
||||
|
||||
if (level > 1 << 16)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (x >= max_x)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (buf[pos] >= level) {
|
||||
if (bits[x].bit)
|
||||
buf[pos] += 1 << low_bits;
|
||||
buf[pos] += passes << low_bits;
|
||||
else
|
||||
n_zeros++;
|
||||
|
||||
bits[x].count--;
|
||||
av_assert1(bits[x].count >= passes);
|
||||
bits[x].count -= passes;
|
||||
x += bits[x].count == 0;
|
||||
}
|
||||
|
||||
@ -265,14 +270,14 @@ static inline int shift(int a, int b)
|
||||
|
||||
static int predictor_calc_error(int *k, int *state, int order, int error)
|
||||
{
|
||||
int i, x = error - shift_down(k[order-1] * state[order-1], LATTICE_SHIFT);
|
||||
int i, x = error - (unsigned)shift_down(k[order-1] * (unsigned)state[order-1], LATTICE_SHIFT);
|
||||
int *k_ptr = &(k[order-2]),
|
||||
*state_ptr = &(state[order-2]);
|
||||
|
||||
for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--) {
|
||||
unsigned k_value = *k_ptr, state_value = *state_ptr;
|
||||
|
||||
x -= shift_down(k_value * state_value, LATTICE_SHIFT);
|
||||
x -= (unsigned) shift_down(k_value * (unsigned)state_value, LATTICE_SHIFT);
|
||||
state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,7 @@ int64_t ff_dot_product(const int16_t *a, const int16_t *b, int length);
|
||||
*
|
||||
* @return value << offset, if offset>=0; value >> -offset - otherwise
|
||||
*/
|
||||
static inline int bidir_sal(int value, int offset)
|
||||
static inline unsigned bidir_sal(unsigned value, int offset)
|
||||
{
|
||||
if(offset < 0) return value >> -offset;
|
||||
else return value << offset;
|
||||
|
@ -70,6 +70,9 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||
int buf_size = avpkt->size;
|
||||
CamStudioContext *c = avctx->priv_data;
|
||||
int ret;
|
||||
int bpp = avctx->bits_per_coded_sample / 8;
|
||||
int bugdelta = FFALIGN(avctx->width * bpp, 4) * avctx->height
|
||||
- (avctx->width & ~3) * bpp * avctx->height;
|
||||
|
||||
if (buf_size < 2) {
|
||||
av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
|
||||
@ -83,7 +86,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||
switch ((buf[0] >> 1) & 7) {
|
||||
case 0: { // lzo compression
|
||||
int outlen = c->decomp_size, inlen = buf_size - 2;
|
||||
if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen) || outlen) {
|
||||
if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen) || (outlen && outlen != bugdelta)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
@ -92,7 +95,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
|
||||
case 1: { // zlib compression
|
||||
#if CONFIG_ZLIB
|
||||
unsigned long dlen = c->decomp_size;
|
||||
if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK || dlen != c->decomp_size) {
|
||||
if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK || (dlen != c->decomp_size && dlen != c->decomp_size - bugdelta)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "error during zlib decompression\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
|
||||
if (pkt) {
|
||||
ret = av_packet_copy_props(avci->last_pkt_props, pkt);
|
||||
if (!ret)
|
||||
avci->last_pkt_props->opaque = (void *)(intptr_t)pkt->size; // Needed for ff_decode_frame_props().
|
||||
avci->last_pkt_props->stream_index = pkt->size; // Needed for ff_decode_frame_props().
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -461,7 +461,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||
pkt->dts = AV_NOPTS_VALUE;
|
||||
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
|
||||
// See extract_packet_props() comment.
|
||||
avci->last_pkt_props->opaque = (void *)((intptr_t)avci->last_pkt_props->opaque - consumed);
|
||||
avci->last_pkt_props->stream_index = avci->last_pkt_props->stream_index - consumed;
|
||||
avci->last_pkt_props->pts = AV_NOPTS_VALUE;
|
||||
avci->last_pkt_props->dts = AV_NOPTS_VALUE;
|
||||
}
|
||||
@ -1355,7 +1355,7 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
|
||||
int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
frame->pkt_size = (int)(intptr_t)pkt->opaque;
|
||||
frame->pkt_size = pkt->stream_index;
|
||||
}
|
||||
#if FF_API_REORDERED_OPAQUE
|
||||
FF_DISABLE_DEPRECATION_WARNINGS
|
||||
|
@ -157,7 +157,7 @@ static inline uint64_t get_ue_coef(GetBitContext *gb, const AVDOVIRpuDataHeader
|
||||
|
||||
case RPU_COEFF_FLOAT:
|
||||
fpart.u32 = get_bits_long(gb, 32);
|
||||
return fpart.f32 * (1 << hdr->coef_log2_denom);
|
||||
return fpart.f32 * (1LL << hdr->coef_log2_denom);
|
||||
}
|
||||
|
||||
return 0; /* unreachable */
|
||||
@ -176,7 +176,7 @@ static inline int64_t get_se_coef(GetBitContext *gb, const AVDOVIRpuDataHeader *
|
||||
|
||||
case RPU_COEFF_FLOAT:
|
||||
fpart.u32 = get_bits_long(gb, 32);
|
||||
return fpart.f32 * (1 << hdr->coef_log2_denom);
|
||||
return fpart.f32 * (1LL << hdr->coef_log2_denom);
|
||||
}
|
||||
|
||||
return 0; /* unreachable */
|
||||
|
@ -444,7 +444,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
if (n & 0x80)
|
||||
s->sample[idx] = sign_extend((n & 0x7f) << 9, 16);
|
||||
else
|
||||
s->sample[idx] += s->scale * wady_table[n & 0x7f];
|
||||
s->sample[idx] += s->scale * (unsigned)wady_table[n & 0x7f];
|
||||
*output_samples++ = av_clip_int16(s->sample[idx]);
|
||||
idx ^= stereo;
|
||||
}
|
||||
|
@ -104,7 +104,9 @@ static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
|
||||
ff_fdctdsp_init(&fdsp, avctx);
|
||||
ff_me_cmp_init(&mecc, avctx);
|
||||
ff_pixblockdsp_init(&pdsp, avctx);
|
||||
ff_set_cmp(&mecc, mecc.ildct_cmp, avctx->ildct_cmp);
|
||||
ret = ff_set_cmp(&mecc, mecc.ildct_cmp, avctx->ildct_cmp);
|
||||
if (ret < 0)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
s->get_pixels = pdsp.get_pixels;
|
||||
s->ildct_cmp = mecc.ildct_cmp[5];
|
||||
|
@ -89,11 +89,6 @@ static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth,
|
||||
unsigned i, j;
|
||||
CodeBook cb = { 0 };
|
||||
|
||||
if (size >= INT_MAX / 34 || get_bits_left(gb) < size * 34)
|
||||
return cb;
|
||||
|
||||
if (size >= INT_MAX / sizeof(MacroBlock))
|
||||
return cb;
|
||||
cb.blocks = av_malloc(size ? size * sizeof(MacroBlock) : 1);
|
||||
if (!cb.blocks)
|
||||
return cb;
|
||||
@ -163,7 +158,7 @@ static MacroBlock decode_macroblock(Escape124Context* s, GetBitContext* gb,
|
||||
|
||||
// This condition can occur with invalid bitstreams and
|
||||
// *codebook_index == 2
|
||||
if (block_index >= s->codebooks[*codebook_index].size)
|
||||
if (block_index >= s->codebooks[*codebook_index].size || !s->codebooks[*codebook_index].blocks)
|
||||
return (MacroBlock) { { 0 } };
|
||||
|
||||
return s->codebooks[*codebook_index].blocks[block_index];
|
||||
@ -225,7 +220,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
// represent a lower bound of the space needed for skipped superblocks. Non
|
||||
// skipped SBs need more space.
|
||||
if (get_bits_left(&gb) < 64 + s->num_superblocks * 23LL / 4320)
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
frame_flags = get_bits_long(&gb, 32);
|
||||
frame_size = get_bits_long(&gb, 32);
|
||||
@ -242,7 +237,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
if ((ret = av_frame_ref(frame, s->frame)) < 0)
|
||||
return ret;
|
||||
|
||||
return frame_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
@ -276,9 +271,14 @@ static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
}
|
||||
|
||||
av_freep(&s->codebooks[i].blocks);
|
||||
if (cb_size >= INT_MAX / 34 || get_bits_left(&gb) < (int)cb_size * 34)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (cb_size >= INT_MAX / sizeof(MacroBlock))
|
||||
return AVERROR_INVALIDDATA;
|
||||
s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size);
|
||||
if (!s->codebooks[i].blocks)
|
||||
return -1;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
}
|
||||
|
||||
@ -371,7 +371,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
|
||||
|
||||
*got_frame = 1;
|
||||
|
||||
return frame_size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
@ -1930,8 +1930,10 @@ static int decode_header(EXRContext *s, AVFrame *frame)
|
||||
|
||||
bytestream2_get_buffer(gb, key, FFMIN(sizeof(key) - 1, var_size));
|
||||
if (strncmp("scanlineimage", key, var_size) &&
|
||||
strncmp("tiledimage", key, var_size))
|
||||
return AVERROR_PATCHWELCOME;
|
||||
strncmp("tiledimage", key, var_size)) {
|
||||
ret = AVERROR_PATCHWELCOME;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
continue;
|
||||
} else if ((var_size = check_header_variable(s, "preview",
|
||||
@ -1939,12 +1941,16 @@ static int decode_header(EXRContext *s, AVFrame *frame)
|
||||
uint32_t pw = bytestream2_get_le32(gb);
|
||||
uint32_t ph = bytestream2_get_le32(gb);
|
||||
uint64_t psize = pw * ph;
|
||||
if (psize > INT64_MAX / 4)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (psize > INT64_MAX / 4) {
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
psize *= 4;
|
||||
|
||||
if ((int64_t)psize >= bytestream2_get_bytes_left(gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
if ((int64_t)psize >= bytestream2_get_bytes_left(gb)) {
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
bytestream2_skip(gb, psize);
|
||||
|
||||
|
@ -366,19 +366,19 @@ static int decode_subframe_fixed(FLACContext *s, int32_t *decoded,
|
||||
break; \
|
||||
case 1: \
|
||||
for (int i = pred_order; i < blocksize; i++) \
|
||||
decoded[i] = (int64_t)residual[i] + (int64_t)decoded[i-1];\
|
||||
decoded[i] = (uint64_t)residual[i] + (uint64_t)decoded[i-1];\
|
||||
break; \
|
||||
case 2: \
|
||||
for (int i = pred_order; i < blocksize; i++) \
|
||||
decoded[i] = (int64_t)residual[i] + 2*(int64_t)decoded[i-1] - (int64_t)decoded[i-2]; \
|
||||
decoded[i] = (uint64_t)residual[i] + 2*(uint64_t)decoded[i-1] - (uint64_t)decoded[i-2]; \
|
||||
break; \
|
||||
case 3: \
|
||||
for (int i = pred_order; i < blocksize; i++) \
|
||||
decoded[i] = (int64_t)residual[i] + 3*(int64_t)decoded[i-1] - 3*(int64_t)decoded[i-2] + (int64_t)decoded[i-3]; \
|
||||
decoded[i] = (uint64_t)residual[i] + 3*(uint64_t)decoded[i-1] - 3*(uint64_t)decoded[i-2] + (uint64_t)decoded[i-3]; \
|
||||
break; \
|
||||
case 4: \
|
||||
for (int i = pred_order; i < blocksize; i++) \
|
||||
decoded[i] = (int64_t)residual[i] + 4*(int64_t)decoded[i-1] - 6*(int64_t)decoded[i-2] + 4*(int64_t)decoded[i-3] - (int64_t)decoded[i-4]; \
|
||||
decoded[i] = (uint64_t)residual[i] + 4*(uint64_t)decoded[i-1] - 6*(uint64_t)decoded[i-2] + 4*(uint64_t)decoded[i-3] - (uint64_t)decoded[i-4]; \
|
||||
break; \
|
||||
default: \
|
||||
av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order); \
|
||||
@ -513,7 +513,7 @@ static int decode_subframe_lpc_33bps(FLACContext *s, int64_t *decoded,
|
||||
for (i = pred_order; i < s->blocksize; i++, decoded++) {
|
||||
int64_t sum = 0;
|
||||
for (j = 0; j < pred_order; j++)
|
||||
sum += (int64_t)coeffs[j] * decoded[j];
|
||||
sum += (int64_t)coeffs[j] * (uint64_t)decoded[j];
|
||||
decoded[j] = residual[i] + (sum >> qlevel);
|
||||
}
|
||||
|
||||
@ -706,10 +706,10 @@ static void decorrelate_33bps(int ch_mode, int32_t **decoded, int64_t *decoded_3
|
||||
int i;
|
||||
if (ch_mode == FLAC_CHMODE_LEFT_SIDE ) {
|
||||
for (i = 0; i < len; i++)
|
||||
decoded[1][i] = decoded[0][i] - decoded_33bps[i];
|
||||
decoded[1][i] = decoded[0][i] - (uint64_t)decoded_33bps[i];
|
||||
} else if (ch_mode == FLAC_CHMODE_RIGHT_SIDE ) {
|
||||
for (i = 0; i < len; i++)
|
||||
decoded[0][i] = decoded[1][i] + decoded_33bps[i];
|
||||
decoded[0][i] = decoded[1][i] + (uint64_t)decoded_33bps[i];
|
||||
} else if (ch_mode == FLAC_CHMODE_MID_SIDE ) {
|
||||
for (i = 0; i < len; i++) {
|
||||
uint64_t a = decoded[0][i];
|
||||
|
@ -145,7 +145,8 @@ typedef struct G2MContext {
|
||||
int got_header;
|
||||
|
||||
uint8_t *framebuf;
|
||||
int framebuf_stride, old_width, old_height;
|
||||
int framebuf_stride;
|
||||
unsigned int framebuf_allocated;
|
||||
|
||||
uint8_t *synth_tile, *jpeg_tile, *epic_buf, *epic_buf_base;
|
||||
int tile_stride, epic_buf_stride, old_tile_w, old_tile_h;
|
||||
@ -1160,14 +1161,13 @@ static int g2m_init_buffers(G2MContext *c)
|
||||
{
|
||||
int aligned_height;
|
||||
|
||||
if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
|
||||
c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
|
||||
aligned_height = c->height + 15;
|
||||
av_free(c->framebuf);
|
||||
c->framebuf = av_calloc(c->framebuf_stride, aligned_height);
|
||||
if (!c->framebuf)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
|
||||
aligned_height = c->height + 15;
|
||||
|
||||
av_fast_mallocz(&c->framebuf, &c->framebuf_allocated, c->framebuf_stride * aligned_height);
|
||||
if (!c->framebuf)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!c->synth_tile || !c->jpeg_tile ||
|
||||
(c->compression == 2 && !c->epic_buf_base) ||
|
||||
c->old_tile_w < c->tile_width ||
|
||||
@ -1617,6 +1617,7 @@ static av_cold int g2m_decode_end(AVCodecContext *avctx)
|
||||
av_freep(&c->jpeg_tile);
|
||||
av_freep(&c->cursor);
|
||||
av_freep(&c->framebuf);
|
||||
c->framebuf_allocated = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -353,7 +353,7 @@ static int16_t long_term_filter(AudioDSPContext *adsp, int pitch_delay_int,
|
||||
if (tmp > 0)
|
||||
L_temp0 >>= tmp;
|
||||
else
|
||||
L_temp1 >>= -tmp;
|
||||
L_temp1 >>= FFMIN(-tmp, 31);
|
||||
|
||||
/* Check if longer filter increases the values of R'(k). */
|
||||
if (L_temp1 > L_temp0) {
|
||||
@ -581,7 +581,7 @@ void ff_g729_postfilter(AudioDSPContext *adsp, int16_t* ht_prev_data, int* voici
|
||||
int16_t ff_g729_adaptive_gain_control(int gain_before, int gain_after, int16_t *speech,
|
||||
int subframe_size, int16_t gain_prev)
|
||||
{
|
||||
int gain; // (3.12)
|
||||
unsigned gain; // (3.12)
|
||||
int n;
|
||||
int exp_before, exp_after;
|
||||
|
||||
@ -603,7 +603,7 @@ int16_t ff_g729_adaptive_gain_control(int gain_before, int gain_after, int16_t *
|
||||
gain = ((gain_before - gain_after) << 14) / gain_after + 0x4000;
|
||||
gain = bidir_sal(gain, exp_after - exp_before);
|
||||
}
|
||||
gain = av_clip_int16(gain);
|
||||
gain = FFMIN(gain, 32767);
|
||||
gain = (gain * G729_AGC_FAC1 + 0x4000) >> 15; // gain * (1-0.9875)
|
||||
} else
|
||||
gain = 0;
|
||||
|
@ -281,7 +281,7 @@ static int decode_slice(MpegEncContext *s)
|
||||
ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
|
||||
s->mb_x, s->mb_y, ER_MB_ERROR & part_mask);
|
||||
|
||||
if (s->avctx->err_recognition & AV_EF_IGNORE_ERR)
|
||||
if ((s->avctx->err_recognition & AV_EF_IGNORE_ERR) && get_bits_left(&s->gb) > 0)
|
||||
continue;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@ -123,7 +123,7 @@ static inline int get_nalsize(int nal_length_size, const uint8_t *buf,
|
||||
|
||||
if (*buf_index >= buf_size - nal_length_size) {
|
||||
// the end of the buffer is reached, refill it
|
||||
return AVERROR(EAGAIN);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < nal_length_size; i++)
|
||||
|
@ -646,10 +646,10 @@ static int h264_parse(AVCodecParserContext *s,
|
||||
int64_t num = time_base.num * (int64_t)avctx->pkt_timebase.den;
|
||||
if (s->dts != AV_NOPTS_VALUE) {
|
||||
// got DTS from the stream, update reference timestamp
|
||||
p->reference_dts = s->dts - av_rescale(s->dts_ref_dts_delta, num, den);
|
||||
p->reference_dts = av_sat_sub64(s->dts, av_rescale(s->dts_ref_dts_delta, num, den));
|
||||
} else if (p->reference_dts != AV_NOPTS_VALUE) {
|
||||
// compute DTS based on reference timestamp
|
||||
s->dts = p->reference_dts + av_rescale(s->dts_ref_dts_delta, num, den);
|
||||
s->dts = av_sat_add64(p->reference_dts, av_rescale(s->dts_ref_dts_delta, num, den));
|
||||
}
|
||||
|
||||
if (p->reference_dts != AV_NOPTS_VALUE && s->pts == AV_NOPTS_VALUE)
|
||||
|
@ -1523,7 +1523,8 @@ static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
|
||||
|
||||
if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
|
||||
x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
|
||||
y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
|
||||
y_off >= pic_height - block_h - QPEL_EXTRA_AFTER ||
|
||||
ref == s->frame) {
|
||||
const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
|
||||
int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
|
||||
int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
|
||||
@ -1673,6 +1674,7 @@ static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
|
||||
intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
|
||||
intptr_t _mx = mx << (1 - hshift);
|
||||
intptr_t _my = my << (1 - vshift);
|
||||
int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2];
|
||||
|
||||
x_off += mv->x >> (2 + hshift);
|
||||
y_off += mv->y >> (2 + vshift);
|
||||
@ -1680,7 +1682,8 @@ static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
|
||||
|
||||
if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
|
||||
x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
|
||||
y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
|
||||
y_off >= pic_height - block_h - EPEL_EXTRA_AFTER ||
|
||||
emu) {
|
||||
const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
|
||||
int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
|
||||
int buf_offset0 = EPEL_EXTRA_BEFORE *
|
||||
@ -1920,13 +1923,13 @@ static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
|
||||
|
||||
if (current_mv.pred_flag & PF_L0) {
|
||||
ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
|
||||
if (!ref0)
|
||||
if (!ref0 || !ref0->frame->data[0])
|
||||
return;
|
||||
hevc_await_progress(s, ref0, ¤t_mv.mv[0], y0, nPbH);
|
||||
}
|
||||
if (current_mv.pred_flag & PF_L1) {
|
||||
ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
|
||||
if (!ref1)
|
||||
if (!ref1 || !ref1->frame->data[0])
|
||||
return;
|
||||
hevc_await_progress(s, ref1, ¤t_mv.mv[1], y0, nPbH);
|
||||
}
|
||||
|
@ -695,9 +695,9 @@ static void decode_422_bitstream(HYuvDecContext *s, int count)
|
||||
/* TODO instead of restarting the read when the code isn't in the first level
|
||||
* of the joint table, jump into the 2nd level of the individual table. */
|
||||
#define READ_2PIX_PLANE16(dst0, dst1, plane){\
|
||||
dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
|
||||
dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)*4;\
|
||||
dst0 += get_bits(&s->gb, 2);\
|
||||
dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
|
||||
dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)*4;\
|
||||
dst1 += get_bits(&s->gb, 2);\
|
||||
}
|
||||
static void decode_plane_bitstream(HYuvDecContext *s, int width, int plane)
|
||||
@ -755,7 +755,7 @@ static void decode_plane_bitstream(HYuvDecContext *s, int width, int plane)
|
||||
}
|
||||
}
|
||||
if( width&1 && get_bits_left(&s->gb)>0 ) {
|
||||
int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
|
||||
int dst = (unsigned)get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
|
||||
s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
|
||||
}
|
||||
}
|
||||
|
@ -721,11 +721,10 @@ static void encode_cblk(Jpeg2000EncoderContext *s, Jpeg2000T1Context *t1, Jpeg20
|
||||
|
||||
if (max == 0){
|
||||
cblk->nonzerobits = 0;
|
||||
bpno = 0;
|
||||
} else{
|
||||
cblk->nonzerobits = av_log2(max) + 1 - NMSEDEC_FRACBITS;
|
||||
bpno = cblk->nonzerobits - 1;
|
||||
}
|
||||
bpno = cblk->nonzerobits - 1;
|
||||
|
||||
cblk->data[0] = 0;
|
||||
ff_mqc_initenc(&t1->mqc, cblk->data + 1);
|
||||
@ -1531,6 +1530,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
int tileno, ret;
|
||||
Jpeg2000EncoderContext *s = avctx->priv_data;
|
||||
uint8_t *chunkstart, *jp2cstart, *jp2hstart;
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
|
||||
|
||||
if ((ret = ff_alloc_packet(avctx, pkt, avctx->width*avctx->height*9 + AV_INPUT_BUFFER_MIN_SIZE)) < 0)
|
||||
return ret;
|
||||
@ -1543,7 +1543,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
|
||||
s->lambda = s->picture->quality * LAMBDA_SCALE;
|
||||
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_BGR48 || avctx->pix_fmt == AV_PIX_FMT_GRAY16)
|
||||
if (s->cbps[0] > 8)
|
||||
copy_frame_16(s);
|
||||
else
|
||||
copy_frame_8(s);
|
||||
@ -1587,7 +1587,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
bytestream_put_byte(&s->buf, 1);
|
||||
bytestream_put_byte(&s->buf, 0);
|
||||
bytestream_put_byte(&s->buf, 0);
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_RGB24 || avctx->pix_fmt == AV_PIX_FMT_PAL8) {
|
||||
if ((desc->flags & AV_PIX_FMT_FLAG_RGB) || avctx->pix_fmt == AV_PIX_FMT_PAL8) {
|
||||
bytestream_put_be32(&s->buf, 16);
|
||||
} else if (s->ncomponents == 1) {
|
||||
bytestream_put_be32(&s->buf, 17);
|
||||
@ -1717,6 +1717,7 @@ static av_cold int j2kenc_init(AVCodecContext *avctx)
|
||||
Jpeg2000EncoderContext *s = avctx->priv_data;
|
||||
Jpeg2000CodingStyle *codsty = &s->codsty;
|
||||
Jpeg2000QuantStyle *qntsty = &s->qntsty;
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
|
||||
|
||||
s->avctx = avctx;
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "init\n");
|
||||
@ -1729,7 +1730,7 @@ static av_cold int j2kenc_init(AVCodecContext *avctx)
|
||||
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && (s->pred != FF_DWT97_INT || s->format != CODEC_JP2)) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Forcing lossless jp2 for pal8\n");
|
||||
s->pred = FF_DWT97_INT;
|
||||
s->pred = 1;
|
||||
s->format = CODEC_JP2;
|
||||
}
|
||||
|
||||
@ -1759,20 +1760,13 @@ static av_cold int j2kenc_init(AVCodecContext *avctx)
|
||||
s->width = avctx->width;
|
||||
s->height = avctx->height;
|
||||
|
||||
s->ncomponents = desc->nb_components;
|
||||
for (i = 0; i < 3; i++) {
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_GRAY16 || avctx->pix_fmt == AV_PIX_FMT_RGB48)
|
||||
s->cbps[i] = 16;
|
||||
else
|
||||
s->cbps[i] = 8;
|
||||
s->cbps[i] = desc->comp[i].depth;
|
||||
}
|
||||
|
||||
if (avctx->pix_fmt == AV_PIX_FMT_RGB24 || avctx->pix_fmt == AV_PIX_FMT_RGB48){
|
||||
s->ncomponents = 3;
|
||||
} else if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 || avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY16){
|
||||
s->ncomponents = 1;
|
||||
} else{ // planar YUV
|
||||
if ((desc->flags & AV_PIX_FMT_FLAG_PLANAR) && s->ncomponents > 1) {
|
||||
s->planar = 1;
|
||||
s->ncomponents = 3;
|
||||
ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
|
||||
s->chroma_shift, s->chroma_shift + 1);
|
||||
if (ret)
|
||||
@ -1810,7 +1804,7 @@ static const AVOption options[] = {
|
||||
{ "tile_height", "Tile Height", OFFSET(tile_height), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, 1<<30, VE, },
|
||||
{ "pred", "DWT Type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, "pred" },
|
||||
{ "dwt97int", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "pred" },
|
||||
{ "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "pred" },
|
||||
{ "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" },
|
||||
{ "sop", "SOP marker", OFFSET(sop), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, },
|
||||
{ "eph", "EPH marker", OFFSET(eph), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, },
|
||||
{ "prog", "Progression Order", OFFSET(prog), AV_OPT_TYPE_INT, { .i64 = 0 }, JPEG2000_PGOD_LRCP, JPEG2000_PGOD_CPRL, VE, "prog" },
|
||||
|
@@ -323,6 +323,16 @@ static int get_siz(Jpeg2000DecoderContext *s)
return AVERROR_INVALIDDATA;
}

if (s->image_offset_x >= s->width || s->image_offset_y >= s->height) {
av_log(s->avctx, AV_LOG_ERROR, "image offsets outside image");
return AVERROR_INVALIDDATA;
}

if (s->reduction_factor && (s->image_offset_x || s->image_offset_y) ){
av_log(s->avctx, AV_LOG_ERROR, "reduction factor with image offsets is not fully implemented");
return AVERROR_PATCHWELCOME;
}

s->ncomponents = ncomponents;

if (s->tile_width <= 0 || s->tile_height <= 0) {
@@ -388,7 +398,7 @@ static int get_siz(Jpeg2000DecoderContext *s)
dimy = FFMAX(dimy, ff_jpeg2000_ceildiv(o_dimy, s->cdy[i]));
}

ret = ff_set_dimensions(s->avctx, dimx, dimy);
ret = ff_set_dimensions(s->avctx, dimx << s->avctx->lowres, dimy << s->avctx->lowres);
if (ret < 0)
return ret;

@@ -2471,6 +2481,14 @@ static av_cold int jpeg2000_decode_init(AVCodecContext *avctx)
{
Jpeg2000DecoderContext *s = avctx->priv_data;

if (avctx->lowres)
av_log(avctx, AV_LOG_WARNING, "lowres is overriden by reduction_factor but set anyway\n");
if (!s->reduction_factor && avctx->lowres < JPEG2000_MAX_RESLEVELS) {
s->reduction_factor = avctx->lowres;
}
if (avctx->lowres != s->reduction_factor && avctx->lowres)
return AVERROR(EINVAL);

ff_jpeg2000dsp_init(&s->dsp);
ff_jpeg2000_init_tier1_luts();
@@ -152,6 +152,8 @@ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i
if (expected != (unsigned int)zstream->total_out) {
av_log(avctx, AV_LOG_ERROR, "Decoded size differs (%d != %lu)\n",
expected, zstream->total_out);
if (expected > (unsigned int)zstream->total_out)
return (unsigned int)zstream->total_out;
return AVERROR_UNKNOWN;
}
return zstream->total_out;
@@ -169,8 +171,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
int row, col;
unsigned char *encoded = avpkt->data, *outptr;
uint8_t *y_out, *u_out, *v_out;
unsigned int width = avctx->width; // Real image width
unsigned int height = avctx->height; // Real image height
int width = avctx->width; // Real image width
int height = avctx->height; // Real image height
unsigned int mszh_dlen;
unsigned char yq, y1q, uq, vq;
int uqvq, ret;
@@ -227,16 +229,19 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
break;
case COMP_MSZH_NOCOMP: {
int bppx2;
int aligned_width = width;
switch (c->imgtype) {
case IMGTYPE_YUV111:
case IMGTYPE_RGB24:
bppx2 = 6;
break;
case IMGTYPE_YUV422:
aligned_width &= ~3;
case IMGTYPE_YUV211:
bppx2 = 4;
break;
case IMGTYPE_YUV411:
aligned_width &= ~3;
case IMGTYPE_YUV420:
bppx2 = 3;
break;
@@ -244,7 +249,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
bppx2 = 0; // will error out below
break;
}
if (len < ((width * height * bppx2) >> 1))
if (len < ((aligned_width * height * bppx2) >> 1))
return AVERROR_INVALIDDATA;
break;
}
@@ -276,12 +281,13 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
ret = zlib_decomp(avctx, buf + 8 + mthread_inlen, len - 8 - mthread_inlen,
mthread_outlen, mthread_outlen);
if (ret < 0) return ret;
len = c->decomp_size;
} else {
int ret = zlib_decomp(avctx, buf, len, 0, c->decomp_size);
if (ret < 0) return ret;
len = ret;
}
encoded = c->decomp_buf;
len = c->decomp_size;
break;
#endif
default:
@@ -309,8 +315,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
break;
case IMGTYPE_YUV422:
pixel_ptr = 0;
for (row = 0; row < height; row++) {
pixel_ptr = row * width * 2;
yq = uq = vq =0;
for (col = 0; col < width/4; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
@@ -326,8 +332,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
}
break;
case IMGTYPE_YUV411:
pixel_ptr = 0;
for (row = 0; row < height; row++) {
pixel_ptr = row * width / 2 * 3;
yq = uq = vq =0;
for (col = 0; col < width/4; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
@@ -403,6 +409,11 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
v_out[ col >> 1 ] = *encoded++ + 128;
v_out[(col >> 1) + 1] = *encoded++ + 128;
}
if (col && col < width) {
u_out[ col >> 1 ] = u_out[(col>>1) - 1];
v_out[ col >> 1 ] = v_out[(col>>1) - 1];
}

y_out -= frame->linesize[0];
u_out -= frame->linesize[1];
v_out -= frame->linesize[2];
@@ -424,6 +435,10 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
u_out[col >> 2] = *encoded++ + 128;
v_out[col >> 2] = *encoded++ + 128;
}
if (col && col < width) {
u_out[col >> 2] = u_out[(col>>2) - 1];
v_out[col >> 2] = v_out[(col>>2) - 1];
}
y_out -= frame->linesize[0];
u_out -= frame->linesize[1];
v_out -= frame->linesize[2];
@@ -481,6 +496,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
FFALIGN(avctx->height, 4);
unsigned int max_decomp_size;
int subsample_h, subsample_v;
int partial_h_supported = 0;

if (avctx->extradata_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n");
@@ -502,26 +518,24 @@ static av_cold int decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 1:1:1.\n");
break;
case IMGTYPE_YUV422:
c->decomp_size = basesize * 2;
c->decomp_size = (avctx->width & ~3) * avctx->height * 2;
max_decomp_size = max_basesize * 2;
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:2:2.\n");
if (avctx->width % 4) {
avpriv_request_sample(avctx, "Unsupported dimensions");
return AVERROR_INVALIDDATA;
}
partial_h_supported = 1;
break;
case IMGTYPE_RGB24:
c->decomp_size = basesize * 3;
c->decomp_size = FFALIGN(avctx->width*3, 4) * avctx->height;
max_decomp_size = max_basesize * 3;
avctx->pix_fmt = AV_PIX_FMT_BGR24;
av_log(avctx, AV_LOG_DEBUG, "Image type is RGB 24.\n");
break;
case IMGTYPE_YUV411:
c->decomp_size = basesize / 2 * 3;
c->decomp_size = (avctx->width & ~3) * avctx->height / 2 * 3;
max_decomp_size = max_basesize / 2 * 3;
avctx->pix_fmt = AV_PIX_FMT_YUV411P;
av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:1:1.\n");
partial_h_supported = 1;
break;
case IMGTYPE_YUV211:
c->decomp_size = basesize * 2;
@@ -541,7 +555,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
}

av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v);
if (avctx->width % (1<<subsample_h) || avctx->height % (1<<subsample_v)) {
if ((avctx->width % (1<<subsample_h) && !partial_h_supported) || avctx->height % (1<<subsample_v)) {
avpriv_request_sample(avctx, "Unsupported dimensions");
return AVERROR_INVALIDDATA;
}
@@ -473,8 +473,9 @@ static int zero_cmp(MpegEncContext *s, const uint8_t *a, const uint8_t *b,
return 0;
}

void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
{
int ret = 0;
int i;

memset(cmp, 0, sizeof(void *) * 6);
@@ -533,9 +534,13 @@ void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
#endif
default:
av_log(NULL, AV_LOG_ERROR,
"internal error in cmp function selection\n");
"invalid cmp function selection\n");
ret = -1;
break;
}
}

return ret;
}

#define BUTTERFLY2(o1, o2, i1, i2) \
@@ -89,7 +89,7 @@ void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx);

void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
int ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);

void ff_dsputil_init_dwt(MECmpContext *c);
@@ -309,6 +309,7 @@ int ff_init_me(MpegEncContext *s){
MotionEstContext * const c= &s->me;
int cache_size= FFMIN(ME_MAP_SIZE>>ME_MAP_SHIFT, 1<<ME_MAP_SHIFT);
int dia_size= FFMAX(FFABS(s->avctx->dia_size)&255, FFABS(s->avctx->pre_dia_size)&255);
int ret;

if(FFMIN(s->avctx->dia_size, s->avctx->pre_dia_size) < -FFMIN(ME_MAP_SIZE, MAX_SAB_SIZE)){
av_log(s->avctx, AV_LOG_ERROR, "ME_MAP size is too small for SAB diamond\n");
@@ -324,10 +325,12 @@ int ff_init_me(MpegEncContext *s){
av_log(s->avctx, AV_LOG_INFO, "ME_MAP size may be a little small for the selected diamond size\n");
}

ff_set_cmp(&s->mecc, s->mecc.me_pre_cmp, c->avctx->me_pre_cmp);
ff_set_cmp(&s->mecc, s->mecc.me_cmp, c->avctx->me_cmp);
ff_set_cmp(&s->mecc, s->mecc.me_sub_cmp, c->avctx->me_sub_cmp);
ff_set_cmp(&s->mecc, s->mecc.mb_cmp, c->avctx->mb_cmp);
ret = ff_set_cmp(&s->mecc, s->mecc.me_pre_cmp, c->avctx->me_pre_cmp);
ret |= ff_set_cmp(&s->mecc, s->mecc.me_cmp, c->avctx->me_cmp);
ret |= ff_set_cmp(&s->mecc, s->mecc.me_sub_cmp, c->avctx->me_sub_cmp);
ret |= ff_set_cmp(&s->mecc, s->mecc.mb_cmp, c->avctx->mb_cmp);
if (ret < 0)
return ret;

c->flags = get_flags(c, 0, c->avctx->me_cmp &FF_CMP_CHROMA);
c->sub_flags= get_flags(c, 0, c->avctx->me_sub_cmp&FF_CMP_CHROMA);
@@ -2956,6 +2956,10 @@ static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
GetBitContext *gb = &m->gb;
int ret;

// Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC)
if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2 + 3*4 + 2*2 + 2*6))
return AVERROR_INVALIDDATA;

ret = ff_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
@@ -295,7 +295,7 @@ void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb
int hsub = i ? s->chroma_x_shift : 0;
int lowres = s->avctx->lowres;
int step = 1 << lowres;
dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub + lowres) - 1);
for (int h = (16 >> (vsub + lowres)) - 1; h >= 0; h--){
for (int w = (16 >> (hsub + lowres)) - 1, idx = 0; w >= 0; w--, idx += step)
dest_pcm[i][w] = src[idx];
@@ -861,7 +861,7 @@ static inline int get_amv(Mpeg4DecContext *ctx, int n)
for (y = 0; y < 16; y++) {
int v;

v = mb_v + dy * y;
v = mb_v + (unsigned)dy * y;
// FIXME optimize
for (x = 0; x < 16; x++) {
sum += v >> shift;
@@ -1437,7 +1437,7 @@ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
if (SHOW_UBITS(re, &s->gb, 1) == 0) {
av_log(s->avctx, AV_LOG_ERROR,
"1. marker bit missing in 3. esc\n");
if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR))
if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&s->gb) <= 0)
return AVERROR_INVALIDDATA;
}
SKIP_CACHE(re, &s->gb, 1);
@@ -1448,7 +1448,7 @@ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
if (SHOW_UBITS(re, &s->gb, 1) == 0) {
av_log(s->avctx, AV_LOG_ERROR,
"2. marker bit missing in 3. esc\n");
if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR))
if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&s->gb) <= 0)
return AVERROR_INVALIDDATA;
}
@@ -637,6 +637,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> lowres;
const int v_edge_pos = s->v_edge_pos >> lowres;
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
linesize = s->current_picture.f->linesize[0] << field_based;
uvlinesize = s->current_picture.f->linesize[1] << field_based;

@@ -699,7 +700,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
(unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
linesize >> field_based, linesize >> field_based,
17, 17 + field_based,
@@ -744,7 +745,6 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
uvsx = (uvsx << 2) >> lowres;
uvsy = (uvsy << 2) >> lowres;
if (hc) {
@@ -902,8 +902,10 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)

s->quant_precision = 5;

ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
ret = ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
ret |= ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
if (ret < 0)
return AVERROR(EINVAL);

if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
ff_h263_encode_init(s);
@@ -86,6 +86,12 @@ static int noise_init(AVBSFContext *ctx)
return AVERROR(ENOMEM);
}

if (ctx->par_in->codec_id == AV_CODEC_ID_WRAPPED_AVFRAME &&
strcmp(s->amount_str, "0")) {
av_log(ctx, AV_LOG_ERROR, "Wrapped AVFrame noising is unsupported\n");
return AVERROR_PATCHWELCOME;
}

ret = av_expr_parse(&s->amount_pexpr, s->amount_str,
var_names, NULL, NULL, NULL, NULL, 0, ctx);
if (ret < 0) {
@@ -305,6 +305,15 @@ static int nvdec_hevc_frame_params(AVCodecContext *avctx,
static int nvdec_hevc_decode_init(AVCodecContext *avctx) {
NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
ctx->supports_444 = 1;

if (avctx->profile != FF_PROFILE_HEVC_MAIN &&
avctx->profile != FF_PROFILE_HEVC_MAIN_10 &&
avctx->profile != FF_PROFILE_HEVC_MAIN_STILL_PICTURE &&
avctx->profile != FF_PROFILE_HEVC_REXT) {
av_log(avctx, AV_LOG_ERROR, "Unsupported HEVC profile: %d\n", avctx->profile);
return AVERROR(ENOTSUP);
}

return ff_nvdec_decode_init(avctx);
}
@@ -83,8 +83,9 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer
};

for (i = 0; i < 64; ++i) {
ppc->QuantMatrixIntra[i] = s->intra_matrix[i];
ppc->QuantMatrixInter[i] = s->inter_matrix[i];
int n = s->idsp.idct_permutation[i];
ppc->QuantMatrixIntra[i] = s->intra_matrix[n];
ppc->QuantMatrixInter[i] = s->inter_matrix[n];
}

return 0;
@@ -88,8 +88,9 @@ static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer,
};

for (i = 0; i < 64; ++i) {
ppc->QuantMatrixIntra[i] = s->intra_matrix[i];
ppc->QuantMatrixInter[i] = s->inter_matrix[i];
int n = s->idsp.idct_permutation[i];
ppc->QuantMatrixIntra[i] = s->intra_matrix[n];
ppc->QuantMatrixInter[i] = s->inter_matrix[n];
}

// We need to pass the full frame buffer and not just the slice
@@ -28,7 +28,6 @@
#include "av1.h"
#endif

#include "libavutil/buffer.h"
#include "libavutil/hwcontext_cuda.h"
#include "libavutil/hwcontext.h"
#include "libavutil/cuda_check.h"
@@ -44,9 +43,14 @@
#define CHECK_CU(x) FF_CUDA_CHECK_DL(avctx, dl_fn->cuda_dl, x)

#define NVENC_CAP 0x30

#ifndef NVENC_NO_DEPRECATED_RC
#define IS_CBR(rc) (rc == NV_ENC_PARAMS_RC_CBR || \
rc == NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ || \
rc == NV_ENC_PARAMS_RC_CBR_HQ)
#else
#define IS_CBR(rc) (rc == NV_ENC_PARAMS_RC_CBR)
#endif

const enum AVPixelFormat ff_nvenc_pix_fmts[] = {
AV_PIX_FMT_YUV420P,
@@ -163,25 +167,6 @@ static int nvenc_print_error(AVCodecContext *avctx, NVENCSTATUS err,
return ret;
}

typedef struct FrameData {
int64_t pts;
int64_t duration;
#if FF_API_REORDERED_OPAQUE
int64_t reordered_opaque;
#endif

void *frame_opaque;
AVBufferRef *frame_opaque_ref;
} FrameData;

static void reorder_queue_flush(AVFifo *queue)
{
FrameData fd;

while (av_fifo_read(queue, &fd, 1) >= 0)
av_buffer_unref(&fd.frame_opaque_ref);
}

typedef struct GUIDTuple {
const GUID guid;
int flags;
@@ -457,7 +442,7 @@ static int nvenc_check_cap(AVCodecContext *avctx, NV_ENC_CAPS cap)
static int nvenc_check_capabilities(AVCodecContext *avctx)
{
NvencContext *ctx = avctx->priv_data;
int ret;
int tmp, ret;

ret = nvenc_check_codec_support(avctx);
if (ret < 0) {
@@ -538,16 +523,18 @@ static int nvenc_check_capabilities(AVCodecContext *avctx)
}

#ifdef NVENC_HAVE_BFRAME_REF_MODE
tmp = (ctx->b_ref_mode >= 0) ? ctx->b_ref_mode : NV_ENC_BFRAME_REF_MODE_DISABLED;
ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE);
if (ctx->b_ref_mode == NV_ENC_BFRAME_REF_MODE_EACH && ret != 1 && ret != 3) {
if (tmp == NV_ENC_BFRAME_REF_MODE_EACH && ret != 1 && ret != 3) {
av_log(avctx, AV_LOG_WARNING, "Each B frame as reference is not supported\n");
return AVERROR(ENOSYS);
} else if (ctx->b_ref_mode != NV_ENC_BFRAME_REF_MODE_DISABLED && ret == 0) {
} else if (tmp != NV_ENC_BFRAME_REF_MODE_DISABLED && ret == 0) {
av_log(avctx, AV_LOG_WARNING, "B frames as references are not supported\n");
return AVERROR(ENOSYS);
}
#else
if (ctx->b_ref_mode != 0) {
tmp = (ctx->b_ref_mode >= 0) ? ctx->b_ref_mode : 0;
if (tmp > 0) {
av_log(avctx, AV_LOG_WARNING, "B frames as references need SDK 8.1 at build time\n");
return AVERROR(ENOSYS);
}
@@ -922,6 +909,7 @@ static void nvenc_override_rate_control(AVCodecContext *avctx)
case NV_ENC_PARAMS_RC_CONSTQP:
set_constqp(avctx);
return;
#ifndef NVENC_NO_DEPRECATED_RC
case NV_ENC_PARAMS_RC_VBR_MINQP:
if (avctx->qmin < 0) {
av_log(avctx, AV_LOG_WARNING,
@@ -932,12 +920,15 @@ static void nvenc_override_rate_control(AVCodecContext *avctx)
}
/* fall through */
case NV_ENC_PARAMS_RC_VBR_HQ:
#endif
case NV_ENC_PARAMS_RC_VBR:
set_vbr(avctx);
break;
case NV_ENC_PARAMS_RC_CBR:
#ifndef NVENC_NO_DEPRECATED_RC
case NV_ENC_PARAMS_RC_CBR_HQ:
case NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ:
#endif
break;
}

@@ -980,6 +971,10 @@ static av_cold int nvenc_recalc_surfaces(AVCodecContext *avctx)
ctx->nb_surfaces = FFMAX(1, FFMIN(MAX_REGISTERED_FRAMES, ctx->nb_surfaces));
ctx->async_depth = FFMIN(ctx->async_depth, ctx->nb_surfaces - 1);

// Output in the worst case will only start when the surface buffer is completely full.
// Hence we need to keep at least the max amount of surfaces plus the max reorder delay around.
ctx->frame_data_array_nb = ctx->nb_surfaces + ctx->encode_config.frameIntervalP - 1;

return 0;
}

@@ -1207,12 +1202,14 @@ static av_cold int nvenc_setup_h264_config(AVCodecContext *avctx)

h264->outputPictureTimingSEI = 1;

#ifndef NVENC_NO_DEPRECATED_RC
if (cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ ||
cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_CBR_HQ ||
cc->rcParams.rateControlMode == NV_ENC_PARAMS_RC_VBR_HQ) {
h264->adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE;
h264->fmoMode = NV_ENC_H264_FMO_DISABLE;
}
#endif

if (ctx->flags & NVENC_LOSSLESS) {
h264->qpPrimeYZeroTransformBypassFlag = 1;
@@ -1768,8 +1765,12 @@ static av_cold int nvenc_setup_surfaces(AVCodecContext *avctx)
if (!ctx->surfaces)
return AVERROR(ENOMEM);

ctx->reorder_queue = av_fifo_alloc2(ctx->nb_surfaces, sizeof(FrameData), 0);
if (!ctx->reorder_queue)
ctx->frame_data_array = av_calloc(ctx->frame_data_array_nb, sizeof(*ctx->frame_data_array));
if (!ctx->frame_data_array)
return AVERROR(ENOMEM);

ctx->timestamp_list = av_fifo_alloc2(ctx->nb_surfaces, sizeof(int64_t), 0);
if (!ctx->timestamp_list)
return AVERROR(ENOMEM);

ctx->unused_surface_queue = av_fifo_alloc2(ctx->nb_surfaces, sizeof(NvencSurface*), 0);
@@ -1853,12 +1854,17 @@ av_cold int ff_nvenc_encode_close(AVCodecContext *avctx)
p_nvenc->nvEncEncodePicture(ctx->nvencoder, &params);
}

reorder_queue_flush(ctx->reorder_queue);
av_fifo_freep2(&ctx->reorder_queue);
av_fifo_freep2(&ctx->timestamp_list);
av_fifo_freep2(&ctx->output_surface_ready_queue);
av_fifo_freep2(&ctx->output_surface_queue);
av_fifo_freep2(&ctx->unused_surface_queue);

if (ctx->frame_data_array) {
for (i = 0; i < ctx->nb_surfaces; i++)
av_buffer_unref(&ctx->frame_data_array[i].frame_opaque_ref);
av_freep(&ctx->frame_data_array);
}

if (ctx->surfaces && (avctx->pix_fmt == AV_PIX_FMT_CUDA || avctx->pix_fmt == AV_PIX_FMT_D3D11)) {
for (i = 0; i < ctx->nb_registered_frames; i++) {
if (ctx->registered_frames[i].mapped)
@@ -2198,53 +2204,18 @@ static void nvenc_codec_specific_pic_params(AVCodecContext *avctx,
}
}

static void reorder_queue_enqueue(AVFifo *queue, const AVCodecContext *avctx,
const AVFrame *frame, AVBufferRef **opaque_ref)
static inline void timestamp_queue_enqueue(AVFifo *queue, int64_t timestamp)
{
FrameData fd;

fd.pts = frame->pts;
fd.duration = frame->duration;
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
fd.reordered_opaque = frame->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
fd.frame_opaque = frame->opaque;
fd.frame_opaque_ref = *opaque_ref;

*opaque_ref = NULL;

av_fifo_write(queue, &fd, 1);
av_fifo_write(queue, &timestamp, 1);
}

static int64_t reorder_queue_dequeue(AVFifo *queue, AVCodecContext *avctx,
AVPacket *pkt)
static inline int64_t timestamp_queue_dequeue(AVFifo *queue)
{
FrameData fd;

int64_t timestamp = AV_NOPTS_VALUE;
// The following call might fail if the queue is empty.
if (av_fifo_read(queue, &fd, 1) < 0)
return AV_NOPTS_VALUE;
av_fifo_read(queue, &timestamp, 1);

if (pkt) {
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
avctx->reordered_opaque = fd.reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->duration = fd.duration;

if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
pkt->opaque = fd.frame_opaque;
pkt->opaque_ref = fd.frame_opaque_ref;
fd.frame_opaque_ref = NULL;
}
}

av_buffer_unref(&fd.frame_opaque_ref);

return fd.pts;
return timestamp;
}

static int nvenc_set_timestamp(AVCodecContext *avctx,
@@ -2252,14 +2223,12 @@ static int nvenc_set_timestamp(AVCodecContext *avctx,
AVPacket *pkt)
{
NvencContext *ctx = avctx->priv_data;
int64_t dts;

pkt->pts = params->outputTimeStamp;

dts = reorder_queue_dequeue(ctx->reorder_queue, avctx, pkt);

if (avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER) {
pkt->dts = dts - FFMAX(ctx->encode_config.frameIntervalP - 1, 0) * FFMAX(avctx->ticks_per_frame, 1);
pkt->dts = timestamp_queue_dequeue(ctx->timestamp_list) -
FFMAX(ctx->encode_config.frameIntervalP - 1, 0) * FFMAX(avctx->ticks_per_frame, 1);
} else {
pkt->dts = pkt->pts;
}
@@ -2267,6 +2236,65 @@ static int nvenc_set_timestamp(AVCodecContext *avctx,
return 0;
}

static int nvenc_store_frame_data(AVCodecContext *avctx, NV_ENC_PIC_PARAMS *pic_params, const AVFrame *frame)
{
NvencContext *ctx = avctx->priv_data;
int res = 0;

int idx = ctx->frame_data_array_pos;
NvencFrameData *frame_data = &ctx->frame_data_array[idx];

// in case the encoder got reconfigured, there might be leftovers
av_buffer_unref(&frame_data->frame_opaque_ref);

if (frame && frame->opaque_ref && avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
frame_data->frame_opaque_ref = av_buffer_ref(frame->opaque_ref);
if (!frame_data->frame_opaque_ref)
return AVERROR(ENOMEM);
}

frame_data->duration = frame->duration;
frame_data->frame_opaque = frame->opaque;

#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
frame_data->reordered_opaque = frame->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

ctx->frame_data_array_pos = (ctx->frame_data_array_pos + 1) % ctx->frame_data_array_nb;
pic_params->inputDuration = idx;

return res;
}

static int nvenc_retrieve_frame_data(AVCodecContext *avctx, NV_ENC_LOCK_BITSTREAM *lock_params, AVPacket *pkt)
{
NvencContext *ctx = avctx->priv_data;
int res = 0;

int idx = lock_params->outputDuration;
NvencFrameData *frame_data = &ctx->frame_data_array[idx];

pkt->duration = frame_data->duration;

#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS
avctx->reordered_opaque = frame_data->reordered_opaque;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
pkt->opaque = frame_data->frame_opaque;
pkt->opaque_ref = frame_data->frame_opaque_ref;
frame_data->frame_opaque_ref = NULL;
}

av_buffer_unref(&frame_data->frame_opaque_ref);

return res;
}

static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSurface *tmpoutsurf)
{
NvencContext *ctx = avctx->priv_data;
@@ -2353,10 +2381,14 @@ static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSur
if (res < 0)
goto error2;

res = nvenc_retrieve_frame_data(avctx, &lock_params, pkt);
if (res < 0)
goto error2;

return 0;

error:
reorder_queue_dequeue(ctx->reorder_queue, avctx, NULL);
timestamp_queue_dequeue(ctx->timestamp_list);

error2:
return res;
@@ -2586,8 +2618,6 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
int sei_count = 0;
int i;

AVBufferRef *opaque_ref = NULL;

NvencContext *ctx = avctx->priv_data;
NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
@@ -2650,22 +2680,18 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
sei_count = res;
}

res = nvenc_store_frame_data(avctx, &pic_params, frame);
if (res < 0)
return res;

nvenc_codec_specific_pic_params(avctx, &pic_params, ctx->sei_data, sei_count);
} else {
pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
}

// make a reference for enqueing in the reorder queue here,
// so that reorder_queue_enqueue() cannot fail
if (frame && frame->opaque_ref && avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
opaque_ref = av_buffer_ref(frame->opaque_ref);
if (!opaque_ref)
return AVERROR(ENOMEM);
}

res = nvenc_push_context(avctx);
if (res < 0)
goto opaque_ref_fail;
return res;

nv_status = p_nvenc->nvEncEncodePicture(ctx->nvencoder, &pic_params);

@@ -2674,17 +2700,17 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)

res = nvenc_pop_context(avctx);
if (res < 0)
goto opaque_ref_fail;
return res;

if (nv_status != NV_ENC_SUCCESS &&
nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
res = nvenc_print_error(avctx, nv_status, "EncodePicture failed!");
goto opaque_ref_fail;
}
nv_status != NV_ENC_ERR_NEED_MORE_INPUT)
return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");

if (frame && frame->buf[0]) {
av_fifo_write(ctx->output_surface_queue, &in_surf, 1);
reorder_queue_enqueue(ctx->reorder_queue, avctx, frame, &opaque_ref);

if (avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER)
timestamp_queue_enqueue(ctx->timestamp_list, frame->pts);
}

/* all the pending buffers are now ready for output */
@@ -2694,10 +2720,6 @@ static int nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
}

return 0;

opaque_ref_fail:
av_buffer_unref(&opaque_ref);
return res;
}

int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
@@ -2756,5 +2778,5 @@ av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx)
NvencContext *ctx = avctx->priv_data;

nvenc_send_frame(avctx, NULL);
reorder_queue_flush(ctx->reorder_queue);
av_fifo_reset2(ctx->timestamp_list);
}
@@ -31,6 +31,7 @@ typedef void ID3D11Device;
#include <ffnvcodec/nvEncodeAPI.h>

#include "compat/cuda/dynlink_loader.h"
#include "libavutil/buffer.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"
#include "hwconfig.h"
@@ -77,6 +78,11 @@ typedef void ID3D11Device;
#define NVENC_HAVE_SINGLE_SLICE_INTRA_REFRESH
#endif

// SDK 12.1 compile time feature checks
#if NVENCAPI_CHECK_VERSION(12, 1)
#define NVENC_NO_DEPRECATED_RC
#endif

typedef struct NvencSurface
{
NV_ENC_INPUT_PTR input_surface;
@@ -90,6 +96,18 @@ typedef struct NvencSurface
NV_ENC_BUFFER_FORMAT format;
} NvencSurface;

typedef struct NvencFrameData
{
int64_t duration;

#if FF_API_REORDERED_OPAQUE
int64_t reordered_opaque;
#endif

void *frame_opaque;
AVBufferRef *frame_opaque_ref;
} NvencFrameData;

typedef struct NvencDynLoadFunctions
{
CudaFunctions *cuda_dl;
@@ -168,10 +186,14 @@ typedef struct NvencContext
int nb_surfaces;
NvencSurface *surfaces;

NvencFrameData *frame_data_array;
int frame_data_array_nb;
int frame_data_array_pos;

AVFifo *unused_surface_queue;
AVFifo *output_surface_queue;
AVFifo *output_surface_ready_queue;
AVFifo *reorder_queue;
AVFifo *timestamp_list;

NV_ENC_SEI_PAYLOAD *sei_data;
int sei_data_size;
@@ -100,6 +100,7 @@ static const AVOption options[] = {
{ "constqp", "Constant QP mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_CONSTQP }, 0, 0, VE, "rc" },
{ "vbr", "Variable bitrate mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_VBR }, 0, 0, VE, "rc" },
{ "cbr", "Constant bitrate mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_CBR }, 0, 0, VE, "rc" },
#ifndef NVENC_NO_DEPRECATED_RC
{ "vbr_minqp", "Variable bitrate mode with MinQP (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR_MINQP) }, 0, 0, VE, "rc" },
{ "ll_2pass_quality", "Multi-pass optimized for image quality (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_2_PASS_QUALITY) }, 0, 0, VE, "rc" },
@@ -109,6 +110,17 @@ static const AVOption options[] = {
{ "cbr_ld_hq", "Constant bitrate low delay high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) }, 0, 0, VE, "rc" },
{ "cbr_hq", "Constant bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR_HQ) }, 0, 0, VE, "rc" },
{ "vbr_hq", "Variable bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR_HQ) }, 0, 0, VE, "rc" },
#else
{ "vbr_minqp", "Variable bitrate mode with MinQP (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "ll_2pass_quality", "Multi-pass optimized for image quality (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "ll_2pass_size", "Multi-pass optimized for constant frame size (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "vbr_2pass", "Multi-pass variable bitrate mode (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "cbr_ld_hq", "Constant bitrate low delay high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "cbr_hq", "Constant bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "vbr_hq", "Variable bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
#endif
{ "rc-lookahead", "Number of frames to look ahead for rate-control",
OFFSET(rc_lookahead), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
{ "surfaces", "Number of concurrent surfaces", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, MAX_REGISTERED_FRAMES, VE },
@@ -89,6 +89,7 @@ static const AVOption options[] = {
{ "constqp", "Constant QP mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_CONSTQP }, 0, 0, VE, "rc" },
{ "vbr", "Variable bitrate mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_VBR }, 0, 0, VE, "rc" },
{ "cbr", "Constant bitrate mode", 0, AV_OPT_TYPE_CONST, { .i64 = NV_ENC_PARAMS_RC_CBR }, 0, 0, VE, "rc" },
#ifndef NVENC_NO_DEPRECATED_RC
{ "vbr_minqp", "Variable bitrate mode with MinQP (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR_MINQP) }, 0, 0, VE, "rc" },
{ "ll_2pass_quality", "Multi-pass optimized for image quality (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_2_PASS_QUALITY) }, 0, 0, VE, "rc" },
@@ -98,6 +99,17 @@ static const AVOption options[] = {
{ "cbr_ld_hq", "Constant bitrate low delay high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) }, 0, 0, VE, "rc" },
{ "cbr_hq", "Constant bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR_HQ) }, 0, 0, VE, "rc" },
{ "vbr_hq", "Variable bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR_HQ) }, 0, 0, VE, "rc" },
#else
{ "vbr_minqp", "Variable bitrate mode with MinQP (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "ll_2pass_quality", "Multi-pass optimized for image quality (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "ll_2pass_size", "Multi-pass optimized for constant frame size (deprecated)",
0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "vbr_2pass", "Multi-pass variable bitrate mode (deprecated)", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
{ "cbr_ld_hq", "Constant bitrate low delay high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "cbr_hq", "Constant bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_CBR) }, 0, 0, VE, "rc" },
{ "vbr_hq", "Variable bitrate high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RCD(NV_ENC_PARAMS_RC_VBR) }, 0, 0, VE, "rc" },
#endif
{ "rc-lookahead", "Number of frames to look ahead for rate-control",
OFFSET(rc_lookahead), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
{ "surfaces", "Number of concurrent surfaces", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, MAX_REGISTERED_FRAMES, VE },
@@ -51,9 +51,9 @@ typedef struct On2AVCContext {
AVCodecContext *avctx;
AVFloatDSPContext *fdsp;
AVTXContext *mdct, *mdct_half, *mdct_small;
AVTXContext *fft128, *fft256, *fft512, *fft1024;
AVTXContext *fft64, *fft128, *fft256, *fft512;
av_tx_fn mdct_fn, mdct_half_fn, mdct_small_fn;
av_tx_fn fft128_fn, fft256_fn, fft512_fn, fft1024_fn;
av_tx_fn fft64_fn, fft128_fn, fft256_fn, fft512_fn;
void (*wtf)(struct On2AVCContext *ctx, float *out, float *in, int size);

int is_av500;
@@ -475,16 +475,16 @@ static void wtf_end_512(On2AVCContext *c, float *out, float *src,
zero_head_and_tail(tmp1 + 256, 128, 13, 7);
zero_head_and_tail(tmp1 + 384, 128, 15, 5);

c->fft128_fn(c->fft128, src + 0, tmp1 + 0, sizeof(float));
c->fft128_fn(c->fft128, src + 128, tmp1 + 128, sizeof(float));
c->fft128_fn(c->fft128, src + 256, tmp1 + 256, sizeof(float));
c->fft128_fn(c->fft128, src + 384, tmp1 + 384, sizeof(float));
c->fft64_fn(c->fft64, src + 0, tmp1 + 0, sizeof(float));
c->fft64_fn(c->fft64, src + 128, tmp1 + 128, sizeof(float));
c->fft64_fn(c->fft64, src + 256, tmp1 + 256, sizeof(float));
c->fft64_fn(c->fft64, src + 384, tmp1 + 384, sizeof(float));

combine_fft(src, src + 128, src + 256, src + 384, tmp1,
ff_on2avc_ctab_1, ff_on2avc_ctab_2,
ff_on2avc_ctab_3, ff_on2avc_ctab_4, 512, 2);

c->fft512_fn(c->fft512, src, tmp1, sizeof(float));
c->fft256_fn(c->fft256, src, tmp1, sizeof(float));

pretwiddle(&tmp0[ 0], src, 512, 84, 4, 16, 4, ff_on2avc_tabs_20_84_1);
pretwiddle(&tmp0[128], src, 512, 84, 4, 16, 4, ff_on2avc_tabs_20_84_2);
@@ -503,16 +503,16 @@ static void wtf_end_1024(On2AVCContext *c, float *out, float *src,
zero_head_and_tail(tmp1 + 512, 256, 13, 7);
zero_head_and_tail(tmp1 + 768, 256, 15, 5);

c->fft256_fn(c->fft256, src + 0, tmp1 + 0, sizeof(float));
c->fft256_fn(c->fft256, src + 256, tmp1 + 256, sizeof(float));
c->fft256_fn(c->fft256, src + 512, tmp1 + 512, sizeof(float));
c->fft256_fn(c->fft256, src + 768, tmp1 + 768, sizeof(float));
c->fft128_fn(c->fft128, src + 0, tmp1 + 0, sizeof(float));
c->fft128_fn(c->fft128, src + 256, tmp1 + 256, sizeof(float));
c->fft128_fn(c->fft128, src + 512, tmp1 + 512, sizeof(float));
c->fft128_fn(c->fft128, src + 768, tmp1 + 768, sizeof(float));

combine_fft(src, src + 256, src + 512, src + 768, tmp1,
ff_on2avc_ctab_1, ff_on2avc_ctab_2,
ff_on2avc_ctab_3, ff_on2avc_ctab_4, 1024, 1);

c->fft1024_fn(c->fft1024, src, tmp1, sizeof(float));
c->fft512_fn(c->fft512, src, tmp1, sizeof(float));

pretwiddle(&tmp0[ 0], src, 1024, 84, 4, 16, 4, ff_on2avc_tabs_20_84_1);
pretwiddle(&tmp0[256], src, 1024, 84, 4, 16, 4, ff_on2avc_tabs_20_84_2);
@@ -700,7 +700,7 @@ static int on2avc_reconstruct_channel_ext(On2AVCContext *c, AVFrame *dst, int of
break;
case WINDOW_TYPE_EXT5:
c->wtf(c, buf, in, 512);
c->mdct_half_fn(c->mdct, buf + 512, in + 512, sizeof(float));
c->mdct_half_fn(c->mdct_half, buf + 512, in + 512, sizeof(float));
for (i = 0; i < 256; i++) {
FFSWAP(float, buf[i + 512], buf[1023 - i]);
}
@@ -956,14 +956,14 @@ static av_cold int on2avc_decode_init(AVCodecContext *avctx)
if ((ret = av_tx_init(&c->mdct_small, &c->mdct_small_fn, AV_TX_FLOAT_MDCT, 1, 128, &scale, 0)) < 0)
return ret;

if ((ret = av_tx_init(&c->fft1024, &c->fft1024_fn, AV_TX_FLOAT_FFT, 1, 1024, NULL, 0)) < 0)
return ret;
if ((ret = av_tx_init(&c->fft512, &c->fft512_fn, AV_TX_FLOAT_FFT, 1, 512, NULL, 0)) < 0)
return ret;
if ((ret = av_tx_init(&c->fft256, &c->fft256_fn, AV_TX_FLOAT_FFT, 0, 256, NULL, 0)) < 0)
if ((ret = av_tx_init(&c->fft256, &c->fft256_fn, AV_TX_FLOAT_FFT, 1, 256, NULL, 0)) < 0)
return ret;
if ((ret = av_tx_init(&c->fft128, &c->fft128_fn, AV_TX_FLOAT_FFT, 0, 128, NULL, 0)) < 0)
return ret;
if ((ret = av_tx_init(&c->fft64, &c->fft64_fn, AV_TX_FLOAT_FFT, 0, 64, NULL, 0)) < 0)
return ret;

c->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
if (!c->fdsp)
@@ -998,10 +998,10 @@ static av_cold int on2avc_decode_close(AVCodecContext *avctx)
av_tx_uninit(&c->mdct);
av_tx_uninit(&c->mdct_half);
av_tx_uninit(&c->mdct_small);
av_tx_uninit(&c->fft64);
av_tx_uninit(&c->fft128);
av_tx_uninit(&c->fft256);
av_tx_uninit(&c->fft512);
av_tx_uninit(&c->fft1024);

av_freep(&c->fdsp);
@@ -578,7 +578,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \
FF_CODEC_DECODE_CB(pcm_decode_frame), \
.p.capabilities = AV_CODEC_CAP_DR1, \
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE, \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
}
@@ -647,6 +647,8 @@ static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
int ret;
size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

if (!p)
return AVERROR_INVALIDDATA;
if (!(s->hdr_state & PNG_IHDR)) {
av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
return AVERROR_INVALIDDATA;
@@ -883,7 +885,7 @@ static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
return 0;
}

static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb, AVFrame *f)
static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb)
{
int ret, cnt = 0;
AVBPrint bp;
@@ -1338,7 +1340,7 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
s->have_srgb = 1;
break;
case MKTAG('i', 'C', 'C', 'P'): {
if ((ret = decode_iccp_chunk(s, &gb_chunk, p)) < 0)
if ((ret = decode_iccp_chunk(s, &gb_chunk)) < 0)
goto fail;
break;
}
@@ -1383,6 +1385,9 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
}
exit_loop:

if (!p)
return AVERROR_INVALIDDATA;

if (avctx->codec_id == AV_CODEC_ID_PNG &&
avctx->skip_frame == AVDISCARD_ALL) {
return 0;
@@ -1643,7 +1648,7 @@ static int decode_frame_apng(AVCodecContext *avctx, AVFrame *p,
if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
return AVERROR_EXTERNAL;
bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
if ((ret = decode_frame_common(avctx, s, NULL, avpkt)) < 0)
return ret;
}
@@ -207,7 +207,7 @@ static int chctx_init(RKAContext *s, ChContext *c,
c->bprob[0] = s->bprob[0];
c->bprob[1] = s->bprob[1];

c->srate_pad = (sample_rate << 13) / 44100 & 0xFFFFFFFCU;
c->srate_pad = ((int64_t)sample_rate << 13) / 44100 & 0xFFFFFFFCU;
c->pos_idx = 1;

for (int i = 0; i < FF_ARRAY_ELEMS(s->bprob[0]); i++)
@@ -732,25 +732,25 @@ static int decode_filter(RKAContext *s, ChContext *ctx, ACoder *ac, int off, uns
if (bits == 0) {
ctx->buf1[off] = sum + val;
} else {
ctx->buf1[off] = (val + (sum >> bits)) * (1 << bits) +
ctx->buf1[off] = (val + (sum >> bits)) * (1U << bits) +
(((1U << bits) - 1U) & ctx->buf1[off + -1]);
}
ctx->buf0[off] = ctx->buf1[off] + ctx->buf0[off + -1];
ctx->buf0[off] = ctx->buf1[off] + (unsigned)ctx->buf0[off + -1];
} else {
val *= 1 << ctx->cmode;
sum += ctx->buf0[off + -1] + val;
val *= 1U << ctx->cmode;
sum += ctx->buf0[off + -1] + (unsigned)val;
switch (s->bps) {
case 16: sum = av_clip_int16(sum); break;
case 8: sum = av_clip_int8(sum); break;
}
ctx->buf1[off] = sum - ctx->buf0[off + -1];
ctx->buf0[off] = sum;
m += FFABS(ctx->buf1[off]);
m += (unsigned)FFABS(ctx->buf1[off]);
}
}
if (ctx->cmode2 != 0) {
int sum = 0;
for (int i = (m << 6) / split; i > 0; i = i >> 1)
for (int i = (signed)((unsigned)m << 6) / split; i > 0; i = i >> 1)
sum++;
sum = sum - (ctx->cmode2 + 7);
ctx->cmode = FFMAX(sum, tab[ctx->cmode2]);
@@ -129,8 +129,10 @@ static av_cold int encode_init(AVCodecContext *avctx)
if (ret)
return ret;

ff_set_cmp(&s->mecc, s->mecc.me_cmp, s->avctx->me_cmp);
ff_set_cmp(&s->mecc, s->mecc.me_sub_cmp, s->avctx->me_sub_cmp);
ret = ff_set_cmp(&s->mecc, s->mecc.me_cmp, s->avctx->me_cmp);
ret |= ff_set_cmp(&s->mecc, s->mecc.me_sub_cmp, s->avctx->me_sub_cmp);
if (ret < 0)
return AVERROR(EINVAL);

s->input_picture = av_frame_alloc();
if (!s->input_picture)
@@ -1551,10 +1553,10 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){
int level, orientation, x, y;

for(level=0; level<s->spatial_decomposition_count; level++){
int64_t error=0;
for(orientation=level ? 1 : 0; orientation<4; orientation++){
SubBand *b= &p->band[level][orientation];
IDWTELEM *ibuf= b->ibuf;
int64_t error=0;

memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
@@ -1565,9 +1567,13 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){
error += d*d;
}
}

if (orientation == 2)
error /= 2;
b->qlog= (int)(QROOT * log2(352256.0/sqrt(error)) + 0.5);
if (orientation != 1)
error = 0;
}
p->band[level][1].qlog = p->band[level][2].qlog;
}
}
@@ -473,7 +473,7 @@ static void predictor_init_state(int *k, int *state, int order)

static int predictor_calc_error(int *k, int *state, int order, int error)
{
int i, x = error - shift_down(k[order-1] * (unsigned)state[order-1], LATTICE_SHIFT);
int i, x = error - (unsigned)shift_down(k[order-1] * (unsigned)state[order-1], LATTICE_SHIFT);

#if 1
int *k_ptr = &(k[order-2]),
@@ -1013,7 +1013,7 @@ static int sonic_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if (s->lossless)
quant = 1;
else
quant = get_symbol(&c, state, 0) * SAMPLE_FACTOR;
quant = get_symbol(&c, state, 0) * (unsigned)SAMPLE_FACTOR;

// av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant);
@@ -169,6 +169,9 @@ int ff_tak_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
if (ti->flags & TAK_FRAME_FLAG_HAS_METADATA)
return AVERROR_INVALIDDATA;

if (get_bits_left(gb) < 24)
return AVERROR_INVALIDDATA;

skip_bits(gb, 24);

return 0;
@@ -28,8 +28,8 @@ static void decorrelate_ls(int32_t *p1, int32_t *p2, int length)
int i;

for (i = 0; i < length; i++) {
int32_t a = p1[i];
int32_t b = p2[i];
uint32_t a = p1[i];
uint32_t b = p2[i];
p2[i] = a + b;
}
}
@@ -39,8 +39,8 @@ static void decorrelate_sr(int32_t *p1, int32_t *p2, int length)
int i;

for (i = 0; i < length; i++) {
int32_t a = p1[i];
int32_t b = p2[i];
uint32_t a = p1[i];
uint32_t b = p2[i];
p1[i] = b - a;
}
}
@@ -50,7 +50,7 @@ static void decorrelate_sm(int32_t *p1, int32_t *p2, int length)
int i;

for (i = 0; i < length; i++) {
int32_t a = p1[i];
uint32_t a = p1[i];
int32_t b = p2[i];
a -= b >> 1;
p1[i] = a;
@@ -63,7 +63,7 @@ static void decorrelate_sf(int32_t *p1, int32_t *p2, int length, int dshift, int
int i;

for (i = 0; i < length; i++) {
int32_t a = p1[i];
uint32_t a = p1[i];
int32_t b = p2[i];
b = (unsigned)((int)(dfactor * (unsigned)(b >> dshift) + 128) >> 8) << dshift;
p1[i] = b - a;
@@ -31,11 +31,13 @@ int main(void){
#define width 256
#define height 256
int buffer[2][width*height];
short obuffer[width*height];
SnowContext s;
int i;
AVLFG prng;
s.spatial_decomposition_count=6;
s.spatial_decomposition_type=1;
int ret = 0;

s.temp_dwt_buffer = av_calloc(width, sizeof(*s.temp_dwt_buffer));
s.temp_idwt_buffer = av_calloc(width, sizeof(*s.temp_idwt_buffer));
@@ -49,24 +51,34 @@ int main(void){

printf("testing 5/3 DWT\n");
for(i=0; i<width*height; i++)
buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 19000 - 9000;

ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
for(i=0; i<width*height; i++)
obuffer[i] = buffer[0][i];
ff_spatial_idwt(obuffer, s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);

for(i=0; i<width*height; i++)
if(buffer[0][i]!= buffer[1][i]) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
if(buffer[1][i]!= obuffer[i]) {
printf("fsck: %4dx%4dx %12d %7d\n",i%width, i/width, buffer[1][i], obuffer[i]);
ret = 1;
}

printf("testing 9/7 DWT\n");
s.spatial_decomposition_type=0;
for(i=0; i<width*height; i++)
buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 11000 - 5000;

ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
for(i=0; i<width*height; i++)
obuffer[i] = buffer[0][i];
ff_spatial_idwt(obuffer, s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);

for(i=0; i<width*height; i++)
if(FFABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
if(FFABS(buffer[1][i] - obuffer[i])>20) {
printf("fsck: %4dx%4d %12d %7d\n",i%width, i/width, buffer[1][i], obuffer[i]);
ret = 1;
}

{
int level, orientation, x, y;
@@ -81,18 +93,18 @@ int main(void){
int w= width >> (s.spatial_decomposition_count-level);
int h= height >> (s.spatial_decomposition_count-level);
int stride= width << (s.spatial_decomposition_count-level);
DWTELEM *buf= buffer[0];
IDWTELEM *buf= obuffer;
int64_t error=0;

if(orientation&1) buf+=w;
if(orientation>1) buf+=stride>>1;

memset(buffer[0], 0, sizeof(int)*width*height);
buf[w/2 + h/2*stride]= 256*256;
ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
memset(obuffer, 0, sizeof(short)*width*height);
buf[w/2 + h/2*stride]= 8*256;
ff_spatial_idwt(obuffer, s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
for(y=0; y<height; y++){
for(x=0; x<width; x++){
int64_t d= buffer[0][x + y*width];
int64_t d= obuffer[x + y*width];
error += d*d;
if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9 && level==2) printf("%8"PRId64" ", d);
}
@@ -132,5 +144,5 @@ int main(void){
}

}
return 0;
return ret;
}
@@ -1451,7 +1451,7 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
break;
case TIFF_GRAY_RESPONSE_CURVE:
case DNG_LINEARIZATION_TABLE:
if (count > FF_ARRAY_ELEMS(s->dng_lut))
if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
return AVERROR_INVALIDDATA;
for (int i = 0; i < count; i++)
s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
@@ -342,7 +342,7 @@ static int tta_decode_frame(AVCodecContext *avctx, AVFrame *frame,
if (s->channels > 1) {
int32_t *r = p - 1;
for (*p += *r / 2; r > (int32_t*)p - s->channels; r--)
*r = *(r + 1) - *r;
*r = *(r + 1) - (unsigned)*r;
}
cur_chan = 0;
i++;
@@ -317,7 +317,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
}

if (s->codec_id == AV_CODEC_ID_IFF_ILBM) {
w_align = FFMAX(w_align, 8);
w_align = FFMAX(w_align, 16);
}

*width = FFALIGN(*width, w_align);
@@ -641,9 +641,9 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
if (sr > 0) {
/* calc from sample rate */
if (id == AV_CODEC_ID_TTA)
return 256 * sr / 245;
return 256ll * sr / 245;
else if (id == AV_CODEC_ID_DST)
return 588 * sr / 44100;
return 588ll * sr / 44100;
else if (id == AV_CODEC_ID_BINKAUDIO_DCT) {
if (sr / 22050 > 22)
return 0;
@@ -75,8 +75,9 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
info->f_code[1][0] = s->mpeg_f_code[1][0];
info->f_code[1][1] = s->mpeg_f_code[1][1];
for (i = 0; i < 64; ++i) {
info->intra_quantizer_matrix[i] = s->intra_matrix[i];
info->non_intra_quantizer_matrix[i] = s->inter_matrix[i];
int n = s->idsp.idct_permutation[i];
info->intra_quantizer_matrix[i] = s->intra_matrix[n];
info->non_intra_quantizer_matrix[i] = s->inter_matrix[n];
}

return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
@@ -74,8 +74,9 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
info->alternate_vertical_scan_flag = s->alternate_scan;
info->top_field_first = s->top_field_first;
for (i = 0; i < 64; ++i) {
info->intra_quantizer_matrix[i] = s->intra_matrix[i];
info->non_intra_quantizer_matrix[i] = s->inter_matrix[i];
int n = s->idsp.idct_permutation[i];
info->intra_quantizer_matrix[i] = s->intra_matrix[n];
info->non_intra_quantizer_matrix[i] = s->inter_matrix[n];
}

ff_vdpau_common_start_frame(pic_ctx, buffer, size);
@@ -38,6 +38,7 @@
#include "codec_internal.h"
#include "decode.h"
#include "get_bits.h"
#include "internal.h"
#include "vorbis.h"
#include "vorbisdsp.h"
#include "vorbis_data.h"
@@ -134,7 +135,6 @@ typedef struct vorbis_context_s {
av_tx_fn mdct_fn[2];

uint8_t first_frame;
int64_t initial_pts;
uint32_t version;
uint8_t audio_channels;
uint32_t audio_samplerate;
@@ -368,6 +368,10 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
unsigned codebook_value_bits = get_bits(gb, 4) + 1;
unsigned codebook_sequence_p = get_bits1(gb);

if (!isfinite(codebook_minimum_value) || !isfinite(codebook_delta_value)) {
ret = AVERROR_INVALIDDATA;
goto error;
}
ff_dlog(NULL, " We expect %d numbers for building the codevectors. \n",
codebook_lookup_values);
ff_dlog(NULL, " delta %f minmum %f \n",
@@ -1839,13 +1843,7 @@ static int vorbis_decode_frame(AVCodecContext *avctx, AVFrame *frame,

if (!vc->first_frame) {
vc->first_frame = 1;
vc->initial_pts = frame->pts;
}

if (frame->pts == vc->initial_pts) {
*got_frame_ptr = 0;
av_frame_unref(frame);
return buf_size;
avctx->internal->skip_samples = len;
}

ff_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
@@ -1877,6 +1875,7 @@ static av_cold void vorbis_decode_flush(AVCodecContext *avctx)
sizeof(*vc->saved));
}
vc->previous_window = -1;
vc->first_frame = 0;
}

const FFCodec ff_vorbis_decoder = {
@ -2353,6 +2353,8 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
|
||||
s->avctx = avctx;
|
||||
s->width = FFALIGN(avctx->coded_width, 16);
|
||||
s->height = FFALIGN(avctx->coded_height, 16);
|
||||
if (s->width < 18)
|
||||
return AVERROR_PATCHWELCOME;
|
||||
if (avctx->codec_id != AV_CODEC_ID_THEORA)
|
||||
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
|
||||
@ -2919,7 +2921,9 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
|
||||
/* sanity check */
|
||||
if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
|
||||
visible_width + offset_x > s->width ||
|
||||
visible_height + offset_y > s->height) {
|
||||
visible_height + offset_y > s->height ||
|
||||
visible_width < 18
|
||||
) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
|
||||
visible_width, visible_height, offset_x, offset_y,
|
||||
@ -2965,6 +2969,8 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
|
||||
} else
|
||||
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if (s->width < 18)
|
||||
return AVERROR_PATCHWELCOME;
|
||||
ret = ff_set_dimensions(avctx, s->width, s->height);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
@@ -98,7 +98,7 @@ static av_cold int wavarc_init(AVCodecContext *avctx)
}

s->max_framesize = s->nb_samples * 16;
s->bitstream = av_calloc(s->max_framesize, sizeof(*s->bitstream));
s->bitstream = av_calloc(s->max_framesize + AV_INPUT_BUFFER_PADDING_SIZE, sizeof(*s->bitstream));
if (!s->bitstream)
return AVERROR(ENOMEM);

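Note: this matches the changelog entry "avcodec/wavarc: Allocate AV_INPUT_BUFFER_PADDING_SIZE" — buffers fed to libavcodec's bitstream readers are expected to carry zeroed padding so small over-reads stay inside the allocation. A hedged sketch of that convention (the helper name is mine; only the macro is FFmpeg's):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <libavcodec/avcodec.h>   /* AV_INPUT_BUFFER_PADDING_SIZE */

    /* Copy a payload into a buffer with the zeroed padding libavcodec's
     * bitstream readers expect, so a reader that fetches a few bytes past
     * the end still stays inside the allocation and sees zeros. */
    static uint8_t *copy_with_padding(const uint8_t *src, size_t size)
    {
        uint8_t *buf = malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!buf)
            return NULL;
        memcpy(buf, src, size);
        memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        return buf;
    }

    int main(void)
    {
        const uint8_t payload[4] = { 1, 2, 3, 4 };
        uint8_t *padded = copy_with_padding(payload, sizeof(payload));
        free(padded);
        return 0;
    }
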
@@ -141,11 +141,11 @@ static void do_stereo(WavArcContext *s, int ch, int correlated, int len)
} else {
if (correlated) {
for (int n = 0; n < nb_samples; n++)
s->samples[1][n + len] += s->samples[0][n + len];
s->samples[1][n + len] += (unsigned)s->samples[0][n + len];
}
for (int n = 0; n < len; n++) {
s->pred[0][n] = s->samples[1][nb_samples + n];
s->pred[1][n] = s->pred[0][n] - s->samples[0][nb_samples + n];
s->pred[1][n] = s->pred[0][n] - (unsigned)s->samples[0][nb_samples + n];
}
}
}

@ -192,7 +192,7 @@ static int decode_1dif(AVCodecContext *avctx,
|
||||
if (block_type < 4 && block_type >= 0) {
|
||||
k = 1 + (avctx->sample_fmt == AV_SAMPLE_FMT_S16P);
|
||||
k = get_urice(gb, k) + 1;
|
||||
if (k > 32)
|
||||
if (k >= 32)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@ -205,6 +205,10 @@ static int decode_1dif(AVCodecContext *avctx,
|
||||
continue;
|
||||
case 6:
|
||||
s->shift = get_urice(gb, 2);
|
||||
if ((unsigned)s->shift > 31) {
|
||||
s->shift = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
continue;
|
||||
case 5:
|
||||
if (avctx->sample_fmt == AV_SAMPLE_FMT_U8P) {
|
||||
@ -284,7 +288,7 @@ static int decode_2slp(AVCodecContext *avctx,
|
||||
if (block_type < 5 && block_type >= 0) {
|
||||
k = 1 + (avctx->sample_fmt == AV_SAMPLE_FMT_S16P);
|
||||
k = get_urice(gb, k) + 1;
|
||||
if (k > 32)
|
||||
if (k >= 32)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@ -294,13 +298,17 @@ static int decode_2slp(AVCodecContext *avctx,
|
||||
return AVERROR_EOF;
|
||||
case 8:
|
||||
s->nb_samples = get_urice(gb, 8);
|
||||
if (s->nb_samples > 570) {
|
||||
if (s->nb_samples > 570U) {
|
||||
s->nb_samples = 570;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
continue;
|
||||
case 7:
|
||||
s->shift = get_urice(gb, 2);
|
||||
if ((unsigned)s->shift > 31) {
|
||||
s->shift = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
continue;
|
||||
case 6:
|
||||
if (avctx->sample_fmt == AV_SAMPLE_FMT_U8P) {
|
||||
@ -343,13 +351,15 @@ static int decode_2slp(AVCodecContext *avctx,
|
||||
break;
|
||||
case 0:
|
||||
order = get_urice(gb, 2);
|
||||
if ((unsigned)order >= FF_ARRAY_ELEMS(s->filter[ch]))
|
||||
return AVERROR_INVALIDDATA;
|
||||
for (int o = 0; o < order; o++)
|
||||
s->filter[ch][o] = get_srice(gb, 2);
|
||||
for (int n = 0; n < s->nb_samples; n++) {
|
||||
int sum = 15;
|
||||
|
||||
for (int o = 0; o < order; o++)
|
||||
sum += s->filter[ch][o] * samples[n + 70 - o - 1];
|
||||
sum += s->filter[ch][o] * (unsigned)samples[n + 70 - o - 1];
|
||||
|
||||
samples[n + 70] = get_srice(gb, k) + (sum >> 4);
|
||||
}
|
||||
@ -452,7 +462,7 @@ fail:
|
||||
const int *src = s->samples[ch] + s->offset;
|
||||
|
||||
for (int n = 0; n < frame->nb_samples; n++)
|
||||
dst[n] = src[n] * (1 << s->shift) + 0x80U;
|
||||
dst[n] = src[n] * (1U << s->shift) + 0x80U;
|
||||
}
|
||||
break;
|
||||
case AV_SAMPLE_FMT_S16P:
|
||||
@ -461,7 +471,7 @@ fail:
|
||||
const int *src = s->samples[ch] + s->offset;
|
||||
|
||||
for (int n = 0; n < frame->nb_samples; n++)
|
||||
dst[n] = src[n] * (1 << s->shift);
|
||||
dst[n] = src[n] * (1U << s->shift);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -35,12 +35,20 @@
|
||||
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
|
||||
{
|
||||
int rt, dummy;
|
||||
if (__builtin_constant_p(shift))
|
||||
__asm__ (
|
||||
"imull %3 \n\t"
|
||||
"shrdl %4, %%edx, %%eax \n\t"
|
||||
:"=a"(rt), "=d"(dummy)
|
||||
:"a"(a), "rm"(b), "ci"((uint8_t)shift)
|
||||
:"a"(a), "rm"(b), "i"(shift & 0x1F)
|
||||
);
|
||||
else
|
||||
__asm__ (
|
||||
"imull %3 \n\t"
|
||||
"shrdl %4, %%edx, %%eax \n\t"
|
||||
:"=a"(rt), "=d"(dummy)
|
||||
:"a"(a), "rm"(b), "c"((uint8_t)shift)
|
||||
);
|
||||
return rt;
|
||||
}
|
||||
|
||||
@ -113,19 +121,31 @@ __asm__ volatile(\
|
||||
// avoid +32 for shift optimization (gcc should do that ...)
|
||||
#define NEG_SSR32 NEG_SSR32
|
||||
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
|
||||
if (__builtin_constant_p(s))
|
||||
__asm__ ("sarl %1, %0\n\t"
|
||||
: "+r" (a)
|
||||
: "ic" ((uint8_t)(-s))
|
||||
: "i" (-s & 0x1F)
|
||||
);
|
||||
else
|
||||
__asm__ ("sarl %1, %0\n\t"
|
||||
: "+r" (a)
|
||||
: "c" ((uint8_t)(-s))
|
||||
);
|
||||
return a;
|
||||
}
|
||||
|
||||
#define NEG_USR32 NEG_USR32
|
||||
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
|
||||
if (__builtin_constant_p(s))
|
||||
__asm__ ("shrl %1, %0\n\t"
|
||||
: "+r" (a)
|
||||
: "ic" ((uint8_t)(-s))
|
||||
: "i" (-s & 0x1F)
|
||||
);
|
||||
else
|
||||
__asm__ ("shrl %1, %0\n\t"
|
||||
: "+r" (a)
|
||||
: "c" ((uint8_t)(-s))
|
||||
);
|
||||
return a;
|
||||
}
|
||||
|
||||
|
@ -56,37 +56,37 @@ static const int TAB35[] = { 26722, 25172, 22654, 19266, 15137, 10426, 5315 };
|
||||
|
||||
static int idct_row(short *in, const int *const tab, int rnd)
|
||||
{
|
||||
const int c1 = tab[0];
|
||||
const int c2 = tab[1];
|
||||
const int c3 = tab[2];
|
||||
const int c4 = tab[3];
|
||||
const int c5 = tab[4];
|
||||
const int c6 = tab[5];
|
||||
const int c7 = tab[6];
|
||||
const unsigned c1 = tab[0];
|
||||
const unsigned c2 = tab[1];
|
||||
const unsigned c3 = tab[2];
|
||||
const unsigned c4 = tab[3];
|
||||
const unsigned c5 = tab[4];
|
||||
const unsigned c6 = tab[5];
|
||||
const unsigned c7 = tab[6];
|
||||
|
||||
const int right = in[5] | in[6] | in[7];
|
||||
const int left = in[1] | in[2] | in[3];
|
||||
if (!(right | in[4])) {
|
||||
const int k = c4 * in[0] + rnd;
|
||||
if (left) {
|
||||
const int a0 = k + c2 * in[2];
|
||||
const int a1 = k + c6 * in[2];
|
||||
const int a2 = k - c6 * in[2];
|
||||
const int a3 = k - c2 * in[2];
|
||||
const unsigned a0 = k + c2 * in[2];
|
||||
const unsigned a1 = k + c6 * in[2];
|
||||
const unsigned a2 = k - c6 * in[2];
|
||||
const unsigned a3 = k - c2 * in[2];
|
||||
|
||||
const int b0 = c1 * in[1] + c3 * in[3];
|
||||
const int b1 = c3 * in[1] - c7 * in[3];
|
||||
const int b2 = c5 * in[1] - c1 * in[3];
|
||||
const int b3 = c7 * in[1] - c5 * in[3];
|
||||
|
||||
in[0] = (a0 + b0) >> ROW_SHIFT;
|
||||
in[1] = (a1 + b1) >> ROW_SHIFT;
|
||||
in[2] = (a2 + b2) >> ROW_SHIFT;
|
||||
in[3] = (a3 + b3) >> ROW_SHIFT;
|
||||
in[4] = (a3 - b3) >> ROW_SHIFT;
|
||||
in[5] = (a2 - b2) >> ROW_SHIFT;
|
||||
in[6] = (a1 - b1) >> ROW_SHIFT;
|
||||
in[7] = (a0 - b0) >> ROW_SHIFT;
|
||||
in[0] = (int)(a0 + b0) >> ROW_SHIFT;
|
||||
in[1] = (int)(a1 + b1) >> ROW_SHIFT;
|
||||
in[2] = (int)(a2 + b2) >> ROW_SHIFT;
|
||||
in[3] = (int)(a3 + b3) >> ROW_SHIFT;
|
||||
in[4] = (int)(a3 - b3) >> ROW_SHIFT;
|
||||
in[5] = (int)(a2 - b2) >> ROW_SHIFT;
|
||||
in[6] = (int)(a1 - b1) >> ROW_SHIFT;
|
||||
in[7] = (int)(a0 - b0) >> ROW_SHIFT;
|
||||
} else {
|
||||
const int a0 = k >> ROW_SHIFT;
|
||||
if (a0) {
|
||||
@ -102,8 +102,8 @@ static int idct_row(short *in, const int *const tab, int rnd)
|
||||
return 0;
|
||||
}
|
||||
} else if (!(left | right)) {
|
||||
const int a0 = (rnd + c4 * (in[0] + in[4])) >> ROW_SHIFT;
|
||||
const int a1 = (rnd + c4 * (in[0] - in[4])) >> ROW_SHIFT;
|
||||
const int a0 = (int)(rnd + c4 * (in[0] + in[4])) >> ROW_SHIFT;
|
||||
const int a1 = (int)(rnd + c4 * (in[0] - in[4])) >> ROW_SHIFT;
|
||||
|
||||
in[0] = a0;
|
||||
in[3] = a0;
|
||||
@ -114,7 +114,7 @@ static int idct_row(short *in, const int *const tab, int rnd)
|
||||
in[5] = a1;
|
||||
in[6] = a1;
|
||||
} else {
|
||||
const int k = c4 * in[0] + rnd;
|
||||
const unsigned int k = c4 * in[0] + rnd;
|
||||
const unsigned int a0 = k + c2 * in[2] + c4 * in[4] + c6 * in[6];
|
||||
const unsigned int a1 = k + c6 * in[2] - c4 * in[4] - c2 * in[6];
|
||||
const unsigned int a2 = k - c6 * in[2] - c4 * in[4] + c2 * in[6];
|
||||
|
@@ -291,7 +291,7 @@ static int asink_query_formats(AVFilterContext *ctx)
cleanup_redundant_layouts(ctx);
for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
if ((ret = av_channel_layout_from_mask(&layout, buf->channel_layouts[i])) < 0 ||
(ret = ff_add_channel_layout(&layouts, &layout) < 0))
(ret = ff_add_channel_layout(&layouts, &layout)) < 0)
return ret;
for (i = 0; i < NB_ITEMS(buf->channel_counts); i++) {
layout = FF_COUNT2LAYOUT(buf->channel_counts[i]);

@@ -288,7 +288,7 @@ int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe,
if (need_copy) {
if (!(frame = av_frame_clone(frame)))
return AVERROR(ENOMEM);
if ((ret = ff_inlink_make_frame_writable(fs->parent->inputs[in], &frame) < 0)) {
if ((ret = ff_inlink_make_frame_writable(fs->parent->inputs[in], &frame)) < 0) {
av_frame_free(&frame);
return ret;
}

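Note: the buffersink and framesync hunks above fix the same operator-precedence slip — in (ret = f() < 0) the comparison binds before the assignment, so ret ends up holding 0 or 1 rather than the error code. An illustrative stand-alone reproduction (the function is a stand-in, not an FFmpeg call):

    #include <stdio.h>

    static int might_fail(void) { return -42; }   /* stand-in for an FFmpeg call */

    int main(void)
    {
        int ret;

        ret = (might_fail() < 0);        /* buggy form: ret == 1, the error code is lost */
        printf("buggy form:   ret = %d\n", ret);

        if ((ret = might_fail()) < 0)    /* fixed form: assign first, then compare */
            printf("correct form: ret = %d\n", ret);

        return 0;
    }
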
@ -532,8 +532,7 @@ int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
|
||||
for (size_t j = 0; j < ch->nb_filters; j++) {
|
||||
AVFilterParams *p = ch->filters[j];
|
||||
const AVFilter *f = avfilter_get_by_name(p->filter_name);
|
||||
char inst_name[30], *name = p->instance_name ? p->instance_name :
|
||||
inst_name;
|
||||
char name[64];
|
||||
|
||||
// skip already processed filters
|
||||
if (p->filter || !p->filter_name)
|
||||
@ -546,7 +545,9 @@ int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
|
||||
}
|
||||
|
||||
if (!p->instance_name)
|
||||
snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%zu", f->name, idx);
|
||||
snprintf(name, sizeof(name), "Parsed_%s_%zu", f->name, idx);
|
||||
else
|
||||
snprintf(name, sizeof(name), "%s@%s", f->name, p->instance_name);
|
||||
|
||||
p->filter = avfilter_graph_alloc_filter(seg->graph, f, name);
|
||||
if (!p->filter)
|
||||
|
@ -134,8 +134,8 @@ static int activate(AVFilterContext *ctx)
|
||||
if (!(s->desc->flags & AV_PIX_FMT_FLAG_PAL)) {
|
||||
for (i = 1; i < 3; i ++) {
|
||||
if (out->data[i]) {
|
||||
out->data[i] += (y >> s->desc->log2_chroma_w) * out->linesize[i];
|
||||
out->data[i] += (x >> s->desc->log2_chroma_h) * s->max_step[i];
|
||||
out->data[i] += (y >> s->desc->log2_chroma_h) * out->linesize[i];
|
||||
out->data[i] += (x >> s->desc->log2_chroma_w) * s->max_step[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -602,6 +602,7 @@ not_ready:
|
||||
return FFERROR_NOT_READY;
|
||||
|
||||
eof:
|
||||
pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
|
||||
ff_outlink_set_status(outlink, status, pts);
|
||||
return 0;
|
||||
}
|
||||
|
@ -533,8 +533,23 @@ static int update_mouse_pointer(AVFilterContext *avctx, DXGI_OUTDUPL_FRAME_INFO
|
||||
return 0;
|
||||
|
||||
if (frame_info->PointerPosition.Visible) {
|
||||
dda->mouse_x = frame_info->PointerPosition.Position.x;
|
||||
dda->mouse_y = frame_info->PointerPosition.Position.y;
|
||||
switch (dda->output_desc.Rotation) {
|
||||
case DXGI_MODE_ROTATION_ROTATE90:
|
||||
dda->mouse_x = frame_info->PointerPosition.Position.y;
|
||||
dda->mouse_y = dda->output_desc.DesktopCoordinates.right - dda->output_desc.DesktopCoordinates.left - frame_info->PointerPosition.Position.x - 1;
|
||||
break;
|
||||
case DXGI_MODE_ROTATION_ROTATE180:
|
||||
dda->mouse_x = dda->output_desc.DesktopCoordinates.right - dda->output_desc.DesktopCoordinates.left - frame_info->PointerPosition.Position.x - 1;
|
||||
dda->mouse_y = dda->output_desc.DesktopCoordinates.bottom - dda->output_desc.DesktopCoordinates.top - frame_info->PointerPosition.Position.y - 1;
|
||||
break;
|
||||
case DXGI_MODE_ROTATION_ROTATE270:
|
||||
dda->mouse_x = dda->output_desc.DesktopCoordinates.bottom - dda->output_desc.DesktopCoordinates.top - frame_info->PointerPosition.Position.y - 1;
|
||||
dda->mouse_y = frame_info->PointerPosition.Position.x;
|
||||
break;
|
||||
default:
|
||||
dda->mouse_x = frame_info->PointerPosition.Position.x;
|
||||
dda->mouse_y = frame_info->PointerPosition.Position.y;
|
||||
}
|
||||
} else {
|
||||
dda->mouse_x = dda->mouse_y = -1;
|
||||
}
|
||||
@ -585,7 +600,7 @@ static int update_mouse_pointer(AVFilterContext *avctx, DXGI_OUTDUPL_FRAME_INFO
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int next_frame_internal(AVFilterContext *avctx, ID3D11Texture2D **desktop_texture)
|
||||
static int next_frame_internal(AVFilterContext *avctx, ID3D11Texture2D **desktop_texture, int need_frame)
|
||||
{
|
||||
DXGI_OUTDUPL_FRAME_INFO frame_info;
|
||||
DdagrabContext *dda = avctx->priv;
|
||||
@ -608,18 +623,32 @@ static int next_frame_internal(AVFilterContext *avctx, ID3D11Texture2D **desktop
|
||||
if (dda->draw_mouse) {
|
||||
ret = update_mouse_pointer(avctx, &frame_info);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (need_frame && (!frame_info.LastPresentTime.QuadPart || !frame_info.AccumulatedFrames)) {
|
||||
ret = AVERROR(EAGAIN);
|
||||
goto error;
|
||||
}
|
||||
|
||||
hr = IDXGIResource_QueryInterface(desktop_resource, &IID_ID3D11Texture2D, (void**)desktop_texture);
|
||||
IDXGIResource_Release(desktop_resource);
|
||||
desktop_resource = NULL;
|
||||
release_resource(&desktop_resource);
|
||||
if (FAILED(hr)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "DXGIResource QueryInterface failed\n");
|
||||
return AVERROR_EXTERNAL;
|
||||
ret = AVERROR_EXTERNAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
release_resource(&desktop_resource);
|
||||
|
||||
hr = IDXGIOutputDuplication_ReleaseFrame(dda->dxgi_outdupl);
|
||||
if (FAILED(hr))
|
||||
av_log(avctx, AV_LOG_ERROR, "DDA error ReleaseFrame failed!\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int probe_output_format(AVFilterContext *avctx)
|
||||
@ -631,7 +660,7 @@ static int probe_output_format(AVFilterContext *avctx)
|
||||
av_assert1(!dda->probed_texture);
|
||||
|
||||
do {
|
||||
ret = next_frame_internal(avctx, &dda->probed_texture);
|
||||
ret = next_frame_internal(avctx, &dda->probed_texture, 1);
|
||||
} while(ret == AVERROR(EAGAIN));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@ -839,6 +868,41 @@ static int draw_mouse_pointer(AVFilterContext *avctx, AVFrame *frame)
|
||||
D3D11_SUBRESOURCE_DATA init_data = { 0 };
|
||||
D3D11_BUFFER_DESC buf_desc = { 0 };
|
||||
|
||||
switch (dda->output_desc.Rotation) {
|
||||
case DXGI_MODE_ROTATION_ROTATE90:
|
||||
vertices[ 0] = x; vertices[ 1] = y;
|
||||
vertices[ 5] = x; vertices[ 6] = y - tex_desc.Width;
|
||||
vertices[10] = x + tex_desc.Height; vertices[11] = y;
|
||||
vertices[15] = x + tex_desc.Height; vertices[16] = y - tex_desc.Width;
|
||||
vertices[ 3] = 0.0f; vertices[ 4] = 0.0f;
|
||||
vertices[ 8] = 1.0f; vertices[ 9] = 0.0f;
|
||||
vertices[13] = 0.0f; vertices[14] = 1.0f;
|
||||
vertices[18] = 1.0f; vertices[19] = 1.0f;
|
||||
break;
|
||||
case DXGI_MODE_ROTATION_ROTATE180:
|
||||
vertices[ 0] = x - tex_desc.Width; vertices[ 1] = y;
|
||||
vertices[ 5] = x - tex_desc.Width; vertices[ 6] = y - tex_desc.Height;
|
||||
vertices[10] = x; vertices[11] = y;
|
||||
vertices[15] = x; vertices[16] = y - tex_desc.Height;
|
||||
vertices[ 3] = 1.0f; vertices[ 4] = 0.0f;
|
||||
vertices[ 8] = 1.0f; vertices[ 9] = 1.0f;
|
||||
vertices[13] = 0.0f; vertices[14] = 0.0f;
|
||||
vertices[18] = 0.0f; vertices[19] = 1.0f;
|
||||
break;
|
||||
case DXGI_MODE_ROTATION_ROTATE270:
|
||||
vertices[ 0] = x - tex_desc.Height; vertices[ 1] = y + tex_desc.Width;
|
||||
vertices[ 5] = x - tex_desc.Height; vertices[ 6] = y;
|
||||
vertices[10] = x; vertices[11] = y + tex_desc.Width;
|
||||
vertices[15] = x; vertices[16] = y;
|
||||
vertices[ 3] = 1.0f; vertices[ 4] = 1.0f;
|
||||
vertices[ 8] = 0.0f; vertices[ 9] = 1.0f;
|
||||
vertices[13] = 1.0f; vertices[14] = 0.0f;
|
||||
vertices[18] = 0.0f; vertices[19] = 0.0f;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
num_vertices = sizeof(vertices) / (sizeof(FLOAT) * 5);
|
||||
|
||||
buf_desc.Usage = D3D11_USAGE_DEFAULT;
|
||||
@ -918,7 +982,7 @@ static int ddagrab_request_frame(AVFilterLink *outlink)
|
||||
now -= dda->first_pts;
|
||||
|
||||
if (!dda->probed_texture) {
|
||||
ret = next_frame_internal(avctx, &cur_texture);
|
||||
ret = next_frame_internal(avctx, &cur_texture, 0);
|
||||
} else {
|
||||
cur_texture = dda->probed_texture;
|
||||
dda->probed_texture = NULL;
|
||||
|
@@ -670,7 +670,7 @@ static int asf_read_marker(AVFormatContext *s)

avio_rl64(pb); // offset, 8 bytes
pres_time = avio_rl64(pb); // presentation time
pres_time -= asf->hdr.preroll * 10000;
pres_time = av_sat_sub64(pres_time, asf->hdr.preroll * 10000);
avio_rl16(pb); // entry length
avio_rl32(pb); // send time
avio_rl32(pb); // flags

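Note: av_sat_sub64() clamps to the int64_t range instead of wrapping, which is what "avformat/asfdec_f: Saturate presentation time in marker" refers to. A self-contained sketch of the same idea (my own helper, not the libavutil implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Subtract b from a, clamping to the int64_t range instead of overflowing. */
    static int64_t sat_sub64(int64_t a, int64_t b)
    {
        if (b > 0 && a < INT64_MIN + b)
            return INT64_MIN;
        if (b < 0 && a > INT64_MAX + b)
            return INT64_MAX;
        return a - b;
    }

    int main(void)
    {
        printf("%lld\n", (long long)sat_sub64(INT64_MIN + 5, 10)); /* clamps to INT64_MIN */
        return 0;
    }
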
@ -70,6 +70,9 @@ static int avr_read_header(AVFormatContext *s)
|
||||
avio_skip(s->pb, 1); // replay speed
|
||||
|
||||
st->codecpar->sample_rate = avio_rb24(s->pb);
|
||||
if (st->codecpar->sample_rate == 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
avio_skip(s->pb, 4 * 3);
|
||||
avio_skip(s->pb, 2 * 3);
|
||||
avio_skip(s->pb, 20);
|
||||
|
@ -140,6 +140,10 @@ static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
|
||||
return 0; /* this indicate EOS */
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (size != (int)size) {
|
||||
av_packet_unref(pkt);
|
||||
return AVERROR(EDOM);
|
||||
}
|
||||
|
||||
pkt->stream_index = avs->st_audio->index;
|
||||
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||
|
@ -296,6 +296,8 @@ static av_cold int concatf_open(URLContext *h, const char *uri, int flags)
|
||||
av_bprint_finalize(&bp, NULL);
|
||||
data->length = i;
|
||||
|
||||
if (!data->length)
|
||||
err = AVERROR_INVALIDDATA;
|
||||
if (err < 0)
|
||||
concat_close(h);
|
||||
|
||||
|
@ -667,7 +667,9 @@ static int concat_read_header(AVFormatContext *avf)
|
||||
else
|
||||
time = cat->files[i].start_time;
|
||||
if (cat->files[i].user_duration == AV_NOPTS_VALUE) {
|
||||
if (cat->files[i].inpoint == AV_NOPTS_VALUE || cat->files[i].outpoint == AV_NOPTS_VALUE)
|
||||
if (cat->files[i].inpoint == AV_NOPTS_VALUE || cat->files[i].outpoint == AV_NOPTS_VALUE ||
|
||||
cat->files[i].outpoint - (uint64_t)cat->files[i].inpoint != av_sat_sub64(cat->files[i].outpoint, cat->files[i].inpoint)
|
||||
)
|
||||
break;
|
||||
cat->files[i].user_duration = cat->files[i].outpoint - cat->files[i].inpoint;
|
||||
}
|
||||
|
@ -231,6 +231,7 @@ int av_probe_input_buffer2(AVIOContext *pb, const AVInputFormat **fmt,
|
||||
int ret = 0, probe_size, buf_offset = 0;
|
||||
int score = 0;
|
||||
int ret2;
|
||||
int eof = 0;
|
||||
|
||||
if (!max_probe_size)
|
||||
max_probe_size = PROBE_BUF_MAX;
|
||||
@ -254,7 +255,7 @@ int av_probe_input_buffer2(AVIOContext *pb, const AVInputFormat **fmt,
|
||||
}
|
||||
}
|
||||
|
||||
for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
|
||||
for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt && !eof;
|
||||
probe_size = FFMIN(probe_size << 1,
|
||||
FFMAX(max_probe_size, probe_size + 1))) {
|
||||
score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
|
||||
@ -270,6 +271,7 @@ int av_probe_input_buffer2(AVIOContext *pb, const AVInputFormat **fmt,
|
||||
|
||||
score = 0;
|
||||
ret = 0; /* error was end of file, nothing read */
|
||||
eof = 1;
|
||||
}
|
||||
buf_offset += ret;
|
||||
if (buf_offset < offset)
|
||||
|
@ -2549,7 +2549,7 @@ static const AVOption hls_options[] = {
|
||||
{.str = "3gp,aac,avi,ac3,eac3,flac,mkv,m3u8,m4a,m4s,m4v,mpg,mov,mp2,mp3,mp4,mpeg,mpegts,ogg,ogv,oga,ts,vob,wav"},
|
||||
INT_MIN, INT_MAX, FLAGS},
|
||||
{"max_reload", "Maximum number of times a insufficient list is attempted to be reloaded",
|
||||
OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS},
|
||||
OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 3}, 0, INT_MAX, FLAGS},
|
||||
{"m3u8_hold_counters", "The maximum number of times to load m3u8 when it refreshes without new segments",
|
||||
OFFSET(m3u8_hold_counters), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS},
|
||||
{"http_persistent", "Use persistent HTTP connections",
|
||||
|
@ -75,6 +75,8 @@ int ff_imf_xml_read_uuid(xmlNodePtr element, AVUUID uuid)
|
||||
int ret = 0;
|
||||
|
||||
xmlChar *element_text = xmlNodeListGetString(element->doc, element->xmlChildrenNode, 1);
|
||||
if (!element_text)
|
||||
return AVERROR_INVALIDDATA;
|
||||
ret = av_uuid_urn_parse(element_text, uuid);
|
||||
if (ret) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid UUID\n");
|
||||
@ -90,7 +92,7 @@ int ff_imf_xml_read_rational(xmlNodePtr element, AVRational *rational)
|
||||
int ret = 0;
|
||||
|
||||
xmlChar *element_text = xmlNodeListGetString(element->doc, element->xmlChildrenNode, 1);
|
||||
if (sscanf(element_text, "%i %i", &rational->num, &rational->den) != 2) {
|
||||
if (element_text == NULL || sscanf(element_text, "%i %i", &rational->num, &rational->den) != 2) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid rational number\n");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
@ -104,7 +106,7 @@ int ff_imf_xml_read_uint32(xmlNodePtr element, uint32_t *number)
|
||||
int ret = 0;
|
||||
|
||||
xmlChar *element_text = xmlNodeListGetString(element->doc, element->xmlChildrenNode, 1);
|
||||
if (sscanf(element_text, "%" PRIu32, number) != 1) {
|
||||
if (element_text == NULL || sscanf(element_text, "%" PRIu32, number) != 1) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid unsigned 32-bit integer");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
@ -188,6 +190,10 @@ static int fill_content_title(xmlNodePtr cpl_element, FFIMFCPL *cpl)
|
||||
cpl->content_title_utf8 = xmlNodeListGetString(cpl_element->doc,
|
||||
element->xmlChildrenNode,
|
||||
1);
|
||||
if (!cpl->content_title_utf8)
|
||||
cpl->content_title_utf8 = xmlStrdup("");
|
||||
if (!cpl->content_title_utf8)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -260,6 +266,8 @@ static int fill_timecode(xmlNodePtr cpl_element, FFIMFCPL *cpl)
|
||||
}
|
||||
|
||||
tc_str = xmlNodeListGetString(element->doc, element->xmlChildrenNode, 1);
|
||||
if (!tc_str)
|
||||
return AVERROR_INVALIDDATA;
|
||||
ret = parse_cpl_tc_type(tc_str, comps);
|
||||
xmlFree(tc_str);
|
||||
if (ret)
|
||||
@ -608,11 +616,10 @@ static int push_main_audio_sequence(xmlNodePtr audio_sequence_elem, FFIMFCPL *cp
|
||||
ret = fill_trackfile_resource(resource_elem,
|
||||
&vt->resources[vt->resource_count],
|
||||
cpl);
|
||||
vt->resource_count++;
|
||||
if (ret) {
|
||||
if (ret)
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid Resource\n");
|
||||
continue;
|
||||
}
|
||||
else
|
||||
vt->resource_count++;
|
||||
|
||||
resource_elem = xmlNextElementSibling(resource_elem);
|
||||
}
|
||||
@ -691,11 +698,10 @@ static int push_main_image_2d_sequence(xmlNodePtr image_sequence_elem, FFIMFCPL
|
||||
ret = fill_trackfile_resource(resource_elem,
|
||||
&cpl->main_image_2d_track->resources[cpl->main_image_2d_track->resource_count],
|
||||
cpl);
|
||||
cpl->main_image_2d_track->resource_count++;
|
||||
if (ret) {
|
||||
if (ret)
|
||||
av_log(NULL, AV_LOG_ERROR, "Invalid Resource\n");
|
||||
continue;
|
||||
}
|
||||
else
|
||||
cpl->main_image_2d_track->resource_count++;
|
||||
|
||||
resource_elem = xmlNextElementSibling(resource_elem);
|
||||
}
|
||||
|
@ -124,7 +124,7 @@ shift_and_ret:
|
||||
return buf + len;
|
||||
}
|
||||
|
||||
static int get_shift(int timeres, const char *buf)
|
||||
static int get_shift(unsigned timeres, const char *buf)
|
||||
{
|
||||
int sign = 1;
|
||||
int a = 0, b = 0, c = 0, d = 0;
|
||||
@ -143,16 +143,16 @@ static int get_shift(int timeres, const char *buf)
|
||||
|
||||
ret = 0;
|
||||
switch (n) {
|
||||
case 4:
|
||||
ret = sign * (((int64_t)a*3600 + (int64_t)b*60 + c) * timeres + d);
|
||||
break;
|
||||
case 3:
|
||||
ret = sign * (( (int64_t)a*60 + b) * timeres + c);
|
||||
break;
|
||||
case 2:
|
||||
ret = sign * (( (int64_t)a) * timeres + b);
|
||||
break;
|
||||
case 1: a = 0;
|
||||
case 2: c = b; b = a; a = 0;
|
||||
case 3: d = c; c = b; b = a; a = 0;
|
||||
}
|
||||
|
||||
ret = (int64_t)a*3600 + (int64_t)b*60 + c;
|
||||
if (FFABS(ret) > (INT64_MAX - FFABS(d)) / timeres)
|
||||
return 0;
|
||||
ret = sign * (ret * timeres + d);
|
||||
|
||||
if ((int)ret != ret)
|
||||
ret = 0;
|
||||
|
||||
@ -227,14 +227,17 @@ static int jacosub_read_header(AVFormatContext *s)
|
||||
}
|
||||
av_bprintf(&header, "#S %s", p);
|
||||
break;
|
||||
case 'T': // ...but must be placed after TIMERES
|
||||
jacosub->timeres = strtol(p, NULL, 10);
|
||||
if (!jacosub->timeres)
|
||||
case 'T': { // ...but must be placed after TIMERES
|
||||
int64_t timeres = strtol(p, NULL, 10);
|
||||
if (timeres <= 0 || timeres > UINT32_MAX) {
|
||||
jacosub->timeres = 30;
|
||||
else
|
||||
} else {
|
||||
jacosub->timeres = timeres;
|
||||
av_bprintf(&header, "#T %s", p);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* general/essential directives in the extradata */
|
||||
|
@ -21,6 +21,7 @@
|
||||
|
||||
#include "jpegxl_probe.h"
|
||||
|
||||
#define UNCHECKED_BITSTREAM_READER 0
|
||||
#define BITSTREAM_READER_LE
|
||||
#include "libavcodec/get_bits.h"
|
||||
|
||||
@ -57,49 +58,50 @@ enum JpegXLPrimaries {
|
||||
FF_JPEGXL_PR_P3 = 11,
|
||||
};
|
||||
|
||||
#define jxl_bits(n) get_bits_long(gb, (n))
|
||||
#define jxl_bits_skip(n) skip_bits_long(gb, (n))
|
||||
#define jxl_u32(c0, c1, c2, c3, u0, u1, u2, u3) jpegxl_u32(gb, \
|
||||
(const uint32_t[]){c0, c1, c2, c3}, (const uint32_t[]){u0, u1, u2, u3})
|
||||
#define jxl_u64() jpegxl_u64(gb)
|
||||
#define jxl_enum() jxl_u32(0, 1, 2, 18, 0, 0, 4, 6)
|
||||
|
||||
/* read a U32(c_i + u(u_i)) */
|
||||
static uint32_t jpegxl_u32(GetBitContext *gb,
|
||||
const uint32_t constants[4], const uint32_t ubits[4])
|
||||
static av_always_inline uint32_t jxl_u32(GetBitContext *gb,
|
||||
uint32_t c0, uint32_t c1, uint32_t c2, uint32_t c3,
|
||||
uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3)
|
||||
{
|
||||
uint32_t ret, choice = jxl_bits(2);
|
||||
const uint32_t constants[4] = {c0, c1, c2, c3};
|
||||
const uint32_t ubits [4] = {u0, u1, u2, u3};
|
||||
uint32_t ret, choice = get_bits(gb, 2);
|
||||
|
||||
ret = constants[choice];
|
||||
if (ubits[choice])
|
||||
ret += jxl_bits(ubits[choice]);
|
||||
ret += get_bits_long(gb, ubits[choice]);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static av_always_inline uint32_t jxl_enum(GetBitContext *gb)
|
||||
{
|
||||
return jxl_u32(gb, 0, 1, 2, 18, 0, 0, 4, 6);
|
||||
}
|
||||
|
||||
/* read a U64() */
|
||||
static uint64_t jpegxl_u64(GetBitContext *gb)
|
||||
{
|
||||
uint64_t shift = 12, ret;
|
||||
|
||||
switch (jxl_bits(2)) {
|
||||
switch (get_bits(gb, 2)) {
|
||||
case 0:
|
||||
ret = 0;
|
||||
break;
|
||||
case 1:
|
||||
ret = 1 + jxl_bits(4);
|
||||
ret = 1 + get_bits(gb, 4);
|
||||
break;
|
||||
case 2:
|
||||
ret = 17 + jxl_bits(8);
|
||||
ret = 17 + get_bits(gb, 8);
|
||||
break;
|
||||
case 3:
|
||||
ret = jxl_bits(12);
|
||||
while (jxl_bits(1)) {
|
||||
ret = get_bits(gb, 12);
|
||||
while (get_bits1(gb)) {
|
||||
if (shift < 60) {
|
||||
ret |= (uint64_t)jxl_bits(8) << shift;
|
||||
ret |= (uint64_t)get_bits(gb, 8) << shift;
|
||||
shift += 8;
|
||||
} else {
|
||||
ret |= (uint64_t)jxl_bits(4) << shift;
|
||||
ret |= (uint64_t)get_bits(gb, 4) << shift;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -142,18 +144,18 @@ static int jpegxl_read_size_header(GetBitContext *gb)
|
||||
{
|
||||
uint32_t width, height;
|
||||
|
||||
if (jxl_bits(1)) {
|
||||
if (get_bits1(gb)) {
|
||||
/* small size header */
|
||||
height = (jxl_bits(5) + 1) << 3;
|
||||
width = jpegxl_width_from_ratio(height, jxl_bits(3));
|
||||
height = (get_bits(gb, 5) + 1) << 3;
|
||||
width = jpegxl_width_from_ratio(height, get_bits(gb, 3));
|
||||
if (!width)
|
||||
width = (jxl_bits(5) + 1) << 3;
|
||||
width = (get_bits(gb, 5) + 1) << 3;
|
||||
} else {
|
||||
/* large size header */
|
||||
height = 1 + jxl_u32(0, 0, 0, 0, 9, 13, 18, 30);
|
||||
width = jpegxl_width_from_ratio(height, jxl_bits(3));
|
||||
height = 1 + jxl_u32(gb, 0, 0, 0, 0, 9, 13, 18, 30);
|
||||
width = jpegxl_width_from_ratio(height, get_bits(gb, 3));
|
||||
if (!width)
|
||||
width = 1 + jxl_u32(0, 0, 0, 0, 9, 13, 18, 30);
|
||||
width = 1 + jxl_u32(gb, 0, 0, 0, 0, 9, 13, 18, 30);
|
||||
}
|
||||
if (width > (1 << 18) || height > (1 << 18)
|
||||
|| (width >> 4) * (height >> 4) > (1 << 20))
|
||||
@ -170,18 +172,18 @@ static int jpegxl_read_preview_header(GetBitContext *gb)
|
||||
{
|
||||
uint32_t width, height;
|
||||
|
||||
if (jxl_bits(1)) {
|
||||
if (get_bits1(gb)) {
|
||||
/* coded height and width divided by eight */
|
||||
height = jxl_u32(16, 32, 1, 33, 0, 0, 5, 9) << 3;
|
||||
width = jpegxl_width_from_ratio(height, jxl_bits(3));
|
||||
height = jxl_u32(gb, 16, 32, 1, 33, 0, 0, 5, 9) << 3;
|
||||
width = jpegxl_width_from_ratio(height, get_bits(gb, 3));
|
||||
if (!width)
|
||||
width = jxl_u32(16, 32, 1, 33, 0, 0, 5, 9) << 3;
|
||||
width = jxl_u32(gb, 16, 32, 1, 33, 0, 0, 5, 9) << 3;
|
||||
} else {
|
||||
/* full height and width coded */
|
||||
height = jxl_u32(1, 65, 321, 1345, 6, 8, 10, 12);
|
||||
width = jpegxl_width_from_ratio(height, jxl_bits(3));
|
||||
height = jxl_u32(gb, 1, 65, 321, 1345, 6, 8, 10, 12);
|
||||
width = jpegxl_width_from_ratio(height, get_bits(gb, 3));
|
||||
if (!width)
|
||||
width = jxl_u32(1, 65, 321, 1345, 6, 8, 10, 12);
|
||||
width = jxl_u32(gb, 1, 65, 321, 1345, 6, 8, 10, 12);
|
||||
}
|
||||
if (width > 4096 || height > 4096)
|
||||
return -1;
|
||||
@ -194,13 +196,13 @@ static int jpegxl_read_preview_header(GetBitContext *gb)
|
||||
*/
|
||||
static void jpegxl_skip_bit_depth(GetBitContext *gb)
|
||||
{
|
||||
if (jxl_bits(1)) {
|
||||
if (get_bits1(gb)) {
|
||||
/* float samples */
|
||||
jxl_u32(32, 16, 24, 1, 0, 0, 0, 6); /* mantissa */
|
||||
jxl_bits_skip(4); /* exponent */
|
||||
jxl_u32(gb, 32, 16, 24, 1, 0, 0, 0, 6); /* mantissa */
|
||||
skip_bits_long(gb, 4); /* exponent */
|
||||
} else {
|
||||
/* integer samples */
|
||||
jxl_u32(8, 10, 12, 1, 0, 0, 0, 6);
|
||||
jxl_u32(gb, 8, 10, 12, 1, 0, 0, 0, 6);
|
||||
}
|
||||
}
|
||||
|
||||
@ -210,34 +212,34 @@ static void jpegxl_skip_bit_depth(GetBitContext *gb)
|
||||
*/
|
||||
static int jpegxl_read_extra_channel_info(GetBitContext *gb)
|
||||
{
|
||||
int all_default = jxl_bits(1);
|
||||
int all_default = get_bits1(gb);
|
||||
uint32_t type, name_len = 0;
|
||||
|
||||
if (!all_default) {
|
||||
type = jxl_enum();
|
||||
type = jxl_enum(gb);
|
||||
if (type > 63)
|
||||
return -1; /* enum types cannot be 64+ */
|
||||
if (type == FF_JPEGXL_CT_BLACK)
|
||||
return -1;
|
||||
jpegxl_skip_bit_depth(gb);
|
||||
jxl_u32(0, 3, 4, 1, 0, 0, 0, 3); /* dim-shift */
|
||||
jxl_u32(gb, 0, 3, 4, 1, 0, 0, 0, 3); /* dim-shift */
|
||||
/* max of name_len is 1071 = 48 + 2^10 - 1 */
|
||||
name_len = jxl_u32(0, 0, 16, 48, 0, 4, 5, 10);
|
||||
name_len = jxl_u32(gb, 0, 0, 16, 48, 0, 4, 5, 10);
|
||||
} else {
|
||||
type = FF_JPEGXL_CT_ALPHA;
|
||||
}
|
||||
|
||||
/* skip over the name */
|
||||
jxl_bits_skip(8 * name_len);
|
||||
skip_bits_long(gb, 8 * name_len);
|
||||
|
||||
if (!all_default && type == FF_JPEGXL_CT_ALPHA)
|
||||
jxl_bits_skip(1);
|
||||
skip_bits1(gb);
|
||||
|
||||
if (type == FF_JPEGXL_CT_SPOT_COLOR)
|
||||
jxl_bits_skip(16 * 4);
|
||||
skip_bits_long(gb, 16 * 4);
|
||||
|
||||
if (type == FF_JPEGXL_CT_CFA)
|
||||
jxl_u32(1, 0, 3, 19, 0, 2, 4, 8);
|
||||
jxl_u32(gb, 1, 0, 3, 19, 0, 2, 4, 8);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -256,136 +258,149 @@ int ff_jpegxl_verify_codestream_header(const uint8_t *buf, int buflen)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (jxl_bits(16) != FF_JPEGXL_CODESTREAM_SIGNATURE_LE)
|
||||
if (get_bits_long(gb, 16) != FF_JPEGXL_CODESTREAM_SIGNATURE_LE)
|
||||
return -1;
|
||||
|
||||
if (jpegxl_read_size_header(gb) < 0)
|
||||
return -1;
|
||||
if ((ret = jpegxl_read_size_header(gb)) < 0)
|
||||
return ret;
|
||||
|
||||
all_default = jxl_bits(1);
|
||||
all_default = get_bits1(gb);
|
||||
if (!all_default)
|
||||
extra_fields = jxl_bits(1);
|
||||
extra_fields = get_bits1(gb);
|
||||
|
||||
if (extra_fields) {
|
||||
jxl_bits_skip(3); /* orientation */
|
||||
skip_bits_long(gb, 3); /* orientation */
|
||||
|
||||
/*
|
||||
* intrinstic size
|
||||
* any size header here is valid, but as it
|
||||
* is variable length we have to read it
|
||||
*/
|
||||
if (jxl_bits(1))
|
||||
if (get_bits1(gb))
|
||||
jpegxl_read_size_header(gb);
|
||||
|
||||
/* preview header */
|
||||
if (jxl_bits(1)) {
|
||||
if (jpegxl_read_preview_header(gb) < 0)
|
||||
return -1;
|
||||
if (get_bits1(gb)) {
|
||||
ret = jpegxl_read_preview_header(gb);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* animation header */
|
||||
if (jxl_bits(1)) {
|
||||
jxl_u32(100, 1000, 1, 1, 0, 0, 10, 30);
|
||||
jxl_u32(1, 1001, 1, 1, 0, 0, 8, 10);
|
||||
jxl_u32(0, 0, 0, 0, 0, 3, 16, 32);
|
||||
jxl_bits_skip(1);
|
||||
if (get_bits1(gb)) {
|
||||
jxl_u32(gb, 100, 1000, 1, 1, 0, 0, 10, 30);
|
||||
jxl_u32(gb, 1, 1001, 1, 1, 0, 0, 8, 10);
|
||||
jxl_u32(gb, 0, 0, 0, 0, 0, 3, 16, 32);
|
||||
skip_bits_long(gb, 1);
|
||||
}
|
||||
}
|
||||
if (get_bits_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (!all_default) {
|
||||
jpegxl_skip_bit_depth(gb);
|
||||
|
||||
/* modular_16bit_buffers must equal 1 */
|
||||
if (!jxl_bits(1))
|
||||
if (!get_bits1(gb))
|
||||
return -1;
|
||||
|
||||
num_extra_channels = jxl_u32(0, 1, 2, 1, 0, 0, 4, 12);
|
||||
num_extra_channels = jxl_u32(gb, 0, 1, 2, 1, 0, 0, 4, 12);
|
||||
if (num_extra_channels > 4)
|
||||
return -1;
|
||||
for (uint32_t i = 0; i < num_extra_channels; i++) {
|
||||
if (jpegxl_read_extra_channel_info(gb) < 0)
|
||||
return -1;
|
||||
ret = jpegxl_read_extra_channel_info(gb);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (get_bits_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
xyb_encoded = jxl_bits(1);
|
||||
xyb_encoded = get_bits1(gb);
|
||||
|
||||
/* color encoding bundle */
|
||||
if (!jxl_bits(1)) {
|
||||
if (!get_bits1(gb)) {
|
||||
uint32_t color_space;
|
||||
have_icc_profile = jxl_bits(1);
|
||||
color_space = jxl_enum();
|
||||
have_icc_profile = get_bits1(gb);
|
||||
color_space = jxl_enum(gb);
|
||||
if (color_space > 63)
|
||||
return -1;
|
||||
|
||||
if (!have_icc_profile) {
|
||||
if (color_space != FF_JPEGXL_CS_XYB) {
|
||||
uint32_t white_point = jxl_enum();
|
||||
uint32_t white_point = jxl_enum(gb);
|
||||
if (white_point > 63)
|
||||
return -1;
|
||||
if (white_point == FF_JPEGXL_WP_CUSTOM) {
|
||||
/* ux and uy values */
|
||||
jxl_u32(0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
jxl_u32(0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
jxl_u32(gb, 0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
jxl_u32(gb, 0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
}
|
||||
if (color_space != FF_JPEGXL_CS_GRAY) {
|
||||
/* primaries */
|
||||
uint32_t primaries = jxl_enum();
|
||||
uint32_t primaries = jxl_enum(gb);
|
||||
if (primaries > 63)
|
||||
return -1;
|
||||
if (primaries == FF_JPEGXL_PR_CUSTOM) {
|
||||
/* ux/uy values for r,g,b */
|
||||
for (int i = 0; i < 6; i++)
|
||||
jxl_u32(0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
for (int i = 0; i < 6; i++) {
|
||||
jxl_u32(gb, 0, 524288, 1048576, 2097152, 19, 19, 20, 21);
|
||||
if (get_bits_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* transfer characteristics */
|
||||
if (jxl_bits(1)) {
|
||||
if (get_bits1(gb)) {
|
||||
/* gamma */
|
||||
jxl_bits_skip(24);
|
||||
skip_bits_long(gb, 24);
|
||||
} else {
|
||||
/* transfer function */
|
||||
if (jxl_enum() > 63)
|
||||
if (jxl_enum(gb) > 63)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* rendering intent */
|
||||
if (jxl_enum() > 63)
|
||||
if (jxl_enum(gb) > 63)
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/* tone mapping bundle */
|
||||
if (extra_fields && !jxl_bits(1))
|
||||
jxl_bits_skip(16 + 16 + 1 + 16);
|
||||
if (extra_fields && !get_bits1(gb))
|
||||
skip_bits_long(gb, 16 + 16 + 1 + 16);
|
||||
|
||||
extensions = jxl_u64();
|
||||
extensions = jpegxl_u64(gb);
|
||||
if (get_bits_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (extensions) {
|
||||
for (int i = 0; i < 64; i++) {
|
||||
if (extensions & (UINT64_C(1) << i))
|
||||
jxl_u64();
|
||||
jpegxl_u64(gb);
|
||||
if (get_bits_left(gb) < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* default transform */
|
||||
if (!jxl_bits(1)) {
|
||||
if (!get_bits1(gb)) {
|
||||
/* opsin inverse matrix */
|
||||
if (xyb_encoded && !jxl_bits(1))
|
||||
jxl_bits_skip(16 * 16);
|
||||
if (xyb_encoded && !get_bits1(gb))
|
||||
skip_bits_long(gb, 16 * 16);
|
||||
/* cw_mask and default weights */
|
||||
if (jxl_bits(1))
|
||||
jxl_bits_skip(16 * 15);
|
||||
if (jxl_bits(1))
|
||||
jxl_bits_skip(16 * 55);
|
||||
if (jxl_bits(1))
|
||||
jxl_bits_skip(16 * 210);
|
||||
if (get_bits1(gb))
|
||||
skip_bits_long(gb, 16 * 15);
|
||||
if (get_bits1(gb))
|
||||
skip_bits_long(gb, 16 * 55);
|
||||
if (get_bits1(gb))
|
||||
skip_bits_long(gb, 16 * 210);
|
||||
}
|
||||
|
||||
if (!have_icc_profile) {
|
||||
int bits_remaining = 7 - (get_bits_count(gb) - 1) % 8;
|
||||
if (bits_remaining && jxl_bits(bits_remaining))
|
||||
if (bits_remaining && get_bits(gb, bits_remaining))
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@@ -139,7 +139,9 @@ static int laf_read_header(AVFormatContext *ctx)
s->index = 0;
s->stored_index = 0;
s->bpp = bpp;
if ((int64_t)bpp * st_count * (int64_t)sample_rate >= INT32_MAX)
if ((int64_t)bpp * st_count * (int64_t)sample_rate >= INT32_MAX ||
(int64_t)bpp * st_count * (int64_t)sample_rate == 0
)
return AVERROR_INVALIDDATA;
s->data = av_calloc(st_count * sample_rate, bpp);
if (!s->data)

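Note: the widened check above rejects both overflow and a zero product before the av_calloc() call. A hedged sketch of the same validation pattern (parameter and helper names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Validate the product of stream parameters before using it as an
     * allocation size: reject zero as well as anything >= INT32_MAX. */
    static void *alloc_sample_buffer(int bpp, int st_count, int sample_rate)
    {
        int64_t total = (int64_t)bpp * st_count * sample_rate;
        if (total <= 0 || total >= INT32_MAX)
            return NULL;
        return calloc((size_t)st_count * sample_rate, (size_t)bpp);
    }

    int main(void)
    {
        void *buf = alloc_sample_buffer(2, 2, 48000);
        puts(buf ? "ok" : "rejected");
        free(buf);
        return 0;
    }
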
@ -4191,16 +4191,19 @@ static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t
|
||||
int64_t prebuffer_ns = 1000000000;
|
||||
int64_t time_ns = sti->index_entries[i].timestamp * matroska->time_scale;
|
||||
double nano_seconds_per_second = 1000000000.0;
|
||||
int64_t prebuffered_ns = time_ns + prebuffer_ns;
|
||||
int64_t prebuffered_ns;
|
||||
double prebuffer_bytes = 0.0;
|
||||
int64_t temp_prebuffer_ns = prebuffer_ns;
|
||||
int64_t pre_bytes, pre_ns;
|
||||
double pre_sec, prebuffer, bits_per_second;
|
||||
CueDesc desc_beg = get_cue_desc(s, time_ns, cues_start);
|
||||
|
||||
// Start with the first Cue.
|
||||
CueDesc desc_end = desc_beg;
|
||||
|
||||
if (time_ns > INT64_MAX - prebuffer_ns)
|
||||
return -1;
|
||||
prebuffered_ns = time_ns + prebuffer_ns;
|
||||
|
||||
// Figure out how much data we have downloaded for the prebuffer. This will
|
||||
// be used later to adjust the bits per sample to try.
|
||||
while (desc_end.start_time_ns != -1 && desc_end.end_time_ns < prebuffered_ns) {
|
||||
|
@ -1131,6 +1131,8 @@ static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
int ret = ffio_read_size(pb, type, 4);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (c->fc->nb_streams)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (strcmp(type, "qt "))
|
||||
c->isom = 1;
|
||||
@ -4470,6 +4472,10 @@ static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
MOVStreamContext *sc;
|
||||
int ret;
|
||||
|
||||
if (c->is_still_picture_avif) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
st = avformat_new_stream(c->fc, NULL);
|
||||
if (!st) return AVERROR(ENOMEM);
|
||||
st->id = -1;
|
||||
@ -7637,10 +7643,11 @@ static int mov_read_iloc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (c->fc->nb_streams) {
|
||||
if (c->avif_info) {
|
||||
av_log(c->fc, AV_LOG_INFO, "Duplicate iloc box found\n");
|
||||
return 0;
|
||||
}
|
||||
av_assert0(!c->fc->nb_streams);
|
||||
|
||||
version = avio_r8(pb);
|
||||
avio_rb24(pb); // flags.
|
||||
@ -8653,12 +8660,13 @@ static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
|
||||
if (msc->pb && msc->current_sample < avsti->nb_index_entries) {
|
||||
AVIndexEntry *current_sample = &avsti->index_entries[msc->current_sample];
|
||||
int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
|
||||
uint64_t dtsdiff = best_dts > dts ? best_dts - (uint64_t)dts : ((uint64_t)dts - best_dts);
|
||||
av_log(s, AV_LOG_TRACE, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
|
||||
if (!sample || (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && current_sample->pos < sample->pos) ||
|
||||
((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
|
||||
((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb && dts != AV_NOPTS_VALUE &&
|
||||
((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
|
||||
(FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) {
|
||||
((dtsdiff <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
|
||||
(dtsdiff > AV_TIME_BASE && dts < best_dts)))))) {
|
||||
sample = current_sample;
|
||||
best_dts = dts;
|
||||
*st = avst;
|
||||
|
@ -100,7 +100,6 @@ typedef struct MXFPartition {
|
||||
uint64_t previous_partition;
|
||||
int index_sid;
|
||||
int body_sid;
|
||||
int64_t this_partition;
|
||||
int64_t essence_offset; ///< absolute offset of essence
|
||||
int64_t essence_length;
|
||||
int32_t kag_size;
|
||||
@ -457,12 +456,15 @@ static int mxf_read_sync(AVIOContext *pb, const uint8_t *key, unsigned size)
|
||||
return i == size;
|
||||
}
|
||||
|
||||
static int klv_read_packet(KLVPacket *klv, AVIOContext *pb)
|
||||
static int klv_read_packet(MXFContext *mxf, KLVPacket *klv, AVIOContext *pb)
|
||||
{
|
||||
int64_t length, pos;
|
||||
if (!mxf_read_sync(pb, mxf_klv_key, 4))
|
||||
return AVERROR_INVALIDDATA;
|
||||
klv->offset = avio_tell(pb) - 4;
|
||||
if (klv->offset < mxf->run_in)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
memcpy(klv->key, mxf_klv_key, 4);
|
||||
avio_read(pb, klv->key + 4, 12);
|
||||
length = klv_decode_ber_length(pb);
|
||||
@ -725,10 +727,13 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size
|
||||
UID op;
|
||||
uint64_t footer_partition;
|
||||
uint32_t nb_essence_containers;
|
||||
uint64_t this_partition;
|
||||
|
||||
if (mxf->partitions_count >= INT_MAX / 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
av_assert0(klv_offset >= mxf->run_in);
|
||||
|
||||
tmp_part = av_realloc_array(mxf->partitions, mxf->partitions_count + 1, sizeof(*mxf->partitions));
|
||||
if (!tmp_part)
|
||||
return AVERROR(ENOMEM);
|
||||
@ -771,7 +776,13 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size
|
||||
partition->complete = uid[14] > 2;
|
||||
avio_skip(pb, 4);
|
||||
partition->kag_size = avio_rb32(pb);
|
||||
partition->this_partition = avio_rb64(pb);
|
||||
this_partition = avio_rb64(pb);
|
||||
if (this_partition != klv_offset - mxf->run_in) {
|
||||
av_log(mxf->fc, AV_LOG_ERROR,
|
||||
"this_partition %"PRId64" mismatches %"PRId64"\n",
|
||||
this_partition, klv_offset - mxf->run_in);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
partition->previous_partition = avio_rb64(pb);
|
||||
footer_partition = avio_rb64(pb);
|
||||
partition->header_byte_count = avio_rb64(pb);
|
||||
@ -791,8 +802,8 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size
|
||||
av_dict_set(&s->metadata, "operational_pattern_ul", str, 0);
|
||||
}
|
||||
|
||||
if (partition->this_partition &&
|
||||
partition->previous_partition == partition->this_partition) {
|
||||
if (this_partition &&
|
||||
partition->previous_partition == this_partition) {
|
||||
av_log(mxf->fc, AV_LOG_ERROR,
|
||||
"PreviousPartition equal to ThisPartition %"PRIx64"\n",
|
||||
partition->previous_partition);
|
||||
@ -800,11 +811,11 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size
|
||||
if (!mxf->parsing_backward && mxf->last_forward_partition > 1) {
|
||||
MXFPartition *prev =
|
||||
mxf->partitions + mxf->last_forward_partition - 2;
|
||||
partition->previous_partition = prev->this_partition;
|
||||
partition->previous_partition = prev->pack_ofs - mxf->run_in;
|
||||
}
|
||||
/* if no previous body partition are found point to the header
|
||||
* partition */
|
||||
if (partition->previous_partition == partition->this_partition)
|
||||
if (partition->previous_partition == this_partition)
|
||||
partition->previous_partition = 0;
|
||||
av_log(mxf->fc, AV_LOG_ERROR,
|
||||
"Overriding PreviousPartition with %"PRIx64"\n",
|
||||
@ -826,7 +837,7 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size
|
||||
"PartitionPack: ThisPartition = 0x%"PRIX64
|
||||
", PreviousPartition = 0x%"PRIX64", "
|
||||
"FooterPartition = 0x%"PRIX64", IndexSID = %i, BodySID = %i\n",
|
||||
partition->this_partition,
|
||||
this_partition,
|
||||
partition->previous_partition, footer_partition,
|
||||
partition->index_sid, partition->body_sid);
|
||||
|
||||
@ -900,7 +911,7 @@ static uint64_t partition_score(MXFPartition *p)
|
||||
score = 3;
|
||||
else
|
||||
score = 1;
|
||||
return (score << 60) | ((uint64_t)p->this_partition >> 4);
|
||||
return (score << 60) | ((uint64_t)p->pack_ofs >> 4);
|
||||
}
|
||||
|
||||
static int mxf_add_metadata_set(MXFContext *mxf, MXFMetadataSet **metadata_set)
|
||||
@ -3415,7 +3426,7 @@ static int mxf_seek_to_previous_partition(MXFContext *mxf)
|
||||
/* Make sure this is actually a PartitionPack, and if so parse it.
|
||||
* See deadlock2.mxf
|
||||
*/
|
||||
if ((ret = klv_read_packet(&klv, pb)) < 0) {
|
||||
if ((ret = klv_read_packet(mxf, &klv, pb)) < 0) {
|
||||
av_log(mxf->fc, AV_LOG_ERROR, "failed to read PartitionPack KLV\n");
|
||||
return ret;
|
||||
}
|
||||
@ -3532,14 +3543,14 @@ static void mxf_compute_essence_containers(AVFormatContext *s)
|
||||
|
||||
/* essence container spans to the next partition */
|
||||
if (x < mxf->partitions_count - 1)
|
||||
p->essence_length = mxf->partitions[x+1].this_partition - p->essence_offset;
|
||||
p->essence_length = mxf->partitions[x+1].pack_ofs - mxf->run_in - p->essence_offset;
|
||||
|
||||
if (p->essence_length < 0) {
|
||||
/* next ThisPartition < essence_offset */
|
||||
p->essence_length = 0;
|
||||
av_log(mxf->fc, AV_LOG_ERROR,
|
||||
"partition %i: bad ThisPartition = %"PRIX64"\n",
|
||||
x+1, mxf->partitions[x+1].this_partition);
|
||||
x+1, mxf->partitions[x+1].pack_ofs - mxf->run_in);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3692,7 +3703,7 @@ static void mxf_read_random_index_pack(AVFormatContext *s)
|
||||
if (length < min_rip_length || length > max_rip_length)
|
||||
goto end;
|
||||
avio_seek(s->pb, file_size - length, SEEK_SET);
|
||||
if (klv_read_packet(&klv, s->pb) < 0 ||
|
||||
if (klv_read_packet(mxf, &klv, s->pb) < 0 ||
|
||||
!IS_KLV_KEY(klv.key, ff_mxf_random_index_pack_key))
|
||||
goto end;
|
||||
if (klv.next_klv != file_size || klv.length <= 4 || (klv.length - 4) % 12) {
|
||||
@ -3739,7 +3750,7 @@ static int mxf_read_header(AVFormatContext *s)
|
||||
while (!avio_feof(s->pb)) {
|
||||
const MXFMetadataReadTableEntry *metadata;
|
||||
|
||||
if (klv_read_packet(&klv, s->pb) < 0) {
|
||||
if (klv_read_packet(mxf, &klv, s->pb) < 0) {
|
||||
/* EOF - seek to previous partition or stop */
|
||||
if(mxf_parse_handle_partition_or_eof(mxf) <= 0)
|
||||
break;
|
||||
@ -3988,7 +3999,7 @@ static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
|
||||
if (pos < mxf->current_klv_data.next_klv - mxf->current_klv_data.length || pos >= mxf->current_klv_data.next_klv) {
|
||||
mxf->current_klv_data = (KLVPacket){{0}};
|
||||
ret = klv_read_packet(&klv, s->pb);
|
||||
ret = klv_read_packet(mxf, &klv, s->pb);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_data_size = klv.length;
|
||||
|
@ -196,7 +196,7 @@ static int theora_packet(AVFormatContext *s, int idx)
|
||||
if(s->streams[idx]->start_time == AV_NOPTS_VALUE && os->lastpts != AV_NOPTS_VALUE) {
|
||||
s->streams[idx]->start_time = os->lastpts;
|
||||
if (s->streams[idx]->duration > 0)
|
||||
s->streams[idx]->duration -= s->streams[idx]->start_time;
|
||||
s->streams[idx]->duration = av_sat_sub64(s->streams[idx]->duration, s->streams[idx]->start_time);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -72,7 +72,7 @@ static int rka_read_header(AVFormatContext *s)
|
||||
if (channels == 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bps = par->extradata[13];
|
||||
if (bps == 0)
|
||||
if (bps < 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
size_offset = avio_rl32(s->pb);
|
||||
framepos = avio_tell(s->pb);
|
||||
|
@ -268,6 +268,9 @@ static int rpl_read_header(AVFormatContext *s)
|
||||
"Video stream will be broken!\n", av_fourcc2str(vst->codecpar->codec_tag));
|
||||
|
||||
number_of_chunks = read_line_and_int(pb, &error); // number of chunks in the file
|
||||
if (number_of_chunks == INT_MAX)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
// The number in the header is actually the index of the last chunk.
|
||||
number_of_chunks++;
|
||||
|
||||
|
@ -410,7 +410,7 @@ static void parse_fmtp(AVFormatContext *s, RTSPState *rt,
|
||||
if (rtsp_st->sdp_payload_type == payload_type &&
|
||||
rtsp_st->dynamic_handler &&
|
||||
rtsp_st->dynamic_handler->parse_sdp_a_line) {
|
||||
rtsp_st->dynamic_handler->parse_sdp_a_line(s, i,
|
||||
rtsp_st->dynamic_handler->parse_sdp_a_line(s, rtsp_st->stream_index,
|
||||
rtsp_st->dynamic_protocol_context, line);
|
||||
}
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.