Mirror of https://gitee.com/openharmony/third_party_ffmpeg (synced 2024-11-23 03:09:51 +00:00)
ffmpeg rollback
Signed-off-by: vvtest <815508462@qq.com>
parent: a6a457fb89
commit: df7caf8708

BUILD.gn (16 lines changed)
@@ -139,13 +139,11 @@ config("ffmpeg_config") {
 cflags += [ "-Wno-bool-operation" ]
 }
 }

 config("libohosffmpeg_public_config") {
 visibility = [ ":*" ]

 include_dirs = [ "//third_party/ffmpeg" ]
 }

 ohos_source_set("ffmpeg_dynamic") {
 sources = [
 # "//third_party/ffmpeg/libavcodec/012v.c",
@@ -256,8 +254,7 @@ ohos_source_set("ffmpeg_dynamic") {
 "//third_party/ffmpeg/libavcodec/avdct.c",
 "//third_party/ffmpeg/libavcodec/avfft.c",
 "//third_party/ffmpeg/libavcodec/avpacket.c",
-#"//third_party/ffmpeg/libavcodec/avpicture.c",
+"//third_party/ffmpeg/libavcodec/avpicture.c",

 # "//third_party/ffmpeg/libavcodec/avrndec.c",
 # "//third_party/ffmpeg/libavcodec/avs.c",
@@ -273,8 +270,7 @@ ohos_source_set("ffmpeg_dynamic") {
 # "//third_party/ffmpeg/libavcodec/bintext.c",
 # "//third_party/ffmpeg/libavcodec/bitpacked.c",
 "//third_party/ffmpeg/libavcodec/bitstream.c",
-#"//third_party/ffmpeg/libavcodec/bitstream_filter.c",
+"//third_party/ffmpeg/libavcodec/bitstream_filter.c",
 "//third_party/ffmpeg/libavcodec/bitstream_filters.c",
 "//third_party/ffmpeg/libavcodec/blockdsp.c",
 "//third_party/ffmpeg/libavcodec/bmp.c",
@@ -497,7 +493,6 @@ ohos_source_set("ffmpeg_dynamic") {
 "//third_party/ffmpeg/libavcodec/h264_parser.c",
 "//third_party/ffmpeg/libavcodec/h264_picture.c",
 "//third_party/ffmpeg/libavcodec/h264_ps.c",
-"//third_party/ffmpeg/libavcodec/h274.c",

 # "//third_party/ffmpeg/libavcodec/h264_redundant_pps_bsf.c",
 "//third_party/ffmpeg/libavcodec/h264_refs.c",
@@ -741,7 +736,6 @@ ohos_source_set("ffmpeg_dynamic") {
 # "//third_party/ffmpeg/libavcodec/pafaudio.c",
 # "//third_party/ffmpeg/libavcodec/pafvideo.c",
 # "//third_party/ffmpeg/libavcodec/pamenc.c",
-"//third_party/ffmpeg/libavcodec/amr_parser.c",
 "//third_party/ffmpeg/libavcodec/parser.c",
 "//third_party/ffmpeg/libavcodec/parsers.c",

@@ -1248,12 +1242,10 @@ ohos_source_set("ffmpeg_dynamic") {
 # "//third_party/ffmpeg/libavformat/mms.c",
 # "//third_party/ffmpeg/libavformat/mmsh.c",
 # "//third_party/ffmpeg/libavformat/mmst.c",
-"//third_party/ffmpeg/libavformat/dovi_isom.c",
 "//third_party/ffmpeg/libavformat/mov.c",
 "//third_party/ffmpeg/libavformat/mov_chan.c",
 "//third_party/ffmpeg/libavformat/mov_esds.c",
 "//third_party/ffmpeg/libavformat/movenc.c",
-"//third_party/ffmpeg/libavformat/movenc_ttml.c",
 "//third_party/ffmpeg/libavformat/movenccenc.c",
 "//third_party/ffmpeg/libavformat/movenchint.c",
 "//third_party/ffmpeg/libavformat/mp3dec.c",
@@ -1264,8 +1256,8 @@ ohos_source_set("ffmpeg_dynamic") {
 # "//third_party/ffmpeg/libavformat/mpeg.c",
 # "//third_party/ffmpeg/libavformat/mpegenc.c",
 "//third_party/ffmpeg/libavformat/mpegts.c",

 "//third_party/ffmpeg/libavformat/mpegtsenc.c",

 # "//third_party/ffmpeg/libavformat/mpegvideodec.c",
 # "//third_party/ffmpeg/libavformat/mpjpeg.c",
 # "//third_party/ffmpeg/libavformat/mpjpegdec.c",
@@ -1473,8 +1465,6 @@ ohos_source_set("ffmpeg_dynamic") {
 "//third_party/ffmpeg/libavformat/url.c",

 # "//third_party/ffmpeg/libavformat/urldecode.c",
-"//third_party/ffmpeg/libavformat/demux.c",
-"//third_party/ffmpeg/libavformat/seek.c",
 "//third_party/ffmpeg/libavformat/utils.c",

 # "//third_party/ffmpeg/libavformat/v210.c",
Changelog (408 lines changed)
@@ -1,228 +1,194 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.

-version 5.0.2:
+version 4.4.1:
-- swscale: aarch64: Fix yuv2rgb with negative strides
+- avcodec/flac_parser: Consider AV_INPUT_BUFFER_PADDING_SIZE
-- avcodec/atrac3plusdec: fix compilation failure after last commit
+- avcodec/ttadsp: Fix integer overflows in tta_filter_process_c()
-- avcodec/atrac3plus: reorder channels to match the output layout
+- avutil/mathematics: Document av_rescale_rnd() behavior on non int64 results
-- avcodec/aacdec: fix parsing streams with channel configuration 11
+- avcodec/utils: Ensure 8x8 alignment for ARGO in avcodec_align_dimensions2()
+- avformat/matroskadec: Reset state also on failure in matroska_reset_status()
+- avformat/wavdec: Check smv_block_size
+- avformat/rmdec: Check for multiple audio_stream_info
+- avcodec/apedec: Use 64bit to avoid overflow
+- avcodec/apedec: Fix undefined integer overflow in long_filter_ehigh_3830()
+- oavformat/avidec: Check offset in odml
+- avformat/mpegts: use actually read packet size in mpegts_resync special case
+- fftools/ffmpeg: Fix crash when flushing non-fully setup output stream
+- avfilter/scale_npp: fix non-aligned output frame dimensions
+- Revert "avformat/hlsenc: compute video_keyframe_size after write keyframe"
 - Changelog: update
-- avcodec/speexdec: Check channels > 2
+- swscale/alphablend: Fix slice handling
-- avformat/vividas: Check packet size
+- avcodec/apedec: Fix integer overflow in filter_fast_3320()
-- avcodec/dstdec: Check for overflow in build_filter()
+- avformat/mov: Fix last mfra check
-- avformat/spdifdec: Use 64bit to compute bit rate
+- avcodec/mxpegdec: Check for AVDISCARD_ALL
-- avformat/rpl: Use 64bit for duration computation
+- avcodec/flicvideo: Check remaining bytes in FLI*COPY
-- avformat/xwma: Use av_rescale() for duration computation
+- avcodec/utils: ARGO writes 4x4 blocks without regard to the image dimensions
-- avformat/sdsdec: Use av_rescale() to avoid intermediate overflow in duration calculation
+- avcodec/cbs_h265_syntax_template: Limit sps_num_palette_predictor_initializer_minus1 to 127
-- avformat/sbgdec: Check ts_int in genrate_intervals
+- avcodec/snowdec: Maintain avmv buffer
-- avformat/sbgdec: clamp end_ts
+- avcodec/mpeg12dec: Do not put mpeg_f_code into an invalid state on error return
-- avformat/rmdec: check tag_size
+- avcodec/mpegvideo_enc: Limit bitrate tolerance to the representable
-- avformat/nutdec: Check fields
+- avcodec/apedec: Fix integer overflow in intermediate
-- avformat/flvdec: Use 64bit for sum_flv_tag_size
+- avformat/mvdec: Do not set invalid sample rate
-- avformat/jacosubdec: Fix overflow in get_shift()
+- avformat/sbgdec: Check for t0 overflow in expand_tseq()
-- avformat/dxa: avoid bpc overflows
+- avformat/rmdec: Use 64bit for intermediate for DEINT_ID_INT4
-- avformat/dhav: Use 64bit seek_back
+- avformat/sbgdec: Check opt_duration and start for overflow
-- avformat/cafdec: Check that nb_frasmes fits within 64bit
+- avcodec/exr: Fix undefined integer multiplication
-- avformat/asfdec_o: Limit packet offset
+- avformat/mov: Check for duplicate clli
-- avformat/ape: Check frames size
+- avformat/utils: Ignore negative duration in codec_info_duration computation
-- avformat/icodec: Check nb_pal
+- avformat/jacosubdec: Check for min in t overflow in get_shift()
-- avformat/aiffdec: Use 64bit for block_duration use
+- avformat/mxfdec: check channel number in mxf_get_d10_aes3_packet()
-- avformat/aiffdec: Check block_duration
+- (origin/release/4.4) avcodec/wmadec: handle run_level_decode error
-- avformat/mxfdec: only probe max run in
+- avcodec/wma: Return specific error code
-- avformat/mxfdec: Check run_in is within 65536
+- avcodec/dxva2_av1: fix superres_denom parameter
-- avcodec/mjpegdec: Check for unsupported bayer case
+- avcodec/libdav1d: fix compilation after recent libdav1d API changes
-- avcodec/apedec: Fix integer overflow in filter_3800()
+- Changelog: update
-- avcodec/tta: Check 24bit scaling for overflow
+- avcodec/utils: don't return negative values in av_get_audio_frame_duration()
-- avcodec/mobiclip: Check quantizer for overflow
+- avcodec/jpeg2000dec: Check that atom header is within bytsetream
-- avcodec/exr: Check preview psize
+- avcodec/apedec: Fix 2 integer overflows in filter_3800()
-- avcodec/tiff: Fix loop detection
+- avcodec/xpmdec: Move allocations down after more error checks
-- libavformat/hls: Free keys
+- avcodec/argo: Move U, fix shift
-- avcodec/fmvc: Move frame allocation to a later stage
+- avformat/mov: Check dts for overflow in mov_read_trun()
-- avfilter/vf_showinfo: remove backspaces
+- avformat/avidec: Use 64bit for frame number in odml index parsing
-- avcodec/speedhq: Check width
+- avcodec/mjpegbdec: Skip SOS on AVDISCARD_ALL as does mjpeg
-- avcodec/bink: disallow odd positioned scaled blocks
+- avcodec/mjpegdec: Check for bits left in mjpeg_decode_scan_progressive_ac()
-- libswscale: force a minimum size of the slide for bayer sources
+- avformat/adtsenc: return value check for init_get_bits in adts_decode_extradata
-- lavc/videotoolbox: do not pass AVCodecContext to decoder output callback
+- avcodec/webp: Check available space in loop in decode_entropy_coded_image()
-- lavc/pthread_frame: always transfer stashed hwaccel state
+- avcodec/h264dec: use picture parameters in ff_print_debug_info2()
-- avformat/cafenc: derive Opus frame size from the relevant stream parameters
+- avcodec/vc1dec: ff_print_debug_info() does not support WMV3 field_mode
-- avcodec/arm/sbcenc: avoid callee preserved vfp registers
+- avcodec/frame_thread_encoder: Free AVCodecContext structure on error during init
-- avfilter/vf_scale: overwrite the width and height expressions with the original values
+- avcodec/faxcompr: Check for end of input in cmode == 1 in decode_group3_2d_line()
-- lavc/pthread_frame: avoid leaving stale hwaccel state in worker threads
+- avcodec/vc1dec: Disable error concealment for *IMAGE
-- Update for 5.0.2
+- avcodec/sbrdsp_fixed: Fix negation overflow in sbr_neg_odd_64_c()
-- avformat/asfdec_o: limit recursion depth in asf_read_unknown()
+- avcodec/argo: Check for even dimensions
-- doc/git-howto.texi: Document commit signing
+- avformat/wtvdec: Check for EOF before seeking back in parse_media_type()
-- libavcodec/8bps: Check that line lengths fit within the buffer
+- avformat/mpc8: Check first keyframe position for overflow
-- avcodec/midivid: Perform lzss_uncompress() before ff_reget_buffer()
+- avcodec/exr: Check ac_count
-- libavformat/iff: Check for overflow in body_end calculation
+- avformat/wavdec: Use 64bit in new_pos computation
-- avformat/avidec: Prevent entity expansion attacks
+- avformat/sbgdec: Check for overflow in timestamp preparation
-- avcodec/h263dec: Sanity check against minimal I/P frame size
+- avformat/dsicin: Check packet size for overflow
-- avcodec/hevcdec: Check s->ref in the md5 path similar to hwaccel
+- avformat/dsfdec: Change order of operations in bitrate computation
-- avcodec/mpegaudiodec_template: use unsigned shift in handle_crc()
+- avformat/bfi: check nframes
-- avformat/subviewerdec: Make read_ts() more flexible
+- avformat/avidec: fix position overflow in avi_load_index()
-- avcodec/mjpegdec: bayer and rct are incompatible
+- avformat/asfdec_f: Check sizeX against padding
-- MAINTAINERS: Add ED25519 key for signing my commits in the future
+- avformat/aiffdec: Check for size overflow in header parsing
-- avcodec/hevc_filter: copy_CTB() only within width&height
+- avcodec/aaccoder: Add minimal bias in search_for_ms()
-- avcodec/tiff: Check tile_length and tile_width
+- avformat/mov: Fix incorrect overflow detection in mov_read_sidx()
-- avcodec/mss4: Check image size with av_image_check_size2()
+- avformat/mov: Avoid undefined overflow in time_offset calculation
-- avformat/flvdec: Check for EOF in index reading
+- avfilter/af_drmeter: Check that there is data
-- avformat/nutdec: Check get_packetheader() in mainheader
+- avfilter/vf_fftdnoiz: Use lrintf() in export_row8()
-- avformat/asfdec_f: Use 64bit for packet start time
+- avfilter/vf_mestimate: Check b_count
-- avcodec/exr: Check x/ysize
+- avformat/mov: do not ignore errors in mov_metadata_hmmt()
-- tools/target_dec_fuzzer: Adjust threshold for MMVIDEO
+- avformat/mxfdec: Check size for shrinking
-- avcodec/lagarith: Check dst/src in zero run code
+- avcodec/dnxhddec: check and propagate function return value
-- avcodec/h264dec: Skip late SEI
+- swscale/slice: Fix wrong return on error
-- avcodec/sbrdsp_fixed: Fix integer overflows in sbr_qmf_deint_neg_c()
+- avcodec/aacdec_template: Avoid some invalid values to be set by decode_audio_specific_config_gb()
-- avfilter/vf_signature: Fix integer overflow in filter_frame()
+- swscale/slice: Check slice for allocation failure
-- avformat/rtsp: break on unknown protocols
+- avformat/matroskadec: Fix handling of huge default durations
-- avcodec/hevcdsp_template: stay within tables in sao_band_filter()
+- avcodec/lpc: check for zero err in normalization in compute_lpc_coefs()
-- avcodec/tiff: Check pixel format types for dng
+- avcodec/j2kenc: Check for av_strtok() failure
-- avcodec/qpeldsp: copy less for the mc0x cases
+- avformat/ftp: Check for av_strtok() failure
-- avformat/aaxdec: Check for empty segments
+- tools/cws2fws: Check read() for failure
-- avcodec/ffv1dec: Limit golomb rice coded slices to width 8M
+- avcodec/cpia: Fix missing src_size update
-- avformat/iff: simplify duration calculation
+- avcodec/exr: Better size checks
-- avcodec/wnv1: Check for width =1
+- avcodec/clearvideo: Check tile_size to be not too large
-- avcodec/ffv1dec_template: fix indention
+- avcodec/utils: Use 64bit for intermediate in AV_CODEC_ID_ADPCM_THP* duration calculation
-- avformat/sctp: close socket on errors
+- avformat/aaxdec: Check avio_seek() in header reading
-- avformat/cinedec: Check size and pos more
+- avcodec/hevc_sei: Use get_bits_long() for time_offset_value
-- avcodec/aasc: Fix indention
+- avformat/rmdec: Check old_format len for overflow
-- avcodec/qdrw: adjust max colors to array size
+- avformat/realtextdec: Check the pts difference before using it for the duration computation
-- avcodec/alacdsp: Make intermediates unsigned
+- avformat/qcp: Avoid negative nb_rates
-- avformat/aiffdec: cleanup size handling for extreem cases
+- avformat/pp_bnk: Use 64bit in bitrate computation
-- avformat/matroskadec: avoid integer overflows in SAR computation
+- avformat/nutdec: Check tmp_size
-- avcodec/jpeglsdec: fix end check for xfrm
+- avformat/msf: Check that channels doesnt overflow during extradata construction
-- avcodec/cdgraphics: limit scrolling to the line
+- avformat/subtitles: Check pts difference before use
-- avformat/hls: Limit start_seq_no to one bit less
+- avformat/mpc8: Check for position overflow in mpc8_handle_chunk()
-- avformat/aiffdec: avoid integer overflow in get_meta()
+- avformat/mccdec: Fix overflows in num/den
-- avformat/aaxdec: Check for overlaping segments
+- avformat/iff: Use 64bit in duration computation
-- avformat/ape: more bits in size for less overflows
+- avformat/dxa: Check fps to be within the supported range more precissely
-- avformat/aviobuf: Check buf_size in ffio_ensure_seekback()
+- avcodec/iff: Only write palette to plane 1 if its PAL8
-- avformat/bfi: Check offsets better
+- avformat/tta: Check for EOF in index reading loop
-- avformat/asfdec_f: Check packet_frag_timestamp
+- avfilter/vf_scale: set the RGB matrix coefficients in case of RGB
-- avcodec/texturedspenc: Fix indexing in color distribution determination
+- avfilter/vf_scale: reset color matrix in case of identity & non-RGB
-- avformat/act: Check ff_get_wav_header() for failure
+- ffmpeg: fix order between field order autodetection and override
-- avcodec/libxavs2: Improve r redundancy in occured
+- avcodec/h264_slice: clear old slice POC values on parsing failure
-- avformat/libzmq: Improve r redundancy in occured
+- avfilter/f_metadata: do not return the frame early if there is no metadata
-- avfilter/vf_libplacebo: Match AV_OPT_TYPE_FLOAT to dbl
+- ffbuild: Avoid using the --preprocessor argument to windres
-- avfilter/vsrc_mandelbrot: Check for malloc failure
+- avcodec/crystalhd: signal that the decoder sets all output frame properties
-- avfilter/vf_frei0r: Copy to frame allocated according to frei0r requirements
+- avcodec/cuviddec: signal that the decoder sets all output frame properties
-- avfilter/video: Add ff_default_get_video_buffer2() to set specific alignment
+- avcodec/decode: reindent after the previous commit
-- avformat/genh: Check sample rate
+- avcodec/decode: add an internal codec flag to signal a decoder sets all output frame properties
-- avformat/demux: Use unsigned to check duration vs duration_text
+- avcodec/decode: fetch packets from the pkt_props FIFO on every frame returned
-- avutil/hwcontext_d3d11va: fix texture_infos writes on non-fixed-size pools
+- Update missed irc links
-- avcodec/cuviddec: fix null pointer dereference
+- avformat/rpl: The associative law doesnt hold for signed integers in C
-- avcodec/cuviddec: fix AV1 decoding error
+- avcodec/faxcompr: Check available bits in decode_uncompressed()
-- configure: extend SDL check to accept all 2.x versions
+- avcodec/faxcompr: Check if bits are available before reading in cmode == 9 || cmode == 10
-- lavf/tls_mbedtls: add support for mbedtls version 3
+- avformat/utils: Avoid overflow in codec_info_duration computation for subtitles
-- fate: update reference files after the recent dash manifest muxer changes
+- avformat/utils: check dts/duration to be representable before using them
-- avformat/webmdashenc: fix on-demand profile string
+- avcodec/utils: do "calc from frame_bytes, channels, and block_align" in 64bit
-- avcodec/libdav1d: don't depend on the event flags API to init sequence params the first time
+- avcodec/ttadata: Add sentinel at the end of ff_tta_shift_1
+- avformat/mov: Check for duplicate mdcv
-version 5.0.1:
+- avfilter/vf_dctdnoiz: Check threads
-- avcodec/exr: Avoid signed overflow in displayWindow
+- avfilter/vf_ciescope: Fix undefined behavior in rgb_to_xy() with black
-- avcodec/diracdec: avoid signed integer overflow in global mv
+- avcodec/dpx: fix off by 1 in bits_per_color check
-- avcodec/takdsp: Fix integer overflow in decorrelate_sf()
+- avformat/rpl: Check for EOF and zero framesize
-- avcodec/apedec: fix a integer overflow in long_filter_high_3800()
+- avcodec/vc2enc: Check for non negative slice bounds
-- avdevice/dshow: fix regression
+- avformat/rpl: Use 64bit in bitrate computation and check it
-- avfilter/vf_subtitles: pass storage size to libass
+- avcodec/mpegvideo_enc: Reset stuffing bits if they are not supported
-- avcodec/vp9_superframe_split_bsf: Don't read inexistent data
+- avcodec/svq1enc: Do not print debug RD value before it has been computed
-- avcodec/vp9_superframe_split_bsf: Discard invalid zero-sized frames
+- avcodec/aacpsy: Check bandwidth
-- avcodec/vp9_superframe_bsf: Check for existence of data before reading it
+- avcodec/aacenc: Do not divide by lambda_count if it is 0
-- avcodec/vp9_raw_reorder_bsf: Check for existence of data before reading it
+- avcodec/aacenc: Use FLT_EPSILON for lambda minimum
-- avformat/imf: fix packet pts, dts and muxing
+- avfilter/vf_yadif: Fix handing of tiny images
-- avformat/imf: open resources only when first needed
+- avfilter/vf_vmafmotion: Check dimensions
-- avformat/imf: cosmetics
+- avformat/movenc: Check pal_size before use
-- avformat/imf_cpl: do not use filesize when reading XML file
+- avcodec/lpc: Avoid floating point division by 0
-- avformat/imfdec: Use proper logcontext
+- avcodec/aacpsy: Avoid floating point division by 0 of norm_fac
-- avformat/imfdec: do not use filesize when reading XML file
+- avcodec/aacenc: Avoid 0 lambda
-- doc/utils: add missing 22.2 layout entry
+- avcodec/exr: More strictly check dc_count
-- avcodec/av1: only set the private context pix_fmt field if get_pixel_format() succeeds
+- avcodec/exr: x/ymax cannot be INT_MAX
-- avformat/aqtitledec: Skip unrepresentable durations
+- avformat/avio: Check av_opt_copy() for failure
-- avformat/cafdec: Do not store empty keys in read_info_chunk()
+- avformat/moflex: Remove unneeded format variable
-- avformat/mxfdec: Do not clear array in mxf_read_strong_ref_array() before writing
+- avformat/fifo: check for flushed packets and timeshift
-- avformat/mxfdec: Check for avio_read() failure in mxf_read_strong_ref_array()
+- avcodec/clearvideo: Check for 0 tile_shift
-- avformat/mxfdec: Check count in mxf_read_strong_ref_array()
+- avcodec/vc1: Check remaining bits in ff_vc1_parse_frame_header()
-- avformat/hls: Check target_duration
+- avformat/mov: Ignore duplicate CoLL
-- avcodec/pixlet: Avoid signed integer overflow in scaling in filterfn()
+- avformat/mov: Limit nb_chapter_tracks to input size
-- avformat/matroskadec: Check pre_ns
+- avformat/utils: Use 64bit earlier in r_frame_rate check
-- avcodec/sonic: Use unsigned for predictor_k to avoid undefined behavior
+- avcodec/alsdec: Fix decoding error with mono audio files
-- avcodec/libuavs3d: Check ff_set_dimensions() for failure
+- avformat/mvdec: Check sample rate in parse_audio_var()
-- avcodec/speexdec: Align some comments
+- avcodec/faxcompr: Check for end of bitstream in decode_group3_1d_line() and decode_group3_2d_line()
-- avcodec/speexdec: Use correct doxygen comments
+- avcodec/utils: treat PAL8 for jpegs similar to other colorspaces
-- avcodec/mjpegbdec: Set buf_size
+- avcodec/jpeglsdec: Set alpha plane in PAL8 so image is not 100% transparent
-- avformat/matroskadec: Use rounded down duration in get_cue_desc() check
+- avformat/asfdec_o: Use ff_get_extradata()
-- avcodec/argo: Check packet size
+- avformat/id3v2: Check end for overflow in id3v2_parse()
-- avcodec/g729_parser: Check channels
+- avformat/mxfdec: Fix file position addition
-- avformat/avidec: Check height
+- avformat/wtvdec: Improve size overflow checks in parse_chunks()
-- avformat/rmdec: Better duplicate tags check
+- avcodec/faxcompr: Check remaining bits on error in decode_group3_1d_line()
-- avformat/mov: Disallow empty sidx
+- avformat/mov: check for pts overflow in mov_read_sidx()
-- avformat/argo_cvg:: Fix order of operations in error check in argo_cvg_write_trailer()
+- avcodec/utils: Check ima wav duration for overflow
-- avformat/argo_asf: Fix order of operations in error check in argo_asf_write_trailer()
+- avcodec/rv10: Execute whole size check earlier for rv20
-- avcodec/movtextdec: add () to CMP() macro to avoid unexpected behavior
+- avformat/cafdec: Check channels
-- avformat/matroskadec: Check duration
+- avcodec/exr: increase vlc depth
-- avformat/mov: Corner case encryption error cleanup in mov_read_senc()
+- avcodec/dpx: Check bits_per_color earlier
-- avcodec/jpeglsdec: Fix if( code style
+- avformat/mvi: Check audio_data_size to be non negative
-- avcodec/jpeglsdec: Check get_ur_golomb_jpegls() for error
+- avcodec/nvenc: disable s12m timestamps by default
-- avcodec/motion_est: fix indention of ff_get_best_fcode()
+- aarch64: hevc_idct: Fix overflows in idct_dc
-- avcodec/motion_est: Fix xy indexing on range violation in ff_get_best_fcode()
+- avcodec/vaapi_av1: pass full buffer size for each tile
-- avformat/hls: Use unsigned for iv computation
+- avcodec/videotoolboxenc: #define TARGET_CPU_ARM64 to 0 if not provided by the SDK
-- avcodec/jpeglsdec: Increase range for N in ls_get_code_runterm() by using unsigned
+- lavc/pngdec: fix updating reference frames for APNG_DISPOSE_OP_BACKGROUND
-- avformat/matroskadec: Check desc_bytes
+- ffmpeg: return no chosen output if an uninitialized stream is unavailable
-- avformat/utils: Fix invalid NULL pointer operation in ff_parse_key_value()
+- avcodec/h263, h263data: Move ff_h263_init_rl_inter to h263.c
-- avformat/matroskadec: Fix infinite loop with bz decompression
+- configure: Add missing mpegvideo dependency for IPU decoder
-- avformat/utils: keep chapter monotonicity on chapter updates
+- avcodec/ttmlenc: Don't confuse capabilities and caps_internal
-- avformat/mov: Check size before subtraction
+- avformat/mpegts: add missing sample_rate value to Opus extradata
-- avcodec/cfhd: Avoid signed integer overflow in coeff
+- avformat/movenc: fix writing dOps atoms
-- avcodec/libdav1d: free the Dav1dData packet on dav1d_send_data() failure
+- avcodec/av1_metadata: don't store the inserted TD OBU in stack
-- avcodec/h264_parser: don't alter decoder private data
+- avcodec/nellymoserenc: Fix segfault when using unsupported channels/rate
-- configure: link to libatomic when it's present
+- avutil/cpu: Use HW_NCPUONLINE to detect # of online CPUs with OpenBSD
-- fate/ffmpeg: add missing samples dependency to fate-shortest
+- avcodec/nvenc: fix lossless tuning logic
+- avfilter/overlay_cuda: check av_buffer_ref result
+- avfilter/overlay_cuda: hold explicit reference to hw_device_ctx
-version 5.0:
+- avformat/matroskaenc: Fix leak when writing attachment without filename
-- ADPCM IMA Westwood encoder
-- Westwood AUD muxer
-- ADPCM IMA Acorn Replay decoder
-- Argonaut Games CVG demuxer
-- Argonaut Games CVG muxer
-- Concatf protocol
-- afwtdn audio filter
-- audio and video segment filters
-- Apple Graphics (SMC) encoder
-- hsvkey and hsvhold video filters
-- adecorrelate audio filter
-- atilt audio filter
-- grayworld video filter
-- AV1 Low overhead bitstream format muxer
-- swscale slice threading
-- MSN Siren decoder
-- scharr video filter
-- apsyclip audio filter
-- morpho video filter
-- amr parser
-- (a)latency filters
-- GEM Raster image decoder
-- asdr audio filter
-- speex decoder
-- limitdiff video filter
-- xcorrelate video filter
-- varblur video filter
-- huesaturation video filter
-- colorspectrum source video filter
-- RTP packetizer for uncompressed video (RFC 4175)
-- bitpacked encoder
-- VideoToolbox VP9 hwaccel
-- VideoToolbox ProRes hwaccel
-- support loongarch.
-- aspectralstats audio filter
-- adynamicsmooth audio filter
-- libplacebo filter
-- vflip_vulkan, hflip_vulkan and flip_vulkan filters
-- adynamicequalizer audio filter
-- yadif_videotoolbox filter
-- VideoToolbox ProRes encoder
-- anlmf audio filter
-- IMF demuxer (experimental)


 version 4.4:
 - AudioToolbox output device
MAINTAINERS (10 lines changed)
@@ -138,7 +138,6 @@ Codecs:
 8bps.c Roberto Togni
 8svx.c Jaikrishnan Menon
 aacenc*, aaccoder.c Rostislav Pehlivanov
-adpcm.c Zane van Iperen
 alacenc.c Jaikrishnan Menon
 alsdec.c Thilo Borgmann, Umair Khan
 aptx.c Aurelien Jacobs
@@ -226,7 +225,7 @@ Codecs:
 ptx.c Ivo van Poorten
 qcelp* Reynaldo H. Verdejo Pinochet
 qdm2.c, qdm2data.h Roberto Togni
-qsv* Mark Thompson, Zhong Li, Haihao Xiang
+qsv* Mark Thompson, Zhong Li
 qtrle.c Mike Melanson
 ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
 resample2.c Michael Niedermayer
@@ -274,8 +273,8 @@ Hardware acceleration:
 dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
 d3d11va* Steve Lhomme
 mediacodec* Matthieu Bouron, Aman Gupta
-vaapi* Haihao Xiang
+vaapi* Gwenole Beauchesne
-vaapi_encode* Mark Thompson, Haihao Xiang
+vaapi_encode* Mark Thompson
 vdpau* Philip Langdale, Carl Eugen Hoyos
 videotoolbox* Rick Kern, Aman Gupta

@@ -399,7 +398,6 @@ Muxers/Demuxers:
 apngdec.c Benoit Fouet
 argo_asf.c Zane van Iperen
 argo_brp.c Zane van Iperen
-argo_cvg.c Zane van Iperen
 ass* Aurelien Jacobs
 astdec.c Paul B Mahol
 astenc.c James Almer
@@ -611,14 +609,12 @@ Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
 FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
 Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
 Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
-Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
 Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
 James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
 Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
 Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
 Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
 Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
-DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
 Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
 Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
 Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Makefile (7 lines changed)
@@ -13,7 +13,6 @@ vpath %.v $(SRC_PATH)
 vpath %.texi $(SRC_PATH)
 vpath %.cu $(SRC_PATH)
 vpath %.ptx $(SRC_PATH)
-vpath %.metal $(SRC_PATH)
 vpath %/fate_config.sh.template $(SRC_PATH)

 TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
@@ -24,6 +23,7 @@ FFLIBS-$(CONFIG_AVDEVICE) += avdevice
 FFLIBS-$(CONFIG_AVFILTER) += avfilter
 FFLIBS-$(CONFIG_AVFORMAT) += avformat
 FFLIBS-$(CONFIG_AVCODEC) += avcodec
+FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
 FFLIBS-$(CONFIG_POSTPROC) += postproc
 FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
 FFLIBS-$(CONFIG_SWSCALE) += swscale
@@ -65,8 +65,6 @@ tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o $(FF_DEP_LIBS)

 tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
 tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
-tools/scale_slice_test$(EXESUF): $(FF_DEP_LIBS)
-tools/scale_slice_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
 tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
 tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
 tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
@@ -90,8 +88,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
 ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
 ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
 MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
-MMI-OBJS LSX-OBJS LASX-OBJS OBJS SLIBOBJS SHLIBOBJS \
+MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
-STLIBOBJS HOSTOBJS TESTOBJS

 define RESET
 $(1) :=
OAT.xml (2 lines changed)
@@ -75,8 +75,6 @@ Note:If the text contains special characters, please escape them according to th
 <policyitem type="compatibility" name="GPL" path="libavfilter/vsrc_mptestsrc.c" rule="may" group="defaultGroup" desc="The files involved in compilation are the GPL protocol.They are not used."/>
 <policyitem type="compatibility" name="GPL" path="libpostproc/.*" rule="may" group="defaultGroup" desc="The files involved in compilation are the GPL protocol.They are not used."/>
 <policyitem type="compatibility" name="GPL" path="libswresample/tests/swresample.c" rule="may" group="defaultGroup" desc="The files involved in compilation are the GPL protocol.They are not used."/>
-<policyitem type="compatibility" name="GPL" path="libavutil/macos_kperf.c" rule="may" group="defaultGroup" desc="The files involved in compilation are the GPL protocol.They are not used."/>
-<policyitem type="compatibility" name="GPL" path="libavutil/macos_kperf.h" rule="may" group="defaultGroup" desc="The files involved in compilation are the GPL protocol.They are not used."/>
 </policy>
 </policylist>
 <filefilterlist>
@@ -3,8 +3,8 @@
 "Name": "FFmpeg",
 "License": "LGPL V2.1/LGPL V3.0/GPL V2.0/GPL V3.0",
 "License File": "COPYING.LGPLv2.1/COPYING.LGPLv3/COPYING.GPLv2/COPYING.GPLv3",
-"Version Number": "5.0.2",
+"Version Number": "4.4.1",
 "Upstream URL": "http://www.ffmpeg.org/",
 "Description": "FFmpeg is the leading multimedia framework, able to decode, encode, transcode, mux, demux, stream, filter and play pretty much anything that humans and machines have created."
 }
 ]
@@ -9,7 +9,7 @@ such as audio, video, subtitles and related metadata.
 * `libavcodec` provides implementation of a wider range of codecs.
 * `libavformat` implements streaming protocols, container formats and basic I/O access.
 * `libavutil` includes hashers, decompressors and miscellaneous utility functions.
-* `libavfilter` provides means to alter decoded audio and video through a directed graph of connected filters.
+* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
 * `libavdevice` provides an abstraction to access capture and playback devices.
 * `libswresample` implements audio mixing and resampling routines.
 * `libswscale` implements color conversion and scaling routines.
@@ -1,10 +1,10 @@

-┌────────────────────────────────────────┐
+┌────────────────────────────────────┐
-│ RELEASE NOTES for FFmpeg 5.0 "Lorentz" │
+│ RELEASE NOTES for FFmpeg 4.4 "Rao" │
-└────────────────────────────────────────┘
+└────────────────────────────────────┘

-The FFmpeg Project proudly presents FFmpeg 5.0 "Lorentz", about 9
+The FFmpeg Project proudly presents FFmpeg 4.4 "Rao", about 10
-months after the release of FFmpeg 4.4.
+months after the release of FFmpeg 4.3.

 A complete Changelog is available at the root of the project, and the
 complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git
@@ -96,7 +96,7 @@ do { \
 atomic_load(object)

 #define atomic_exchange(object, desired) \
-InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
+InterlockedExchangePointer(object, desired);

 #define atomic_exchange_explicit(object, desired, order) \
 atomic_exchange(object, desired)
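A minimal sketch of the difference between the two macro bodies above, assuming an MSVC/Win32 target (only InterlockedExchangePointer() is the real Win32 call; the surrounding helper names are hypothetical): the Win32 function is declared as taking (PVOID volatile *, PVOID), so a non-PVOID object such as an intptr_t needs the explicit casts, and a trailing semicolon inside the macro would break uses of the macro as an expression.

```c
#include <windows.h>
#include <stdint.h>

/* Cast-carrying variant, mirroring the line removed by this rollback. */
#define compat_atomic_exchange(object, desired) \
    InterlockedExchangePointer((PVOID volatile *)(object), (PVOID)(desired))

static intptr_t swap_flag(intptr_t *flag, intptr_t value)
{
    /* Atomically stores value into *flag and returns the previous value. */
    return (intptr_t)compat_atomic_exchange(flag, value);
}
```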
@@ -184,6 +184,5 @@ static inline __device__ double fabs(double a) { return __builtin_fabs(a); }

 static inline __device__ float __sinf(float a) { return __nvvm_sin_approx_f(a); }
 static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a); }
-static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }

 #endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
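As a brief aside on the removed `__expf` helper (the identity below is standard math, not something stated in the diff): it maps the exponential onto the hardware base-2 approximation `__nvvm_ex2_approx_f()` via

\[ e^{a} = 2^{\,a \cdot \log_2 e}, \]

which is why the argument is pre-scaled by `(float)__builtin_log2(__builtin_exp(1))`, i.e. log2(e), before the base-2 intrinsic is applied.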
@@ -59,7 +59,7 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
 * recommends to provide _snprintf/_vsnprintf() a buffer size that
 * is one less than the actual buffer, and zero it before calling
 * _snprintf/_vsnprintf() to workaround this problem.
-* See https://web.archive.org/web/20151214111935/http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
+* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
 memset(s, 0, n);
 va_copy(ap_copy, ap);
 ret = _vsnprintf(s, n - 1, fmt, ap_copy);
@@ -42,7 +42,7 @@ static inline HMODULE win32_dlopen(const char *name)
 DWORD pathlen;
 if (utf8towchar(name, &name_w))
 goto exit;
-path = (wchar_t *)av_calloc(MAX_PATH, sizeof(wchar_t));
+path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t));
 // Try local directory first
 pathlen = GetModuleFileNameW(NULL, path, MAX_PATH);
 pathlen = wcsrchr(path, '\\') - path;
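For context on the allocation call swapped by the hunk above: the removed doc/APIchanges entry further down notes that av_mallocz_array() "is identical to av_calloc()", so the two spellings request the same zero-initialized MAX_PATH-element buffer. A hedged sketch (the helper name here is hypothetical):

```c
#include <windows.h>         /* MAX_PATH */
#include <libavutil/mem.h>   /* av_calloc(), av_mallocz_array(), av_free() */

static wchar_t *alloc_module_path(void)
{
    /* Newer spelling (removed by the rollback): */
    wchar_t *path = av_calloc(MAX_PATH, sizeof(wchar_t));
    /* Older, equivalent spelling (restored by the rollback) would be:
     *     path = av_mallocz_array(MAX_PATH, sizeof(wchar_t));
     */
    return path;             /* NULL on failure; release with av_free() */
}
```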
doc/APIchanges (211 lines changed)
@@ -2,212 +2,19 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
 since the last major version increase or the API was added.

 The last version increases were:
-libavcodec: 2021-04-27
+libavcodec: 2017-10-21
-libavdevice: 2021-04-27
+libavdevice: 2017-10-21
-libavfilter: 2021-04-27
+libavfilter: 2017-10-21
-libavformat: 2021-04-27
+libavformat: 2017-10-21
-libpostproc: 2021-04-27
+libavresample: 2017-10-21
-libswresample: 2021-04-27
+libpostproc: 2017-10-21
-libswscale: 2021-04-27
+libswresample: 2017-10-21
-libavutil: 2021-04-27
+libswscale: 2017-10-21
+libavutil: 2017-10-21


 API changes, most recent first:

-2022-01-04 - 78dc21b123e - lavu 57.16.100 - frame.h
-Add AV_FRAME_DATA_DOVI_METADATA.

-2022-01-03 - 70f318e6b6c - lavf 59.13.100 - avformat.h
-Add AVFMT_EXPERIMENTAL flag.

-2021-12-22 - b7e1ec7bda9 - lavu 57.13.100 - hwcontext_videotoolbox.h
-Add av_vt_pixbuf_set_attachments

-2021-12-22 - 69bd95dcd8d - lavu 57.13.100 - hwcontext_videotoolbox.h
-Add av_map_videotoolbox_chroma_loc_from_av
-Add av_map_videotoolbox_color_matrix_from_av
-Add av_map_videotoolbox_color_primaries_from_av
-Add av_map_videotoolbox_color_trc_from_av

-2021-12-21 - ffbab99f2c2 - lavu 57.12.100 - cpu.h
-Add AV_CPU_FLAG_SLOW_GATHER.

-2021-12-20 - 278068dc60d - lavu 57.11.101 - display.h
-Modified the documentation of av_display_rotation_set()
-to match its longstanding actual behaviour of treating
-the angle as directed clockwise.

-2021-12-12 - 64834bb86a1 - lavf 59.10.100 - avformat.h
-Add AVFormatContext io_close2 which returns an int

-2021-12-10 - f45cbb775e4 - lavu 57.11.100 - hwcontext_vulkan.h
-Add AVVkFrame.offset and AVVulkanFramesContext.flags.

-2021-12-04 - b9c928a486f - lavfi 8.19.100 - avfilter.h
-Add AVFILTER_FLAG_METADATA_ONLY.

-2021-12-03 - b236ef0a594 - lavu 57.10.100 - frame.h
-Add AVFrame.time_base

-2021-11-22 - b2cd1fb2ec6 - lavu 57.9.100 - pixfmt.h
-Add AV_PIX_FMT_P210, AV_PIX_FMT_P410, AV_PIX_FMT_P216, and AV_PIX_FMT_P416.

-2021-11-17 - 54e65aa38ab - lavf 57.9.100 - frame.h
-Add AV_FRAME_DATA_DOVI_RPU_BUFFER.

-2021-11-16 - ed75a08d36c - lavf 59.9.100 - avformat.h
-Add av_stream_get_class(). Schedule adding AVStream.av_class at libavformat
-major version 60.
-Add av_disposition_to_string() and av_disposition_from_string().
-Add "disposition" AVOption to AVStream's class.

-2021-11-12 - 8478d60d5b5 - lavu 57.8.100 - hwcontext_vulkan.h
-Added AVVkFrame.sem_value, AVVulkanDeviceContext.queue_family_encode_index,
-nb_encode_queues, queue_family_decode_index, and nb_decode_queues.

-2021-10-18 - 682bafdb125 - lavf 59.8.100 - avio.h
-Introduce public bytes_{read,written} statistic fields to AVIOContext.

-2021-10-13 - a5622ed16f8 - lavf 59.7.100 - avio.h
-Deprecate AVIOContext.written. Originally added as a private entry in
-commit 3f75e5116b900f1428aa13041fc7d6301bf1988a, its grouping with
-the comment noting its private state was missed during merging of the field
-from Libav (most likely due to an already existing field in between).

-2021-09-21 - 0760d9153c3 - lavu 57.7.100 - pixfmt.h
-Add AV_PIX_FMT_X2BGR10.

-2021-09-20 - 8d5de914d31 - lavu 57.6.100 - mem.h
-Deprecate av_mallocz_array() as it is identical to av_calloc().

-2021-09-20 - 176b8d785bf - lavc 59.9.100 - avcodec.h
-Deprecate AVCodecContext.sub_text_format and the corresponding
-AVOptions. It is unused since the last major bump.

-2021-09-20 - dd846bc4a91 - lavc 59.8.100 - avcodec.h codec.h
-Deprecate AV_CODEC_FLAG_TRUNCATED and AV_CODEC_CAP_TRUNCATED,
-as they are redundant with parsers.

-2021-09-17 - ccfdef79b13 - lavu 57.5.101 - buffer.h
-Constified the input parameters in av_buffer_replace(), av_buffer_ref(),
-and av_buffer_pool_buffer_get_opaque().

-2021-09-08 - 4f78711f9c2 - lavu 57.5.100 - hwcontext_d3d11va.h
-Add AVD3D11VAFramesContext.texture_infos

-2021-09-06 - 42cd64c1826 - lsws 6.1.100 - swscale.h
-Add AVFrame-based scaling API:
-- sws_scale_frame()
-- sws_frame_start()
-- sws_frame_end()
-- sws_send_slice()
-- sws_receive_slice()
-- sws_receive_slice_alignment()

-2021-09-02 - cbf111059d2 - lavc 59.7.100 - avcodec.h
-Incremented the number of elements of AVCodecParser.codec_ids to seven.

-2021-08-24 - 590a7e02f04 - lavc 59.6.100 - avcodec.h
-Add FF_CODEC_PROPERTY_FILM_GRAIN

-2021-08-20 - 7c5f998196d - lavfi 8.3.100 - avfilter.H
-Add avfilter_filter_pad_count() as a replacement for avfilter_pad_count().
-Deprecate avfilter_pad_count().

-2021-08-17 - 8c53b145993 - lavu 57.4.101 - opt.h
-av_opt_copy() now guarantees that allocated src and dst options
-don't alias each other even on error.

-2021-08-14 - d5de9965ef6 - lavu 57.4.100 - imgutils.h
-Add av_image_copy_plane_uc_from()

-2021-08-02 - a1a0fddfd05 - lavc 59.4.100 - packet.h
-Add AVPacket.opaque, AVPacket.opaque_ref, AVPacket.time_base.

-2021-07-23 - 2dd8acbe800 - lavu 57.3.100 - common.h macros.h
-Move several macros (AV_NE, FFDIFFSIGN, FFMAX, FFMAX3, FFMIN, FFMIN3,
-FFSWAP, FF_ARRAY_ELEMS, MKTAG, MKBETAG) from common.h to macros.h.

-2021-07-22 - e3b5ff17c2e - lavu 57.2.100 - film_grain_params.h
-Add AV_FILM_GRAIN_PARAMS_H274, AVFilmGrainH274Params

-2021-07-19 - c1bf56a526f - lavu 57.1.100 - cpu.h
-Add av_cpu_force_count()

-2021-06-17 - aca923b3653 - lavc 59.2.100 - packet.h
-Add AV_PKT_DATA_DYNAMIC_HDR10_PLUS

-2021-06-09 - 2cccab96f6f - lavf 59.3.100 - avformat.h
-Add pts_wrap_bits to AVStream

-2021-06-10 - 7c9763070d9 - lavc 59.1.100 - avcodec.h codec.h
-Move av_get_profile_name() from avcodec.h to codec.h.

-2021-06-10 - bb3648e6766 - lavc 59.1.100 - avcodec.h codec_par.h
-Move av_get_audio_frame_duration2() from avcodec.h to codec_par.h.

-2021-06-10 - 881db34f6a0 - lavc 59.1.100 - avcodec.h codec_id.h
-Move av_get_bits_per_sample(), av_get_exact_bits_per_sample(),
-avcodec_profile_name(), and av_get_pcm_codec() from avcodec.h
-to codec_id.h.

-2021-06-10 - ff0a96046d8 - lavc 59.1.100 - avcodec.h defs.h
-Add new installed header defs.h. The following definitions are moved
-into it from avcodec.h:
-- AVDiscard
-- AVAudioServiceType
-- AVPanScan
-- AVCPBProperties and av_cpb_properties_alloc()
-- AVProducerReferenceTime
-- av_xiphlacing()

-2021-04-27 - cb3ac722f4 - lavc 59.0.100 - avcodec.h
-Constified AVCodecParserContext.parser.

-2021-04-27 - 8b3e6ce5f4 - lavd 59.0.100 - avdevice.h
-The av_*_device_next API functions now accept and return
-pointers to const AVInputFormat resp. AVOutputFormat.

-2021-04-27 - d7e0d428fa - lavd 59.0.100 - avdevice.h
-avdevice_list_input_sources and avdevice_list_output_sinks now accept
-pointers to const AVInputFormat resp. const AVOutputFormat.

-2021-04-27 - 46dac8cf3d - lavf 59.0.100 - avformat.h
-av_find_best_stream now uses a const AVCodec ** parameter
-for the returned decoder.

-2021-04-27 - 626535f6a1 - lavc 59.0.100 - codec.h
-avcodec_find_encoder_by_name(), avcodec_find_encoder(),
-avcodec_find_decoder_by_name() and avcodec_find_decoder()
-now return a pointer to const AVCodec.

-2021-04-27 - 14fa0a4efb - lavf 59.0.100 - avformat.h
-Constified AVFormatContext.*_codec.

-2021-04-27 - 56450a0ee4 - lavf 59.0.100 - avformat.h
-Constified the pointers to AVInputFormats and AVOutputFormats
-in AVFormatContext, avformat_alloc_output_context2(),
-av_find_input_format(), av_probe_input_format(),
-av_probe_input_format2(), av_probe_input_format3(),
-av_probe_input_buffer2(), av_probe_input_buffer(),
-avformat_open_input(), av_guess_format() and av_guess_codec().
-Furthermore, constified the AVProbeData in av_probe_input_format(),
-av_probe_input_format2() and av_probe_input_format3().

-2021-04-19 - 18af1ea8d1 - lavu 56.74.100 - tx.h
-Add AV_TX_FULL_IMDCT and AV_TX_UNALIGNED.

-2021-04-17 - f1bf465aa0 - lavu 56.73.100 - frame.h detection_bbox.h
-Add AV_FRAME_DATA_DETECTION_BBOXES

-2021-04-06 - 557953a397 - lavf 58.78.100 - avformat.h
-Add avformat_index_get_entries_count(), avformat_index_get_entry(),
-and avformat_index_get_entry_from_timestamp().

-2021-03-21 - a77beea6c8 - lavu 56.72.100 - frame.h
-Deprecated av_get_colorspace_name().
-Use av_color_space_name() instead.

 -------- 8< --------- FFmpeg 4.4 was cut here -------- 8< ---------

 2021-03-19 - e8c0bca6bd - lavu 56.69.100 - adler32.h
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
 # could be handy for archiving the generated documentation or if some version
 # control system is used.

-PROJECT_NUMBER = 5.0.2
+PROJECT_NUMBER = 4.4.1

 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
|
@@ -102,7 +102,7 @@ DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
 
 doc/doxy/html: TAG = DOXY
 doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
-$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
+$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);
 
 install-doc: install-html install-man
 
@@ -81,7 +81,7 @@ Top-left position.
 @end table
 
 @item tick_rate
-Set the tick rate (@emph{time_scale / num_units_in_display_tick}) in
+Set the tick rate (@emph{num_units_in_display_tick / time_scale}) in
 the timing info in the sequence header.
 @item num_ticks_per_picture
 Set the number of ticks in each picture, to indicate that the stream
@@ -244,7 +244,7 @@ Set the chroma sample location in the stream (see H.264 section
 E.2.1 and figure E-1).
 
 @item tick_rate
-Set the tick rate (time_scale / num_units_in_tick) in the VUI
+Set the tick rate (num_units_in_tick / time_scale) in the VUI
 parameters. This is the smallest time unit representable in the
 stream, and in many cases represents the field rate of the stream
 (double the frame rate).
@@ -253,11 +253,6 @@ Set whether the stream has fixed framerate - typically this indicates
 that the framerate is exactly half the tick rate, but the exact
 meaning is dependent on interlacing and the picture structure (see
 H.264 section E.2.1 and table E-6).
-@item zero_new_constraint_set_flags
-Zero constraint_set4_flag and constraint_set5_flag in the SPS. These
-bits were reserved in a previous version of the H.264 spec, and thus
-some hardware decoders require these to be zero. The result of zeroing
-this is still a valid bitstream.
 
 @item crop_left
 @item crop_right
@@ -352,8 +347,8 @@ Set the chroma sample location in the stream (see H.265 section
 E.3.1 and figure E.1).
 
 @item tick_rate
-Set the tick rate in the VPS and VUI parameters (time_scale /
-num_units_in_tick). Combined with @option{num_ticks_poc_diff_one}, this can
+Set the tick rate in the VPS and VUI parameters (num_units_in_tick /
+time_scale). Combined with @option{num_ticks_poc_diff_one}, this can
 set a constant framerate in the stream. Note that it is likely to be
 overridden by container parameters when the stream is in a container.
 
@@ -534,67 +529,20 @@ container. Can be used for fuzzing or testing error resilience/concealment.
 Parameters:
 @table @option
 @item amount
-Accepts an expression whose evaluation per-packet determines how often bytes in that
-packet will be modified. A value below 0 will result in a variable frequency.
-Default is 0 which results in no modification. However, if neither amount nor drop is specified,
-amount will be set to @var{-1}. See below for accepted variables.
+A numeral string, whose value is related to how often output bytes will
+be modified. Therefore, values below or equal to 0 are forbidden, and
+the lower the more frequent bytes will be modified, with 1 meaning
+every byte is modified.
-@item drop
-Accepts an expression evaluated per-packet whose value determines whether that packet is dropped.
-Evaluation to a positive value results in the packet being dropped. Evaluation to a negative
-value results in a variable chance of it being dropped, roughly inverse in proportion to the magnitude
-of the value. Default is 0 which results in no drops. See below for accepted variables.
 @item dropamount
-Accepts a non-negative integer, which assigns a variable chance of it being dropped, roughly inverse
-in proportion to the value. Default is 0 which results in no drops. This option is kept for backwards
-compatibility and is equivalent to setting drop to a negative value with the same magnitude
+A numeral string, whose value is related to how often packets will be dropped.
+Therefore, values below or equal to 0 are forbidden, and the lower the more
+frequent packets will be dropped, with 1 meaning every packet is dropped.
-i.e. @code{dropamount=4} is the same as @code{drop=-4}. Ignored if drop is also specified.
 @end table
 
-Both @code{amount} and @code{drop} accept expressions containing the following variables:
+The following example applies the modification to every byte but does not drop
+any packets.
-@table @samp
-@item n
-The index of the packet, starting from zero.
-@item tb
-The timebase for packet timestamps.
-@item pts
-Packet presentation timestamp.
-@item dts
-Packet decoding timestamp.
-@item nopts
-Constant representing AV_NOPTS_VALUE.
-@item startpts
-First non-AV_NOPTS_VALUE PTS seen in the stream.
-@item startdts
-First non-AV_NOPTS_VALUE DTS seen in the stream.
-@item duration
-@itemx d
-Packet duration, in timebase units.
-@item pos
-Packet position in input; may be -1 when unknown or not set.
-@item size
-Packet size, in bytes.
-@item key
-Whether packet is marked as a keyframe.
-@item state
-A pseudo random integer, primarily derived from the content of packet payload.
-@end table
-
-@subsection Examples
-Apply modification to every byte but don't drop any packets.
 @example
-ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
+ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
-@end example
-
-Drop every video packet not marked as a keyframe after timestamp 30s but do not
-modify any of the remaining packets.
-@example
-ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
-@end example
-
-Drop one second of audio every 10 seconds and add some random noise to the rest.
-@example
-ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
 @end example
 
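Both wordings above describe the noise filter in terms of the ffmpeg command line. For readers driving it from the library instead, a rough sketch of pushing one packet through the same filter via the BSF API; noise_one_packet is an illustrative name, error checks are omitted, and "amount" is the option documented above:

#include <libavcodec/bsf.h>
#include <libavutil/opt.h>

/* Run a single packet through the "noise" bitstream filter in place. */
static int noise_one_packet(AVCodecParameters *par, AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("noise");
    AVBSFContext *bsf = NULL;
    int ret;

    av_bsf_alloc(f, &bsf);
    avcodec_parameters_copy(bsf->par_in, par);
    av_opt_set(bsf->priv_data, "amount", "1", 0);  /* touch every byte */
    av_bsf_init(bsf);

    av_bsf_send_packet(bsf, pkt);                  /* filter takes the reference */
    ret = av_bsf_receive_packet(bsf, pkt);         /* and hands back the result  */
    av_bsf_free(&bsf);
    return ret;
}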
 @section null
 
@@ -782,9 +730,6 @@ The timebase of stream packet belongs.
 
 @item SR
 The sample rate of stream packet belongs.
-
-@item NOPTS
-The AV_NOPTS_VALUE constant.
 @end table
 
 @anchor{text2movsub}

123 doc/codecs.texi
@@ -144,6 +144,21 @@ Default value is 0.
 @item b_qfactor @var{float} (@emph{encoding,video})
 Set qp factor between P and B frames.
 
+@item b_strategy @var{integer} (@emph{encoding,video})
+Set strategy to choose between I/P/B-frames.
+
+@item ps @var{integer} (@emph{encoding,video})
+Set RTP payload size in bytes.
+
+@item mv_bits @var{integer}
+@item header_bits @var{integer}
+@item i_tex_bits @var{integer}
+@item p_tex_bits @var{integer}
+@item i_count @var{integer}
+@item p_count @var{integer}
+@item skip_count @var{integer}
+@item misc_bits @var{integer}
+@item frame_bits @var{integer}
 @item codec_tag @var{integer}
 @item bug @var{flags} (@emph{decoding,video})
 Workaround not auto detected encoder bugs.
@@ -233,6 +248,9 @@ consider things that a sane encoder should not do as an error
 
 @item block_align @var{integer}
 
+@item mpeg_quant @var{integer} (@emph{encoding,video})
+Use MPEG quantizers instead of H.263.
+
 @item rc_override_count @var{integer}
 
 @item maxrate @var{integer} (@emph{encoding,audio,video})
@@ -338,6 +356,19 @@ favor predicting from the previous frame instead of the current
 
 @item bits_per_coded_sample @var{integer}
 
+@item pred @var{integer} (@emph{encoding,video})
+Set prediction method.
+
+Possible values:
+@table @samp
+@item left
+
+@item plane
+
+@item median
+
+@end table
+
 @item aspect @var{rational number} (@emph{encoding,video})
 Set sample aspect ratio.
 
@@ -554,6 +585,9 @@ sab diamond motion estimation
 @item last_pred @var{integer} (@emph{encoding,video})
 Set amount of motion predictors from the previous frame.
 
+@item preme @var{integer} (@emph{encoding,video})
+Set pre motion estimation.
+
 @item precmp @var{integer} (@emph{encoding,video})
 Set pre motion estimation compare function.
 
@@ -602,6 +636,23 @@ Set limit motion vectors range (1023 for DivX player).
 
 @item global_quality @var{integer} (@emph{encoding,audio,video})
 
+@item coder @var{integer} (@emph{encoding,video})
+
+Possible values:
+@table @samp
+@item vlc
+variable length coder / huffman coder
+@item ac
+arithmetic coder
+@item raw
+raw (no encoding)
+@item rle
+run-length coder
+@end table
+
+@item context @var{integer} (@emph{encoding,video})
+Set context model.
+
 @item slice_flags @var{integer}
 
 @item mbd @var{integer} (@emph{encoding,video})
@@ -617,6 +668,12 @@ use fewest bits
 use best rate distortion
 @end table
 
+@item sc_threshold @var{integer} (@emph{encoding,video})
+Set scene change threshold.
+
+@item nr @var{integer} (@emph{encoding,video})
+Set noise reduction.
+
 @item rc_init_occupancy @var{integer} (@emph{encoding,video})
 Set number of bits which should be loaded into the rc buffer before
 decoding starts.
@@ -704,12 +761,64 @@ Possible values:
 @item lowres @var{integer} (@emph{decoding,audio,video})
 Decode at 1= 1/2, 2=1/4, 3=1/8 resolutions.
 
+@item skip_threshold @var{integer} (@emph{encoding,video})
+Set frame skip threshold.
+
+@item skip_factor @var{integer} (@emph{encoding,video})
+Set frame skip factor.
+
+@item skip_exp @var{integer} (@emph{encoding,video})
+Set frame skip exponent.
+Negative values behave identical to the corresponding positive ones, except
+that the score is normalized.
+Positive values exist primarily for compatibility reasons and are not so useful.
+
+@item skipcmp @var{integer} (@emph{encoding,video})
+Set frame skip compare function.
+
+Possible values:
+@table @samp
+@item sad
+sum of absolute differences, fast (default)
+@item sse
+sum of squared errors
+@item satd
+sum of absolute Hadamard transformed differences
+@item dct
+sum of absolute DCT transformed differences
+@item psnr
+sum of squared quantization errors (avoid, low quality)
+@item bit
+number of bits needed for the block
+@item rd
+rate distortion optimal, slow
+@item zero
+0
+@item vsad
+sum of absolute vertical differences
+@item vsse
+sum of squared vertical differences
+@item nsse
+noise preserving sum of squared differences
+@item w53
+5/3 wavelet, only used in snow
+@item w97
+9/7 wavelet, only used in snow
+@item dctmax
+
+@item chroma
+
+@end table
+
 @item mblmin @var{integer} (@emph{encoding,video})
 Set min macroblock lagrange factor (VBR).
 
 @item mblmax @var{integer} (@emph{encoding,video})
 Set max macroblock lagrange factor (VBR).
 
+@item mepc @var{integer} (@emph{encoding,video})
+Set motion estimation bitrate penalty compensation (1.0 = 256).
+
 @item skip_loop_filter @var{integer} (@emph{decoding,video})
 @item skip_idct @var{integer} (@emph{decoding,video})
 @item skip_frame @var{integer} (@emph{decoding,video})
@@ -749,17 +858,31 @@ Default value is @samp{default}.
 @item bidir_refine @var{integer} (@emph{encoding,video})
 Refine the two motion vectors used in bidirectional macroblocks.
 
+@item brd_scale @var{integer} (@emph{encoding,video})
+Downscale frames for dynamic B-frame decision.
+
 @item keyint_min @var{integer} (@emph{encoding,video})
 Set minimum interval between IDR-frames.
 
 @item refs @var{integer} (@emph{encoding,video})
 Set reference frames to consider for motion compensation.
 
+@item chromaoffset @var{integer} (@emph{encoding,video})
+Set chroma qp offset from luma.
+
 @item trellis @var{integer} (@emph{encoding,audio,video})
 Set rate-distortion optimal quantization.
 
 @item mv0_threshold @var{integer} (@emph{encoding,video})
+@item b_sensitivity @var{integer} (@emph{encoding,video})
+Adjust sensitivity of b_frame_strategy 1.
+
 @item compression_level @var{integer} (@emph{encoding,audio,video})
+@item min_prediction_order @var{integer} (@emph{encoding,audio})
+@item max_prediction_order @var{integer} (@emph{encoding,audio})
+@item timecode_frame_start @var{integer} (@emph{encoding,video})
+Set GOP timecode frame start number, in non drop frame format.
+
 @item bits_per_raw_sample @var{integer}
 @item channel_layout @var{integer} (@emph{decoding/encoding,audio})
 
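The options reinstated above are ordinary AVOptions, so besides the command line they can be supplied as strings when the encoder is opened. A small sketch, assuming enc was allocated from codec with avcodec_alloc_context3(); the concrete keys and values are only examples and not part of this change:

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Hand generic encoding options to avcodec_open2() via a dictionary. */
static int open_encoder_with_options(AVCodecContext *enc, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "b_strategy", "1", 0);     /* I/P/B decision strategy */
    av_dict_set(&opts, "sc_threshold", "40", 0);  /* scene change threshold  */
    ret = avcodec_open2(enc, codec, &opts);
    av_dict_free(&opts);                          /* leftovers were unknown keys */
    return ret;
}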
@@ -76,19 +76,13 @@ The following options are supported by the libdav1d wrapper.
 
 @item framethreads
 Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
-This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
-global option @code{threads} instead.
 
 @item tilethreads
 Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
-This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
-global option @code{threads} instead.
 
 @item filmgrain
 Apply film grain to the decoded video if present in the bitstream. Defaults to the
 internal default of the library.
-This option is deprecated and will be removed in the future. See the global option
-@code{export_side_data} to export Film Grain parameters instead of applying it.
 
 @item oppoint
 Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
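The libdav1d options above live on the wrapper's private context, so they can also be set directly on priv_data before the decoder is opened. A hedged sketch; the function name and the value 0 are illustrative:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Allocate a libdav1d decoder context and pick an operating point. */
static AVCodecContext *alloc_dav1d_decoder(void)
{
    const AVCodec *dec = avcodec_find_decoder_by_name("libdav1d");
    AVCodecContext *ctx;

    if (!dec)
        return NULL;                               /* FFmpeg built without libdav1d */
    ctx = avcodec_alloc_context3(dec);
    if (ctx)
        av_opt_set_int(ctx->priv_data, "oppoint", 0, 0);
    return ctx;                                    /* open with avcodec_open2() */
}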
@@ -299,8 +293,6 @@ Enabled by default.
 @table @option
 @item compute_clut
 @table @option
-@item -2
-Compute clut once if no matching CLUT is in the stream.
 @item -1
 Compute clut if no matching CLUT is in the stream.
 @item 0
@@ -25,13 +25,6 @@ Audible Format 2, 3, and 4 demuxer.
 
 This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
 
-@section aac
-
-Raw Audio Data Transport Stream AAC demuxer.
-
-This demuxer is used to demux an ADTS input containing a single AAC stream
-alongwith any ID3v1/2 or APE tags in it.
-
 @section apng
 
 Animated Portable Network Graphics demuxer.
@@ -44,15 +37,12 @@ between the last fcTL and IEND chunks.
 
 @table @option
 @item -ignore_loop @var{bool}
-Ignore the loop variable in the file if set. Default is enabled.
+Ignore the loop variable in the file if set.
 
 @item -max_fps @var{int}
-Maximum framerate in frames per second. Default of 0 imposes no limit.
+Maximum framerate in frames per second (0 for no limit).
 
 @item -default_fps @var{int}
 Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible). Default is 15.
+(0 meaning as fast as possible).
 
 @end table
 
 @section asf
@@ -103,7 +93,8 @@ backslash or single quotes.
 All subsequent file-related directives apply to that file.
 
 @item @code{ffconcat version 1.0}
-Identify the script type and version.
+Identify the script type and version. It also sets the @option{safe} option
+to 1 if it was -1.
 
 To make FFmpeg recognize the format automatically, this directive must
 appear exactly as is (no extra space or byte-order-mark) on the very first
@@ -157,16 +148,6 @@ directive) will be reduced based on their specified Out point.
 Metadata of the packets of the file. The specified metadata will be set for
 each file packet. You can specify this directive multiple times to add multiple
 metadata entries.
-This directive is deprecated, use @code{file_packet_meta} instead.
-
-@item @code{file_packet_meta @var{key} @var{value}}
-Metadata of the packets of the file. The specified metadata will be set for
-each file packet. You can specify this directive multiple times to add multiple
-metadata entries.
-
-@item @code{option @var{key} @var{value}}
-Option to access, open and probe the file.
-Can be present multiple times.
 
 @item @code{stream}
 Introduce a stream in the virtual file.
@@ -184,20 +165,6 @@ subfiles will be used.
 This is especially useful for MPEG-PS (VOB) files, where the order of the
 streams is not reliable.
 
-@item @code{stream_meta @var{key} @var{value}}
-Metadata for the stream.
-Can be present multiple times.
-
-@item @code{stream_codec @var{value}}
-Codec for the stream.
-
-@item @code{stream_extradata @var{hex_string}}
-Extradata for the string, encoded in hexadecimal.
-
-@item @code{chapter @var{id} @var{start} @var{end}}
-Add a chapter. @var{id} is an unique identifier, possibly small and
-consecutive.
-
 @end table
 
 @subsection Options
@@ -207,8 +174,7 @@ This demuxer accepts the following option:
 @table @option
 
 @item safe
-If set to 1, reject unsafe file paths and directives.
-A file path is considered safe if it
+If set to 1, reject unsafe file paths. A file path is considered safe if it
 does not contain a protocol specification and is relative and all components
 only contain characters from the portable character set (letters, digits,
 period, underscore and hyphen) and have no period at the beginning of a
@@ -218,6 +184,9 @@ If set to 0, any file name is accepted.
 
 The default is 1.
 
+-1 is equivalent to 1 if the format was automatically
+probed and 0 otherwise.
+
 @item auto_convert
 If set to 1, try to perform automatic conversions on packet data to make the
 streams concatenable.
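Programmatically, the @option{safe} option discussed above is passed to avformat_open_input() like any other demuxer option. A minimal sketch using the 4.4-era (non-const) signatures this commit restores; the function name is illustrative:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Open a concat script while explicitly accepting "unsafe" paths. */
static int open_concat_list(AVFormatContext **fmt_ctx, const char *listfile)
{
    AVInputFormat *concat = av_find_input_format("concat");
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "safe", "0", 0);
    ret = avformat_open_input(fmt_ctx, listfile, concat, &opts);
    av_dict_free(&opts);
    return ret;
}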
@@ -274,18 +243,11 @@ which streams to actually receive.
 Each stream mirrors the @code{id} and @code{bandwidth} properties from the
 @code{<Representation>} as metadata keys named "id" and "variant_bitrate" respectively.
 
-@section imf
+@section flv, live_flv
 
-Interoperable Master Format demuxer.
-
-This demuxer presents audio and video streams found in an IMF Composition.
-
-@section flv, live_flv, kux
-
 Adobe Flash Video Format demuxer.
 
 This demuxer is used to demux FLV files and RTMP network streams. In case of live network streams, if you force format, you may use live_flv option instead of flv to survive timestamp discontinuities.
-KUX is a flv variant used on the Youku platform.
 
 @example
 ffmpeg -f flv -i myfile.flv ...
@@ -384,9 +346,6 @@ Enabled by default for HTTP/1.1 servers.
 @item http_seekable
 Use HTTP partial requests for downloading HTTP segments.
 0 = disable, 1 = enable, -1 = auto, Default is auto.
-
-@item seg_format_options
-Set options for the demuxer of media segments using a list of key=value pairs separated by @code{:}.
 @end table
 
 @section image2
@@ -702,12 +661,6 @@ Set mfra timestamps as PTS
 Don't use mfra box to set timestamps
 @end table
 
-@item use_tfdt
-For fragmented input, set fragment's starting timestamp to @code{baseMediaDecodeTime} from the @code{tfdt} box.
-Default is enabled, which will prefer to use the @code{tfdt} box to set DTS. Disable to use the @code{earliest_presentation_time} from the @code{sidx} box.
-In either case, the timestamp from the @code{mfra} box will be used if it's available and @code{use_mfra_for} is
-set to pts or dts.
-
 @item export_all
 Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
 characters of the box type are set as the key. Default is false.
@@ -726,15 +679,6 @@ specify.
 
 @item decryption_key
 16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
-
-@item max_stts_delta
-Very high sample deltas written in a trak's stts box may occasionally be intended but usually they are written in
-error or used to store a negative value for dts correction when treated as signed 32-bit integers. This option lets
-the user set an upper limit, beyond which the delta is clamped to 1. Values greater than the limit if negative when
-cast to int32 are used to adjust onward dts.
-
-Unit is the track time scale. Range is 0 to UINT_MAX. Default is @code{UINT_MAX - 48000*10} which allows upto
-a 10 second dts correction for 48 kHz audio streams while accommodating 99.9% of @code{uint32} range.
 @end table
 
 @subsection Audible AAX
 
@@ -494,22 +494,6 @@ patch is inline or attached per mail.
 You can check @url{https://patchwork.ffmpeg.org}, if your patch does not show up, its mime type
 likely was wrong.
 
-@subheading Sending patches from email clients
-Using @code{git send-email} might not be desirable for everyone. The
-following trick allows to send patches via email clients in a safe
-way. It has been tested with Outlook and Thunderbird (with X-Unsent
-extension) and might work with other applications.
-
-Create your patch like this:
-
-@verbatim
-git format-patch -s -o "outputfolder" --add-header "X-Unsent: 1" --suffix .eml --to ffmpeg-devel@ffmpeg.org -1 1a2b3c4d
-@end verbatim
-
-Now you'll just need to open the eml file with the email application
-and execute 'Send'.
-
-@subheading Reviews
 Your patch will be reviewed on the mailing list. You will likely be asked
 to make some changes and are expected to send in an improved version that
 incorporates the requests from the review. This process may go through
@@ -1,13 +1,10 @@
 #!/bin/sh
 
 OUT_DIR="${1}"
-SRC_DIR="${2}"
-DOXYFILE="${3}"
-DOXYGEN="${4}"
+DOXYFILE="${2}"
+DOXYGEN="${3}"
 
-shift 4
+shift 3
 
-cd ${SRC_DIR}
-
 if [ -e "VERSION" ]; then
 VERSION=`cat "VERSION"`
@@ -53,7 +53,7 @@ Set AAC encoder coding method. Possible values:
 
 @table @samp
 @item twoloop
-Two loop searching (TLS) method. This is the default method.
+Two loop searching (TLS) method.
 
 This method first sets quantizers depending on band thresholds and then tries
 to find an optimal combination by adding or subtracting a specific value from
@@ -75,6 +75,7 @@ Constant quantizer method.
 Uses a cheaper version of twoloop algorithm that doesn't try to do as many
 clever adjustments. Worse with low bitrates (less than 64kbps), but is better
 and much faster at higher bitrates.
+This is the default choice for a coder
 
 @end table
 
@@ -1267,10 +1268,6 @@ disabled
 A description of some of the currently available video encoders
 follows.
 
-@section a64_multi, a64_multi5
-
-A64 / Commodore 64 multicolor charset encoder. @code{a64_multi5} is extended with 5th color (colram).
-
 @section GIF
 
 GIF image/animation encoder.
@@ -1750,30 +1747,12 @@ You need to explicitly configure the build with @code{--enable-libsvtav1}.
 @table @option
 @item profile
 Set the encoding profile.
-@table @samp
-@item main
-@item high
-@item professional
-@end table
-
 @item level
-Set the operating point level. For example: '4.0'
+Set the operating point level.
 
-@item hielevel
-Set the Hierarchical prediction levels.
-@table @samp
-@item 3level
-@item 4level
-This is the default.
-@end table
-
 @item tier
 Set the operating point tier.
-@table @samp
-@item main
-This is the default.
-@item high
-@end table
-
 @item rc
 Set the rate control mode to use.
@@ -2660,9 +2639,6 @@ ffmpeg -i foo.mpg -c:v libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
 Import closed captions (which must be ATSC compatible format) into output.
 Only the mpeg2 and h264 decoders provide these. Default is 1 (on).
 
-@item udu_sei @var{boolean}
-Import user data unregistered SEI if available into output. Default is 0 (off).
-
 @item x264-params (N.A.)
 Override the x264 configuration using a :-separated list of key=value
 parameters.
@@ -2744,9 +2720,6 @@ Quantizer curve compression factor
 Normally, when forcing a I-frame type, the encoder can select any type
 of I-frame. This option forces it to choose an IDR-frame.
 
-@item udu_sei @var{boolean}
-Import user data unregistered SEI if available into output. Default is 0 (off).
-
 @item x265-params
 Set x265 options using a list of @var{key}=@var{value} couples separated
 by ":". See @command{x265 --help} for a list of options.
@@ -3143,8 +3116,7 @@ also set (the @option{-qscale} ffmpeg option).
 @option{look_ahead} option is also set.
 
 @item
-@var{ICQ} -- intelligent constant quality otherwise. For the ICQ modes, global
-quality range is 1 to 51, with 1 being the best quality.
+@var{ICQ} -- intelligent constant quality otherwise.
 @end itemize
 
 @item
@@ -32,7 +32,6 @@
 #include <libavutil/imgutils.h>
 #include <libavutil/samplefmt.h>
 #include <libavutil/timestamp.h>
-#include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 
 static AVFormatContext *fmt_ctx = NULL;
@@ -150,7 +149,8 @@ static int open_codec_context(int *stream_idx,
 {
 int ret, stream_index;
 AVStream *st;
-const AVCodec *dec = NULL;
+AVCodec *dec = NULL;
+AVDictionary *opts = NULL;
 
 ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
 if (ret < 0) {
@@ -185,7 +185,7 @@ static int open_codec_context(int *stream_idx,
 }
 
 /* Init the decoders */
-if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
+if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
 fprintf(stderr, "Failed to open %s codec\n",
 av_get_media_type_string(type));
 return ret;
@@ -155,25 +155,12 @@ int main(int argc, char **argv)
 for (i = 0; i < 25; i++) {
 fflush(stdout);
 
-/* Make sure the frame data is writable.
-On the first round, the frame is fresh from av_frame_get_buffer()
-and therefore we know it is writable.
-But on the next rounds, encode() will have called
-avcodec_send_frame(), and the codec may have kept a reference to
-the frame in its internal structures, that makes the frame
-unwritable.
-av_frame_make_writable() checks that and allocates a new buffer
-for the frame only if necessary.
-*/
+/* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);
 
-/* Prepare a dummy image.
-In real code, this is where you would have your own logic for
-filling the frame. FFmpeg does not care what you put in the
-frame.
-*/
+/* prepare a dummy image */
 /* Y */
 for (y = 0; y < c->height; y++) {
 for (x = 0; x < c->width; x++) {
@@ -198,12 +185,7 @@ int main(int argc, char **argv)
 /* flush the encoder */
 encode(c, NULL, pkt, f);
 
-/* Add sequence end code to have a real MPEG file.
-It makes only sense because this tiny examples writes packets
-directly. This is called "elementary stream" and only works for some
-codecs. To create a valid file, you usually need to write packets
-into a proper file format or protocol; see muxing.c.
-*/
+/* add sequence end code to have a real MPEG file */
 if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
 fwrite(endcode, 1, sizeof(endcode), f);
 fclose(f);
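The long comment removed above carries the actual reasoning behind av_frame_make_writable(). A condensed restatement as a self-contained sketch; the fill step is a placeholder and packet draining is elided as noted in the comments:

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Re-use one AVFrame across avcodec_send_frame() calls. */
static int send_dummy_frames(AVCodecContext *c, AVFrame *frame, int nframes)
{
    for (int i = 0; i < nframes; i++) {
        /* On the first round the frame is fresh from av_frame_get_buffer()
         * and therefore writable.  Afterwards the encoder may still hold a
         * reference to its buffers, in which case av_frame_make_writable()
         * silently allocates a new buffer; otherwise it is a no-op. */
        int ret = av_frame_make_writable(frame);
        if (ret < 0)
            return ret;
        /* ... fill frame->data[] with picture number i here ... */
        frame->pts = i;
        ret = avcodec_send_frame(c, frame);
        if (ret < 0)
            return ret;
        /* packets would be drained with avcodec_receive_packet() here,
         * as encode() does in the example */
    }
    return 0;
}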
@@ -22,7 +22,6 @@
 */
 
 #include <libavutil/motion_vector.h>
-#include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 
 static AVFormatContext *fmt_ctx = NULL;
@@ -79,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
 int ret;
 AVStream *st;
 AVCodecContext *dec_ctx = NULL;
-const AVCodec *dec = NULL;
+AVCodec *dec = NULL;
 AVDictionary *opts = NULL;
 
 ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
@@ -105,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
 
 /* Init the video decoder */
 av_dict_set(&opts, "flags2", "+export_mvs", 0);
-ret = avcodec_open2(dec_ctx, dec, &opts);
-av_dict_free(&opts);
-if (ret < 0) {
+if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
 fprintf(stderr, "Failed to open %s codec\n",
 av_get_media_type_string(type));
 return ret;
@@ -124,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
 int main(int argc, char **argv)
 {
 int ret = 0;
-AVPacket *pkt = NULL;
+AVPacket pkt = { 0 };
 
 if (argc != 2) {
 fprintf(stderr, "Usage: %s <video>\n", argv[0]);
@@ -159,20 +156,13 @@ int main(int argc, char **argv)
 goto end;
 }
 
-pkt = av_packet_alloc();
-if (!pkt) {
-fprintf(stderr, "Could not allocate AVPacket\n");
-ret = AVERROR(ENOMEM);
-goto end;
-}
-
 printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
 
 /* read frames from the file */
-while (av_read_frame(fmt_ctx, pkt) >= 0) {
-if (pkt->stream_index == video_stream_idx)
-ret = decode_packet(pkt);
-av_packet_unref(pkt);
+while (av_read_frame(fmt_ctx, &pkt) >= 0) {
+if (pkt.stream_index == video_stream_idx)
+ret = decode_packet(&pkt);
+av_packet_unref(&pkt);
 if (ret < 0)
 break;
 }
@@ -184,6 +174,5 @@ end:
 avcodec_free_context(&video_dec_ctx);
 avformat_close_input(&fmt_ctx);
 av_frame_free(&frame);
-av_packet_free(&pkt);
 return ret < 0;
 }
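The left-hand lines above show the post-5.0 shape of the read loop (heap-allocated packet), the right-hand lines the 4.4 shape (stack packet). For reference, the newer pattern in isolation; decode_packet() stands for the consumer defined in extract_mvs.c:

#include <libavcodec/packet.h>
#include <libavformat/avformat.h>

static int read_all_packets(AVFormatContext *fmt_ctx, int video_stream_idx)
{
    AVPacket *pkt = av_packet_alloc();
    int ret = 0;

    if (!pkt)
        return AVERROR(ENOMEM);

    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        if (pkt->stream_index == video_stream_idx)
            ret = decode_packet(pkt);      /* as defined in extract_mvs.c */
        av_packet_unref(pkt);              /* drop the reference each iteration */
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    return ret;
}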
@@ -34,7 +34,6 @@
 #include <libavformat/avformat.h>
 #include <libavfilter/buffersink.h>
 #include <libavfilter/buffersrc.h>
-#include <libavutil/channel_layout.h>
 #include <libavutil/opt.h>
 
 static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
@@ -49,8 +48,8 @@ static int audio_stream_index = -1;
 
 static int open_input_file(const char *filename)
 {
-const AVCodec *dec;
 int ret;
+AVCodec *dec;
 
 if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
@@ -215,12 +214,12 @@ static void print_frame(const AVFrame *frame)
 int main(int argc, char **argv)
 {
 int ret;
-AVPacket *packet = av_packet_alloc();
+AVPacket packet;
 AVFrame *frame = av_frame_alloc();
 AVFrame *filt_frame = av_frame_alloc();
 
-if (!packet || !frame || !filt_frame) {
-fprintf(stderr, "Could not allocate frame or packet\n");
+if (!frame || !filt_frame) {
+perror("Could not allocate frame");
 exit(1);
 }
 if (argc != 2) {
@@ -235,11 +234,11 @@ int main(int argc, char **argv)
 
 /* read all packets */
 while (1) {
-if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
+if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
 break;
 
-if (packet->stream_index == audio_stream_index) {
-ret = avcodec_send_packet(dec_ctx, packet);
+if (packet.stream_index == audio_stream_index) {
+ret = avcodec_send_packet(dec_ctx, &packet);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
 break;
@@ -275,13 +274,12 @@ int main(int argc, char **argv)
 }
 }
 }
-av_packet_unref(packet);
+av_packet_unref(&packet);
 }
 end:
 avfilter_graph_free(&filter_graph);
 avcodec_free_context(&dec_ctx);
 avformat_close_input(&fmt_ctx);
-av_packet_free(&packet);
 av_frame_free(&frame);
 av_frame_free(&filt_frame);
 
@@ -53,8 +53,8 @@ static int64_t last_pts = AV_NOPTS_VALUE;
 
 static int open_input_file(const char *filename)
 {
-const AVCodec *dec;
 int ret;
+AVCodec *dec;
 
 if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
@@ -210,7 +210,7 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
 int main(int argc, char **argv)
 {
 int ret;
-AVPacket *packet;
+AVPacket packet;
 AVFrame *frame;
 AVFrame *filt_frame;
 
@@ -221,9 +221,8 @@ int main(int argc, char **argv)
 
 frame = av_frame_alloc();
 filt_frame = av_frame_alloc();
-packet = av_packet_alloc();
-if (!frame || !filt_frame || !packet) {
-fprintf(stderr, "Could not allocate frame or packet\n");
+if (!frame || !filt_frame) {
+perror("Could not allocate frame");
 exit(1);
 }
 
@@ -234,11 +233,11 @@ int main(int argc, char **argv)
 
 /* read all packets */
 while (1) {
-if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
+if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
 break;
 
-if (packet->stream_index == video_stream_index) {
-ret = avcodec_send_packet(dec_ctx, packet);
+if (packet.stream_index == video_stream_index) {
+ret = avcodec_send_packet(dec_ctx, &packet);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
 break;
@@ -274,7 +273,7 @@ int main(int argc, char **argv)
 av_frame_unref(frame);
 }
 }
-av_packet_unref(packet);
+av_packet_unref(&packet);
 }
 end:
 avfilter_graph_free(&filter_graph);
@@ -282,7 +281,6 @@ end:
 avformat_close_input(&fmt_ctx);
 av_frame_free(&frame);
 av_frame_free(&filt_frame);
-av_packet_free(&packet);
 
 if (ret < 0 && ret != AVERROR_EOF) {
 fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
@@ -152,8 +152,8 @@ int main(int argc, char *argv[])
 int video_stream, ret;
 AVStream *video = NULL;
 AVCodecContext *decoder_ctx = NULL;
-const AVCodec *decoder = NULL;
-AVPacket *packet = NULL;
+AVCodec *decoder = NULL;
+AVPacket packet;
 enum AVHWDeviceType type;
 int i;
 
@@ -172,12 +172,6 @@ int main(int argc, char *argv[])
 return -1;
 }
 
-packet = av_packet_alloc();
-if (!packet) {
-fprintf(stderr, "Failed to allocate AVPacket\n");
-return -1;
-}
-
 /* open the input file */
 if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
 fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
@@ -233,21 +227,23 @@ int main(int argc, char *argv[])
 
 /* actual decoding and dump the raw data */
 while (ret >= 0) {
-if ((ret = av_read_frame(input_ctx, packet)) < 0)
+if ((ret = av_read_frame(input_ctx, &packet)) < 0)
 break;
 
-if (video_stream == packet->stream_index)
-ret = decode_write(decoder_ctx, packet);
+if (video_stream == packet.stream_index)
+ret = decode_write(decoder_ctx, &packet);
 
-av_packet_unref(packet);
+av_packet_unref(&packet);
 }
 
 /* flush the decoder */
-ret = decode_write(decoder_ctx, NULL);
+packet.data = NULL;
+packet.size = 0;
+ret = decode_write(decoder_ctx, &packet);
+av_packet_unref(&packet);
 
 if (output_file)
 fclose(output_file);
-av_packet_free(&packet);
 avcodec_free_context(&decoder_ctx);
 avformat_close_input(&input_ctx);
 av_buffer_unref(&hw_device_ctx);
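The two sides flush the decoder differently: the newer code passes NULL to enter drain mode, the restored 4.4 code sends an empty packet. The drain sequence on its own looks like this (a sketch, not part of either example):

#include <libavcodec/avcodec.h>

/* Enter drain mode and collect every frame still buffered in the decoder. */
static int flush_decoder(AVCodecContext *dec_ctx, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, NULL);   /* NULL packet = start draining */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(dec_ctx, frame)) >= 0) {
        /* ... use the frame, e.g. write it out ... */
        av_frame_unref(frame);
    }
    return ret == AVERROR_EOF ? 0 : ret;            /* EOF marks a clean drain */
}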
@@ -34,7 +34,7 @@
 int main (int argc, char **argv)
 {
 AVFormatContext *fmt_ctx = NULL;
-const AVDictionaryEntry *tag = NULL;
+AVDictionaryEntry *tag = NULL;
 int ret;
 
 if (argc != 2) {
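Only the const qualifier changes in metadata.c. For context, the iteration idiom the example is built on, written with the const-correct declaration:

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static void dump_metadata(const AVFormatContext *fmt_ctx)
{
    const AVDictionaryEntry *tag = NULL;

    /* An empty key plus AV_DICT_IGNORE_SUFFIX walks every entry in turn. */
    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);
}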
@@ -39,7 +39,6 @@
 #include <libavutil/opt.h>
 #include <libavutil/mathematics.h>
 #include <libavutil/timestamp.h>
-#include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
 #include <libswresample/swresample.h>
@@ -62,8 +61,6 @@ typedef struct OutputStream {
 AVFrame *frame;
 AVFrame *tmp_frame;
 
-AVPacket *tmp_pkt;
-
 float t, tincr, tincr2;
 
 struct SwsContext *sws_ctx;
@@ -82,7 +79,7 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
 }
 
 static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
-AVStream *st, AVFrame *frame, AVPacket *pkt)
+AVStream *st, AVFrame *frame)
 {
 int ret;
 
@@ -95,7 +92,9 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
 }
 
 while (ret >= 0) {
-ret = avcodec_receive_packet(c, pkt);
+AVPacket pkt = { 0 };
+
+ret = avcodec_receive_packet(c, &pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
@@ -104,15 +103,13 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
 }
 
 /* rescale output packet timestamp values from codec to stream timebase */
-av_packet_rescale_ts(pkt, c->time_base, st->time_base);
-pkt->stream_index = st->index;
+av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
+pkt.stream_index = st->index;
 
 /* Write the compressed frame to the media file. */
-log_packet(fmt_ctx, pkt);
-ret = av_interleaved_write_frame(fmt_ctx, pkt);
-/* pkt is now blank (av_interleaved_write_frame() takes ownership of
-* its contents and resets pkt), so that no unreferencing is necessary.
-* This would be different if one used av_write_frame(). */
+log_packet(fmt_ctx, &pkt);
+ret = av_interleaved_write_frame(fmt_ctx, &pkt);
+av_packet_unref(&pkt);
 if (ret < 0) {
 fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
 exit(1);
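Both variants of write_frame() implement the same send-frame / receive-packet / mux loop and differ only in where the AVPacket lives. A condensed sketch of that loop with a caller-provided packet (error paths shortened, names illustrative):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int send_and_mux(AVFormatContext *fmt_ctx, AVCodecContext *c,
                        AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(c, frame);          /* frame may be NULL to flush */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(c, pkt)) >= 0) {
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;
        /* av_interleaved_write_frame() takes ownership of pkt and resets it. */
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        if (ret < 0)
            return ret;
    }
    return ret == AVERROR_EOF ? 1 : (ret == AVERROR(EAGAIN) ? 0 : ret);
}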
@ -124,7 +121,7 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
|||||||
|
|
||||||
/* Add an output stream. */
|
/* Add an output stream. */
|
||||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||||
const AVCodec **codec,
|
AVCodec **codec,
|
||||||
enum AVCodecID codec_id)
|
enum AVCodecID codec_id)
|
||||||
{
|
{
|
||||||
AVCodecContext *c;
|
AVCodecContext *c;
|
||||||
@ -138,12 +135,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
ost->tmp_pkt = av_packet_alloc();
|
|
||||||
if (!ost->tmp_pkt) {
|
|
||||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
ost->st = avformat_new_stream(oc, NULL);
|
ost->st = avformat_new_stream(oc, NULL);
|
||||||
if (!ost->st) {
|
if (!ost->st) {
|
||||||
fprintf(stderr, "Could not allocate stream\n");
|
fprintf(stderr, "Could not allocate stream\n");
|
||||||
@ -251,8 +242,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
|||||||
return frame;
|
return frame;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||||
OutputStream *ost, AVDictionary *opt_arg)
|
|
||||||
{
|
{
|
||||||
AVCodecContext *c;
|
AVCodecContext *c;
|
||||||
int nb_samples;
|
int nb_samples;
|
||||||
@ -386,7 +376,7 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
|||||||
ost->samples_count += dst_nb_samples;
|
ost->samples_count += dst_nb_samples;
|
||||||
}
|
}
|
||||||
|
|
||||||
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
|
return write_frame(oc, c, ost->st, frame);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**************************************************************/
|
/**************************************************************/
|
||||||
@ -415,8 +405,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
|||||||
return picture;
|
return picture;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||||
OutputStream *ost, AVDictionary *opt_arg)
|
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
AVCodecContext *c = ost->enc;
|
AVCodecContext *c = ost->enc;
|
||||||
@ -529,7 +518,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
|||||||
*/
|
*/
|
||||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||||
{
|
{
|
||||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
|
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||||
@ -537,7 +526,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
|||||||
avcodec_free_context(&ost->enc);
|
avcodec_free_context(&ost->enc);
|
||||||
av_frame_free(&ost->frame);
|
av_frame_free(&ost->frame);
|
||||||
av_frame_free(&ost->tmp_frame);
|
av_frame_free(&ost->tmp_frame);
|
||||||
av_packet_free(&ost->tmp_pkt);
|
|
||||||
sws_freeContext(ost->sws_ctx);
|
sws_freeContext(ost->sws_ctx);
|
||||||
swr_free(&ost->swr_ctx);
|
swr_free(&ost->swr_ctx);
|
||||||
}
|
}
|
||||||
@ -548,10 +536,10 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
|||||||
int main(int argc, char **argv)
|
int main(int argc, char **argv)
|
||||||
{
|
{
|
||||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||||
const AVOutputFormat *fmt;
|
|
||||||
const char *filename;
|
const char *filename;
|
||||||
|
AVOutputFormat *fmt;
|
||||||
AVFormatContext *oc;
|
AVFormatContext *oc;
|
||||||
const AVCodec *audio_codec, *video_codec;
|
AVCodec *audio_codec, *video_codec;
|
||||||
int ret;
|
int ret;
|
||||||
int have_video = 0, have_audio = 0;
|
int have_video = 0, have_audio = 0;
|
||||||
int encode_video = 0, encode_audio = 0;
|
int encode_video = 0, encode_audio = 0;
|
||||||
|
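For orientation, the hunks above (which appear to touch the muxing example) revert the packet handling from a single caller-owned AVPacket allocated with av_packet_alloc() to a stack AVPacket that is unreferenced after every write. The following is only a minimal sketch of the restored drain-and-write pattern, not part of the commit; the helper name drain_and_write is hypothetical and error handling is trimmed.

    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>

    /* Sketch of the older loop: one stack AVPacket per iteration, explicitly
     * unreferenced after av_interleaved_write_frame(). */
    static int drain_and_write(AVFormatContext *fmt_ctx, AVCodecContext *c, AVStream *st)
    {
        int ret = 0;

        while (ret >= 0) {
            AVPacket pkt = { 0 };              /* zero-initialized, no av_packet_alloc() */

            ret = avcodec_receive_packet(c, &pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                break;
            else if (ret < 0)
                return ret;

            /* codec timebase -> stream timebase, then tag the stream */
            av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
            pkt.stream_index = st->index;

            ret = av_interleaved_write_frame(fmt_ctx, &pkt);
            av_packet_unref(&pkt);             /* mirrors the example; harmless if the muxer already reset pkt */
        }
        return ret;
    }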
@@ -44,10 +44,38 @@
 #include "libavutil/hwcontext_qsv.h"
 #include "libavutil/mem.h"

+typedef struct DecodeContext {
+AVBufferRef *hw_device_ref;
+} DecodeContext;
+
 static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
 {
 while (*pix_fmts != AV_PIX_FMT_NONE) {
 if (*pix_fmts == AV_PIX_FMT_QSV) {
+DecodeContext *decode = avctx->opaque;
+AVHWFramesContext *frames_ctx;
+AVQSVFramesContext *frames_hwctx;
+int ret;
+
+/* create a pool of surfaces to be used by the decoder */
+avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
+if (!avctx->hw_frames_ctx)
+return AV_PIX_FMT_NONE;
+frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+frames_hwctx = frames_ctx->hwctx;
+
+frames_ctx->format = AV_PIX_FMT_QSV;
+frames_ctx->sw_format = avctx->sw_pix_fmt;
+frames_ctx->width = FFALIGN(avctx->coded_width, 32);
+frames_ctx->height = FFALIGN(avctx->coded_height, 32);
+frames_ctx->initial_pool_size = 32;
+
+frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+
+ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+if (ret < 0)
+return AV_PIX_FMT_NONE;
+
 return AV_PIX_FMT_QSV;
 }

@@ -59,7 +87,7 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
 return AV_PIX_FMT_NONE;
 }

-static int decode_packet(AVCodecContext *decoder_ctx,
+static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
 AVFrame *frame, AVFrame *sw_frame,
 AVPacket *pkt, AVIOContext *output_ctx)
 {
@@ -113,15 +141,15 @@ int main(int argc, char **argv)
 AVCodecContext *decoder_ctx = NULL;
 const AVCodec *decoder;

-AVPacket *pkt = NULL;
+AVPacket pkt = { 0 };
 AVFrame *frame = NULL, *sw_frame = NULL;

+DecodeContext decode = { NULL };
+
 AVIOContext *output_ctx = NULL;

 int ret, i;

-AVBufferRef *device_ref = NULL;
-
 if (argc < 3) {
 fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
 return 1;
@@ -149,7 +177,7 @@ int main(int argc, char **argv)
 }

 /* open the hardware device */
-ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
+ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
 "auto", NULL, 0);
 if (ret < 0) {
 fprintf(stderr, "Cannot open the hardware device\n");
@@ -181,8 +209,7 @@ int main(int argc, char **argv)
 decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
 }

-decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
+decoder_ctx->opaque = &decode;
 decoder_ctx->get_format = get_format;

 ret = avcodec_open2(decoder_ctx, NULL, NULL);
@@ -200,26 +227,27 @@ int main(int argc, char **argv)

 frame = av_frame_alloc();
 sw_frame = av_frame_alloc();
-pkt = av_packet_alloc();
-if (!frame || !sw_frame || !pkt) {
+if (!frame || !sw_frame) {
 ret = AVERROR(ENOMEM);
 goto finish;
 }

 /* actual decoding */
 while (ret >= 0) {
-ret = av_read_frame(input_ctx, pkt);
+ret = av_read_frame(input_ctx, &pkt);
 if (ret < 0)
 break;

-if (pkt->stream_index == video_st->index)
-ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
+if (pkt.stream_index == video_st->index)
+ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);

-av_packet_unref(pkt);
+av_packet_unref(&pkt);
 }

 /* flush the decoder */
-ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
+pkt.data = NULL;
+pkt.size = 0;
+ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);

 finish:
 if (ret < 0) {
@@ -232,11 +260,10 @@ finish:

 av_frame_free(&frame);
 av_frame_free(&sw_frame);
-av_packet_free(&pkt);

 avcodec_free_context(&decoder_ctx);

-av_buffer_unref(&device_ref);
+av_buffer_unref(&decode.hw_device_ref);

 avio_close(output_ctx);

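The hunks above (apparently the QSV decoding example) swap how the decoder learns about the hardware device: the removed lines attach the device via hw_device_ctx and let libavcodec derive the frame pool, while the restored code passes a user struct through opaque and builds the AVHWFramesContext by hand inside get_format(). A compressed, hedged sketch of the two wirings; the helper names new_style_setup and old_style_setup are illustrative only.

    #include <libavcodec/avcodec.h>
    #include <libavutil/hwcontext.h>

    typedef struct DecodeContext { AVBufferRef *hw_device_ref; } DecodeContext;

    /* Removed variant: give the decoder the device, it allocates surfaces itself. */
    static int new_style_setup(AVCodecContext *decoder_ctx, AVBufferRef *device_ref)
    {
        decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
        return decoder_ctx->hw_device_ctx ? 0 : AVERROR(ENOMEM);
    }

    /* Restored variant: stash the device in opaque; the get_format() callback
     * (see the added hunk above) then fills avctx->hw_frames_ctx manually. */
    static void old_style_setup(AVCodecContext *decoder_ctx, DecodeContext *decode,
                                enum AVPixelFormat (*get_format)(AVCodecContext *, const enum AVPixelFormat *))
    {
        decoder_ctx->opaque     = decode;
        decoder_ctx->get_format = get_format;
    }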
@@ -45,9 +45,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, cons

 int main(int argc, char **argv)
 {
-const AVOutputFormat *ofmt = NULL;
+AVOutputFormat *ofmt = NULL;
 AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
-AVPacket *pkt = NULL;
+AVPacket pkt;
 const char *in_filename, *out_filename;
 int ret, i;
 int stream_index = 0;
@@ -65,12 +65,6 @@ int main(int argc, char **argv)
 in_filename = argv[1];
 out_filename = argv[2];

-pkt = av_packet_alloc();
-if (!pkt) {
-fprintf(stderr, "Could not allocate AVPacket\n");
-return 1;
-}
-
 if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
 fprintf(stderr, "Could not open input file '%s'", in_filename);
 goto end;
@@ -91,7 +85,7 @@ int main(int argc, char **argv)
 }

 stream_mapping_size = ifmt_ctx->nb_streams;
-stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping));
+stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
 if (!stream_mapping) {
 ret = AVERROR(ENOMEM);
 goto end;
@@ -146,39 +140,38 @@ int main(int argc, char **argv)
 while (1) {
 AVStream *in_stream, *out_stream;

-ret = av_read_frame(ifmt_ctx, pkt);
+ret = av_read_frame(ifmt_ctx, &pkt);
 if (ret < 0)
 break;

-in_stream = ifmt_ctx->streams[pkt->stream_index];
-if (pkt->stream_index >= stream_mapping_size ||
-stream_mapping[pkt->stream_index] < 0) {
-av_packet_unref(pkt);
+in_stream = ifmt_ctx->streams[pkt.stream_index];
+if (pkt.stream_index >= stream_mapping_size ||
+stream_mapping[pkt.stream_index] < 0) {
+av_packet_unref(&pkt);
 continue;
 }

-pkt->stream_index = stream_mapping[pkt->stream_index];
-out_stream = ofmt_ctx->streams[pkt->stream_index];
-log_packet(ifmt_ctx, pkt, "in");
+pkt.stream_index = stream_mapping[pkt.stream_index];
+out_stream = ofmt_ctx->streams[pkt.stream_index];
+log_packet(ifmt_ctx, &pkt, "in");

 /* copy packet */
-av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
-pkt->pos = -1;
-log_packet(ofmt_ctx, pkt, "out");
+pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
+pkt.pos = -1;
+log_packet(ofmt_ctx, &pkt, "out");

-ret = av_interleaved_write_frame(ofmt_ctx, pkt);
-/* pkt is now blank (av_interleaved_write_frame() takes ownership of
-* its contents and resets pkt), so that no unreferencing is necessary.
-* This would be different if one used av_write_frame(). */
+ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
 if (ret < 0) {
 fprintf(stderr, "Error muxing packet\n");
 break;
 }
+av_packet_unref(&pkt);
 }

 av_write_trailer(ofmt_ctx);
 end:
-av_packet_free(&pkt);

 avformat_close_input(&ifmt_ctx);

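The "copy packet" part of the hunk above replaces a single av_packet_rescale_ts() call with the explicit per-field rescales it is shorthand for. A small sketch of that equivalence under the same rounding flags as the restored code; this is illustrative only and ignores AV_NOPTS_VALUE handling, which the library helper does perform.

    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>

    /* Hand-rolled near-equivalent of av_packet_rescale_ts(pkt, in_tb, out_tb). */
    static void rescale_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
    {
        pkt->pts      = av_rescale_q_rnd(pkt->pts, in_tb, out_tb,
                                         AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        pkt->dts      = av_rescale_q_rnd(pkt->dts, in_tb, out_tb,
                                         AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        pkt->duration = av_rescale_q(pkt->duration, in_tb, out_tb);
    }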
@@ -38,7 +38,6 @@
 #include "libavutil/audio_fifo.h"
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
 #include "libavutil/frame.h"
 #include "libavutil/opt.h"

@@ -61,7 +60,7 @@ static int open_input_file(const char *filename,
 AVCodecContext **input_codec_context)
 {
 AVCodecContext *avctx;
-const AVCodec *input_codec;
+AVCodec *input_codec;
 int error;

 /* Open the input file to read from it. */
@@ -145,7 +144,7 @@ static int open_output_file(const char *filename,
 AVCodecContext *avctx = NULL;
 AVIOContext *output_io_context = NULL;
 AVStream *stream = NULL;
-const AVCodec *output_codec = NULL;
+AVCodec *output_codec = NULL;
 int error;

 /* Open the output file to write to it. */
@@ -32,7 +32,6 @@
 #include <libavformat/avformat.h>
 #include <libavfilter/buffersink.h>
 #include <libavfilter/buffersrc.h>
-#include <libavutil/channel_layout.h>
 #include <libavutil/opt.h>
 #include <libavutil/pixdesc.h>

@@ -72,13 +71,13 @@ static int open_input_file(const char *filename)
 return ret;
 }

-stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
+stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
 if (!stream_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 AVStream *stream = ifmt_ctx->streams[i];
-const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
+AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
 AVCodecContext *codec_ctx;
 if (!dec) {
 av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
@@ -123,7 +122,7 @@ static int open_output_file(const char *filename)
 AVStream *out_stream;
 AVStream *in_stream;
 AVCodecContext *dec_ctx, *enc_ctx;
-const AVCodec *encoder;
+AVCodec *encoder;
 int ret;
 unsigned int i;

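Both array hunks above are a straight swap between two zero-initializing array allocators: av_calloc() (the newer spelling, removed) and av_mallocz_array() (the older one, restored). They are interchangeable for this purpose. A tiny sketch, assuming a stand-in StreamContext type and the hypothetical helper alloc_stream_ctx:

    #include <libavutil/mem.h>

    typedef struct StreamContext { void *dec_ctx, *enc_ctx; } StreamContext;  /* stand-in only */

    static StreamContext *alloc_stream_ctx(unsigned int stream_count)
    {
        /* old spelling used by the reverted code: */
        StreamContext *sc = av_mallocz_array(stream_count, sizeof(*sc));
        /* newer equivalent removed by this commit:
         * StreamContext *sc = av_calloc(stream_count, sizeof(*sc)); */
        return sc;  /* NULL on overflow or allocation failure in either case */
    }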
@@ -105,7 +105,7 @@ int main(int argc, char *argv[])
 FILE *fin = NULL, *fout = NULL;
 AVFrame *sw_frame = NULL, *hw_frame = NULL;
 AVCodecContext *avctx = NULL;
-const AVCodec *codec = NULL;
+AVCodec *codec = NULL;
 const char *enc_name = "h264_vaapi";

 if (argc < 5) {
@@ -62,7 +62,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
 static int open_input_file(const char *filename)
 {
 int ret;
-const AVCodec *decoder = NULL;
+AVCodec *decoder = NULL;
 AVStream *video = NULL;

 if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
@@ -142,7 +142,7 @@ end:
 return ret;
 }

-static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
+static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
 {
 AVFrame *frame;
 int ret = 0;
@@ -226,9 +226,9 @@ fail:

 int main(int argc, char **argv)
 {
-const AVCodec *enc_codec;
 int ret = 0;
 AVPacket *dec_pkt;
+AVCodec *enc_codec;

 if (argc != 4) {
 fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
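The remaining example hunks are the same mechanical change throughout: dropping const from AVCodec pointers, since before FFmpeg 5.0 the lookup functions returned a non-const AVCodec*. A minimal sketch of the pointer type on either side of the rollback; the variable names are illustrative only.

    #include <libavcodec/avcodec.h>

    static void find_codecs(void)
    {
        /* pre-5.0 libavcodec (what this commit rolls back to): non-const return type */
        AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);

        /* 5.0+ libavcodec (the removed lines): the same lookups return const AVCodec*,
         * e.g. const AVCodec *enc = avcodec_find_encoder_by_name("libx264"); */

        (void)dec;  /* silence unused-variable warnings in this fragment */
    }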
140 doc/ffmpeg.texi
@@ -449,11 +449,6 @@ output file already exists.
 Set number of times input stream shall be looped. Loop 0 means no loop,
 loop -1 means infinite loop.

-@item -recast_media (@emph{global})
-Allow forcing a decoder of a different media type than the one
-detected or designated by the demuxer. Useful for decoding media
-data muxed as data streams.
-
 @item -c[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
 @itemx -codec[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
 Select an encoder (when used before an output file) or a decoder (when used
@@ -560,22 +555,27 @@ ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
 @item -disposition[:stream_specifier] @var{value} (@emph{output,per-stream})
 Sets the disposition for a stream.

-By default, the disposition is copied from the input stream, unless the output
-stream this option applies to is fed by a complex filtergraph - in that case the
-disposition is unset by default.
+This option overrides the disposition copied from the input stream. It is also
+possible to delete the disposition by setting it to 0.

-@var{value} is a sequence of items separated by '+' or '-'. The first item may
-also be prefixed with '+' or '-', in which case this option modifies the default
-value. Otherwise (the first item is not prefixed) this options overrides the
-default value. A '+' prefix adds the given disposition, '-' removes it. It is
-also possible to clear the disposition by setting it to 0.
-
-If no @code{-disposition} options were specified for an output file, ffmpeg will
-automatically set the 'default' disposition on the first stream of each type,
-when there are multiple streams of this type in the output file and no stream of
-that type is already marked as default.
-
-The @code{-dispositions} option lists the known dispositions.
+The following dispositions are recognized:
+@table @option
+@item default
+@item dub
+@item original
+@item comment
+@item lyrics
+@item karaoke
+@item forced
+@item hearing_impaired
+@item visual_impaired
+@item clean_effects
+@item attached_pic
+@item captions
+@item descriptions
+@item dependent
+@item metadata
+@end table

 For example, to make the second audio stream the default stream:
 @example
@@ -759,16 +759,6 @@ This option is similar to @option{-filter}, the only difference is that its
 argument is the name of the file from which a filtergraph description is to be
 read.

-@item -reinit_filter[:@var{stream_specifier}] @var{integer} (@emph{input,per-stream})
-This boolean option determines if the filtergraph(s) to which this stream is fed gets
-reinitialized when input frame parameters change mid-stream. This option is enabled by
-default as most video and all audio filters cannot handle deviation in input frame properties.
-Upon reinitialization, existing filter state is lost, like e.g. the frame count @code{n}
-reference available in some filters. Any frames buffered at time of reinitialization are lost.
-The properties where a change triggers reinitialization are,
-for video, frame resolution or pixel format;
-for audio, sample format, sample rate, channel count or channel layout.
-
 @item -filter_threads @var{nb_threads} (@emph{global})
 Defines how many threads are used to process a filter pipeline. Each pipeline
 will produce a thread pool with this many threads available for parallel processing.
@@ -1006,7 +996,6 @@ Deprecated see -bsf
 @item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
 @item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
 @item -force_key_frames[:@var{stream_specifier}] source (@emph{output,per-stream})
-@item -force_key_frames[:@var{stream_specifier}] source_no_drop (@emph{output,per-stream})

 @var{force_key_frames} can take arguments of the following form:

@@ -1068,12 +1057,6 @@ starting from second 13:
 If the argument is @code{source}, ffmpeg will force a key frame if
 the current frame being encoded is marked as a key frame in its source.

-@item source_no_drop
-If the argument is @code{source_no_drop}, ffmpeg will force a key frame if
-the current frame being encoded is marked as a key frame in its source.
-In cases where this particular source frame has to be dropped,
-enforce the next available frame to become a key frame instead.
-
 @end table

 Note that forcing too many keyframes is very harmful for the lookahead
@@ -1096,27 +1079,9 @@ device type:
 @item cuda
 @var{device} is the number of the CUDA device.

-The following options are recognized:
-@table @option
-@item primary_ctx
-If set to 1, uses the primary device context instead of creating a new one.
-@end table
-
-Examples:
-@table @emph
-@item -init_hw_device cuda:1
-Choose the second device on the system.
-
-@item -init_hw_device cuda:0,primary_ctx=1
-Choose the first device and use the primary device context.
-@end table
-
 @item dxva2
 @var{device} is the number of the Direct3D 9 display adapter.

-@item d3d11va
-@var{device} is the number of the Direct3D 11 display adapter.
-
 @item vaapi
 @var{device} is either an X11 display name or a DRM render node.
 If not specified, it will attempt to open the default X11 display (@emph{$DISPLAY})
@@ -1140,21 +1105,9 @@ If not specified, it will attempt to open the default X11 display (@emph{$DISPLA
 @end table
 If not specified, @samp{auto_any} is used.
 (Note that it may be easier to achieve the desired result for QSV by creating the
-platform-appropriate subdevice (@samp{dxva2} or @samp{d3d11va} or @samp{vaapi}) and then deriving a
+platform-appropriate subdevice (@samp{dxva2} or @samp{vaapi}) and then deriving a
 QSV device from that.)

-Alternatively, @samp{child_device_type} helps to choose platform-appropriate subdevice type.
-On Windows @samp{d3d11va} is used as default subdevice type.
-
-Examples:
-@table @emph
-@item -init_hw_device qsv:hw,child_device_type=d3d11va
-Choose the GPU subdevice with type @samp{d3d11va} and create QSV device with @samp{MFX_IMPL_HARDWARE}.
-
-@item -init_hw_device qsv:hw,child_device_type=dxva2
-Choose the GPU subdevice with type @samp{dxva2} and create QSV device with @samp{MFX_IMPL_HARDWARE}.
-@end table
-
 @item opencl
 @var{device} selects the platform and device as @emph{platform_index.device_index}.

@@ -1257,9 +1210,6 @@ Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
 @item dxva2
 Use DXVA2 (DirectX Video Acceleration) hardware acceleration.

-@item d3d11va
-Use D3D11VA (DirectX Video Acceleration) hardware acceleration.
-
 @item vaapi
 Use VAAPI (Video Acceleration API) hardware acceleration.

@@ -1293,9 +1243,7 @@ by name, or it can create a new device as if
 were called immediately before.

 @item -hwaccels
-List all hardware acceleration components enabled in this build of ffmpeg.
-Actual runtime availability depends on the hardware and its suitable driver
-being installed.
+List all hardware acceleration methods supported in this build of ffmpeg.

 @end table

@@ -1603,42 +1551,33 @@ Exit after ffmpeg has been running for @var{duration} seconds in CPU user time.
 Dump each input packet to stderr.
 @item -hex (@emph{global})
 When dumping packets, also dump the payload.
-@item -readrate @var{speed} (@emph{input})
-Limit input read speed.
-
-Its value is a floating-point positive number which represents the maximum duration of
-media, in seconds, that should be ingested in one second of wallclock time.
-Default value is zero and represents no imposed limitation on speed of ingestion.
-Value @code{1} represents real-time speed and is equivalent to @code{-re}.
-
-Mainly used to simulate a capture device or live input stream (e.g. when reading from a file).
-Should not be used with a low value when input is an actual capture device or live stream as
-it may cause packet loss.
-
-It is useful for when flow speed of output packets is important, such as live streaming.
 @item -re (@emph{input})
-Read input at native frame rate. This is equivalent to setting @code{-readrate 1}.
+Read input at native frame rate. Mainly used to simulate a grab device,
+or live input stream (e.g. when reading from a file). Should not be used
+with actual grab devices or live input streams (where it can cause packet
+loss).
+By default @command{ffmpeg} attempts to read the input(s) as fast as possible.
+This option will slow down the reading of the input(s) to the native frame rate
+of the input(s). It is useful for real-time output (e.g. live streaming).
 @item -vsync @var{parameter}
 Video sync method.
-For compatibility reasons some of the values can be specified as numbers (shown
-in parentheses in the following table). This is deprecated and will stop working
-in the future.
+For compatibility reasons old values can be specified as numbers.
+Newly added values will have to be specified as strings always.

 @table @option
-@item passthrough (0)
+@item 0, passthrough
 Each frame is passed with its timestamp from the demuxer to the muxer.
-@item cfr (1)
+@item 1, cfr
 Frames will be duplicated and dropped to achieve exactly the requested
 constant frame rate.
-@item vfr (2)
+@item 2, vfr
 Frames are passed through with their timestamp or dropped so as to
 prevent 2 frames from having the same timestamp.
 @item drop
 As passthrough but destroys all timestamps, making the muxer generate
 fresh timestamps based on frame-rate.
-@item auto (-1)
-Chooses between cfr and vfr depending on muxer capabilities. This is the
+@item -1, auto
+Chooses between 1 and 2 depending on muxer capabilities. This is the
 default method.
 @end table

@@ -1962,13 +1901,6 @@ filter (scale, aresample) in the graph.
 On by default, to explicitly disable it you need to specify
 @code{-noauto_conversion_filters}.

-@item -bits_per_raw_sample[:@var{stream_specifier}] @var{value} (@emph{output,per-stream})
-Declare the number of bits per raw sample in the given output stream to be
-@var{value}. Note that this option sets the information provided to the
-encoder/muxer, it does not change the stream to conform to this value. Setting
-values that do not match the stream properties may result in encoding failures
-or invalid output files.
-
 @end table

 @section Preset files
@@ -335,12 +335,6 @@ Show information about all pixel formats supported by FFmpeg.
 Pixel format information for each format is printed within a section
 with name "PIXEL_FORMAT".

-@item -show_optional_fields @var{value}
-Some writers viz. JSON and XML, omit the printing of fields with invalid or non-applicable values,
-while other writers always print them. This option enables one to control this behaviour.
-Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
-Default is @var{auto}.
-
 @item -bitexact
 Force bitexact output, useful to produce output which is not dependent
 on the specific build.
@@ -29,18 +29,22 @@
 </xsd:complexType>

 <xsd:complexType name="framesType">
-<xsd:choice minOccurs="0" maxOccurs="unbounded">
-<xsd:element name="frame" type="ffprobe:frameType"/>
-<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
-</xsd:choice>
+<xsd:sequence>
+<xsd:choice minOccurs="0" maxOccurs="unbounded">
+<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
+<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
+</xsd:choice>
+</xsd:sequence>
 </xsd:complexType>

 <xsd:complexType name="packetsAndFramesType">
-<xsd:choice minOccurs="0" maxOccurs="unbounded">
-<xsd:element name="packet" type="ffprobe:packetType"/>
-<xsd:element name="frame" type="ffprobe:frameType"/>
-<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
-</xsd:choice>
+<xsd:sequence>
+<xsd:choice minOccurs="0" maxOccurs="unbounded">
+<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
+<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
+<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
+</xsd:choice>
+</xsd:sequence>
 </xsd:complexType>

 <xsd:complexType name="packetType">
@@ -86,6 +90,8 @@
 <xsd:attribute name="key_frame" type="xsd:int" use="required"/>
 <xsd:attribute name="pts" type="xsd:long" />
 <xsd:attribute name="pts_time" type="xsd:float"/>
+<xsd:attribute name="pkt_pts" type="xsd:long" />
+<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
 <xsd:attribute name="pkt_dts" type="xsd:long" />
 <xsd:attribute name="pkt_dts_time" type="xsd:float"/>
 <xsd:attribute name="best_effort_timestamp" type="xsd:long" />
@@ -193,11 +199,6 @@
 <xsd:attribute name="clean_effects" type="xsd:int" use="required" />
 <xsd:attribute name="attached_pic" type="xsd:int" use="required" />
 <xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
-<xsd:attribute name="captions" type="xsd:int" use="required" />
-<xsd:attribute name="descriptions" type="xsd:int" use="required" />
-<xsd:attribute name="metadata" type="xsd:int" use="required" />
-<xsd:attribute name="dependent" type="xsd:int" use="required" />
-<xsd:attribute name="still_image" type="xsd:int" use="required" />
 </xsd:complexType>

 <xsd:complexType name="streamType">
@@ -215,7 +216,6 @@
 <xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
 <xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
 <xsd:attribute name="extradata" type="xsd:string" />
-<xsd:attribute name="extradata_size" type="xsd:int" />
 <xsd:attribute name="extradata_hash" type="xsd:string" />

 <!-- video attributes -->
@@ -224,7 +224,6 @@
 <xsd:attribute name="coded_width" type="xsd:int"/>
 <xsd:attribute name="coded_height" type="xsd:int"/>
 <xsd:attribute name="closed_captions" type="xsd:boolean"/>
-<xsd:attribute name="film_grain" type="xsd:boolean"/>
 <xsd:attribute name="has_b_frames" type="xsd:int"/>
 <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
 <xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
@@ -270,6 +269,10 @@
 <xsd:attribute name="program_id" type="xsd:int" use="required"/>
 <xsd:attribute name="program_num" type="xsd:int" use="required"/>
 <xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
+<xsd:attribute name="start_time" type="xsd:float"/>
+<xsd:attribute name="start_pts" type="xsd:long"/>
+<xsd:attribute name="end_time" type="xsd:float"/>
+<xsd:attribute name="end_pts" type="xsd:long"/>
 <xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
 <xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
 </xsd:complexType>
@@ -167,9 +167,6 @@ Show available sample formats.
 @item -layouts
 Show channel names and standard channel layouts.

-@item -dispositions
-Show stream dispositions.
-
 @item -colors
 Show recognized color names.

@@ -356,13 +353,6 @@ Possible flags for this option are:
 @end table
 @end table

-@item -cpucount @var{count} (@emph{global})
-Override detection of CPU count. This option is intended
-for testing. Do not use it unless you know what you're doing.
-@example
-ffmpeg -cpucount 2
-@end example
-
 @item -max_alloc @var{bytes}
 Set the maximum size limit for allocating a block on the heap by ffmpeg's
 family of malloc functions. Exercise @strong{extreme caution} when using
1461 doc/filters.texi
File diff suppressed because it is too large
@@ -49,6 +49,7 @@ Generate missing PTS if DTS is present.
 Ignore DTS if PTS is set. Inert when nofillin is set.
 @item ignidx
 Ignore index.
+@item keepside (@emph{deprecated},@emph{inert})
 @item nobuffer
 Reduce the latency introduced by buffering during initial input streams analysis.
 @item nofillin
@@ -69,6 +70,7 @@ This ensures that file and data checksums are reproducible and match between
 platforms. Its primary use is for regression testing.
 @item flush_packets
 Write out packets immediately.
+@item latm (@emph{deprecated},@emph{inert})
 @item shortest
 Stop muxing at the end of the shortest stream.
 It may be needed to increase max_interleave_delta to avoid flushing the longer
@@ -263,7 +263,7 @@ to @file{./configure}.

 FFmpeg can make use of the Scalable Video Technology for AV1 library for AV1 encoding.

-Go to @url{https://gitlab.com/AOMediaCodec/SVT-AV1/} and follow the instructions
+Go to @url{https://github.com/OpenVisualCloud/SVT-AV1/} and follow the instructions
 for installing the library. Then pass @code{--enable-libsvtav1} to configure to
 enable it.

@@ -599,7 +599,6 @@ library:
 @item raw NULL @tab X @tab
 @item raw video @tab X @tab X
 @item raw id RoQ @tab X @tab
-@item raw OBU @tab X @tab X
 @item raw SBC @tab X @tab X
 @item raw Shorten @tab @tab X
 @item raw TAK @tab @tab X
@@ -696,7 +695,7 @@ library:
 @item Windows Televison (WTV) @tab X @tab X
 @item Wing Commander III movie @tab @tab X
 @tab Multimedia format used in Origin's Wing Commander III computer game.
-@item Westwood Studios audio @tab X @tab X
+@item Westwood Studios audio @tab @tab X
 @tab Multimedia format used in Westwood Studios games.
 @item Westwood Studios VQA @tab @tab X
 @tab Multimedia format used in Westwood Studios games.
@@ -741,8 +740,6 @@ following image formats are supported:
 @tab OpenEXR
 @item FITS @tab X @tab X
 @tab Flexible Image Transport System
-@item IMG @tab @tab X
-@tab GEM Raster image
 @item JPEG @tab X @tab X
 @tab Progressive JPEG is not supported.
 @item JPEG 2000 @tab X @tab X
@@ -1021,7 +1018,7 @@ following image formats are supported:
 @item QuickTime 8BPS video @tab @tab X
 @item QuickTime Animation (RLE) video @tab X @tab X
 @tab fourcc: 'rle '
-@item QuickTime Graphics (SMC) @tab X @tab X
+@item QuickTime Graphics (SMC) @tab @tab X
 @tab fourcc: 'smc '
 @item QuickTime video (RPZA) @tab X @tab X
 @tab fourcc: rpza
@@ -1129,7 +1126,6 @@ following image formats are supported:
 @item ADPCM Electronic Arts XAS @tab @tab X
 @item ADPCM G.722 @tab X @tab X
 @item ADPCM G.726 @tab X @tab X
-@item ADPCM IMA Acorn Replay @tab @tab X
 @item ADPCM IMA AMV @tab X @tab X
 @tab Used in AMV files
 @item ADPCM IMA Cunning Developments @tab @tab X
@@ -1166,7 +1162,7 @@ following image formats are supported:
 @item ADPCM Sound Blaster Pro 4-bit @tab @tab X
 @item ADPCM VIMA @tab @tab X
 @tab Used in LucasArts SMUSH animations.
-@item ADPCM Westwood Studios IMA @tab X @tab X
+@item ADPCM Westwood Studios IMA @tab @tab X
 @tab Used in Westwood Studios games like Command and Conquer.
 @item ADPCM Yamaha @tab X @tab X
 @item ADPCM Zork @tab @tab X
@@ -1231,7 +1227,7 @@ following image formats are supported:
 @item GSM Microsoft variant @tab E @tab X
 @tab encoding supported through external library libgsm
 @item IAC (Indeo Audio Coder) @tab @tab X
-@item iLBC (Internet Low Bitrate Codec) @tab E @tab EX
+@item iLBC (Internet Low Bitrate Codec) @tab E @tab E
 @tab encoding and decoding supported through external library libilbc
 @item IMC (Intel Music Coder) @tab @tab X
 @item Interplay ACM @tab @tab X
@@ -1303,7 +1299,7 @@ following image formats are supported:
 @tab experimental codec
 @item Sonic lossless @tab X @tab X
 @tab experimental codec
-@item Speex @tab E @tab EX
+@item Speex @tab E @tab E
 @tab supported through external library libspeex
 @item TAK (Tom's lossless Audio Kompressor) @tab @tab X
 @item True Audio (TTA) @tab X @tab X
@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions.
 git add [-i|-p|-A] <filenames/dirnames>
 @end example

-Make sure you have told Git your name, email address and GPG key
+Make sure you have told Git your name and email address

 @example
 git config --global user.name "My Name"
 git config --global user.email my@@email.invalid
-git config --global user.signingkey ABCDEF0123245
-@end example
-
-Enable signing all commits or use -S
-
-@example
-git config --global commit.gpgsign true
 @end example

 Use @option{--global} to set the global configuration for all your Git checkouts.
@@ -224,46 +217,16 @@ git config --global core.editor
 or set by one of the following environment variables:
 @var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.

-@section Writing a commit message
+Log messages should be concise but descriptive. Explain why you made a change,
+what you did will be obvious from the changes themselves most of the time.
+Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
+levels look at and educate themselves while reading through your code. Don't
+include filenames in log messages, Git provides that information.

-Log messages should be concise but descriptive.
-
-The first line must contain the context, a colon and a very short
-summary of what the commit does. Details can be added, if necessary,
-separated by an empty line. These details should not exceed 60-72 characters
-per line, except when containing code.
-
-Example of a good commit message:
-
-@example
-avcodec/cbs: add a helper to read extradata within packet side data
-
-Using ff_cbs_read() on the raw buffer will not parse it as extradata,
-resulting in parsing errors for example when handling ISOBMFF avcC.
-This helper works around that.
-@end example
-
-@example
-ptr might be NULL
-@end example
-
-If the summary on the first line is not enough, in the body of the message,
-explain why you made a change, what you did will be obvious from the changes
-themselves most of the time. Saying just "bug fix" or "10l" is bad. Remember
-that people of varying skill levels look at and educate themselves while
-reading through your code. Don't include filenames in log messages except in
-the context, Git provides that information.
-
-If the commit fixes a registered issue, state it in a separate line of the
-body: @code{Fix Trac ticket #42.}
-
-The first line will be used to name
+Possibly make the commit message have a terse, descriptive first line, an
+empty line and then a full description. The first line will be used to name
 the patch by @command{git format-patch}.

-Common mistakes for the first line, as seen in @command{git log --oneline}
-include: missing context at the beginning; description of what the code did
-before the patch; line too long or wrapped to the second line.
-
 @section Preparing a patchset

 @example
@@ -430,19 +393,6 @@ git checkout -b svn_23456 $SHA1
 where @var{$SHA1} is the commit hash from the @command{git log} output.


-@chapter gpg key generation
-
-If you have no gpg key yet, we recommend that you create a ed25519 based key as it
-is small, fast and secure. Especially it results in small signatures in git.
-
-@example
-gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com"
-@end example
-
-When generating a key, make sure the email specified matches the email used in git as some sites like
-github consider mismatches a reason to declare such commits unverified. After generating a key you
-can add it to the MAINTAINER file and upload it to a keyserver.
-
 @chapter Pre-push checklist

 Once you have a set of commits that you feel are ready for pushing,
@@ -344,23 +344,9 @@ Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp
 Defaults to @samp{2}.

 @item duplex_mode
-Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
-@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
-@samp{four_sub_device_half}
+Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
 Defaults to @samp{unset}.

-Note: DeckLink SDK 11.0 have replaced the duplex property by a profile property.
-For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
-sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
-is shared between all 4 sub-devices. So DeckLink 8K Pro support four profiles.
-
-Valid profile modes for DeckLink 8K Pro(with DeckLink SDK >= 11.0):
-@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
-@samp{four_sub_device_half}
-
-Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
-@samp{half}, @samp{full}
-
 @item timecode_format
 Timecode type to include in the frame and video stream metadata. Must be
 @samp{none}, @samp{rp188vitc}, @samp{rp188vitc2}, @samp{rp188ltc},
@@ -625,12 +611,6 @@ Save the currently used video capture filter device and its
 parameters (if the filter supports it) to a file.
 If a file with the same name exists it will be overwritten.

-@item use_video_device_timestamps
-If set to @option{false}, the timestamp for video frames will be
-derived from the wallclock instead of the timestamp provided by
-the capture device. This allows working around devices that
-provide unreliable timestamps.
-
 @end table

 @subsection Examples
@ -116,7 +116,7 @@ or is abusive towards others).
@section How long does it take for my message in the moderation queue to be approved?

The queue is not checked on a regular basis. You can ask on the
@t{#ffmpeg-devel} IRC channel on Libera Chat for someone to approve your message.
@t{#ffmpeg-devel} IRC channel on Freenode for someone to approve your message.

@anchor{How do I delete my message in the moderation queue?}
@section How do I delete my message in the moderation queue?
@ -155,7 +155,7 @@ Perform a site search using your favorite search engine. Example:

@section Is there an alternative to the mailing list?

You can ask for help in the official @t{#ffmpeg} IRC channel on Libera Chat.
You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.

Some users prefer the third-party @url{http://www.ffmpeg-archive.org/, Nabble}
interface which presents the mailing lists in a typical forum layout.
643
doc/muxers.texi
@ -19,33 +19,6 @@ enabled demuxers and muxers.
A description of some of the currently available muxers follows.

@anchor{a64}
@section a64

A64 muxer for Commodore 64 video. Accepts a single @code{a64_multi} or @code{a64_multi5} codec video stream.

@anchor{adts}
@section adts

Audio Data Transport Stream muxer. It accepts a single AAC stream.

@subsection Options

It accepts the following options:

@table @option

@item write_id3v2 @var{bool}
Enable to write ID3v2.4 tags at the start of the stream. Default is disabled.

@item write_apetag @var{bool}
Enable to write APE tags at the end of the stream. Default is disabled.

@item write_mpeg2 @var{bool}
Enable to set MPEG version bit in the ADTS frame header to 1 which indicates MPEG-2. Default is 0, which indicates MPEG-4.

@end table
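As an illustrative sketch (file names are placeholders, not from the original text), writing an ADTS stream with an ID3v2.4 tag at the start could look like:

@example
ffmpeg -i input.wav -c:a aac -f adts -write_id3v2 1 output.aac
@end example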
@anchor{aiff}
@section aiff

@ -65,37 +38,6 @@ ID3v2.3 and ID3v2.4) are supported. The default is version 4.

@end table

@anchor{alp}
@section alp

Muxer for audio of High Voltage Software's Lego Racers game. It accepts a single ADPCM_IMA_ALP stream
with no more than 2 channels and a sample rate no greater than 44100 Hz.

Extensions: tun, pcm

@subsection Options

It accepts the following options:

@table @option

@item type @var{type}
Set file type.

@table @samp
@item tun
Set file type as music. Must have a sample rate of 22050 Hz.

@item pcm
Set file type as sfx.

@item auto
Set file type as per the output file extension. @code{.pcm} results in type @code{pcm}, otherwise type @code{tun} is set. @var{(default)}

@end table

@end table
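A minimal sketch (not from the original documentation, and it assumes the @code{adpcm_ima_alp} encoder is available in your build) for writing a music-type file could look like:

@example
ffmpeg -i input.wav -ar 22050 -c:a adpcm_ima_alp -f alp -type tun output.tun
@end example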
@anchor{asf}
@section asf

@ -231,6 +173,37 @@ and the input video converted to MPEG-2 video, use the command:
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
@end example

@section flv

Adobe Flash Video Format muxer.

This muxer accepts the following options:

@table @option

@item flvflags @var{flags}
Possible values:

@table @samp

@item aac_seq_header_detect
Place AAC sequence header based on audio stream data.

@item no_sequence_end
Disable sequence end tag.

@item no_metadata
Disable metadata tag.

@item no_duration_filesize
Disable duration and filesize in metadata when they are equal to zero
at the end of the stream. (Useful for non-seekable live streams.)

@item add_keyframe_index
Used to facilitate seeking; particularly for HTTP pseudo streaming.
@end table
@end table
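For instance (an illustrative command, not part of the original text), duration and filesize can be dropped from the metadata of a live FLV stream with:

@example
ffmpeg -re -i INPUT -c copy -f flv -flvflags no_duration_filesize rtmp://example.com/live/stream_name
@end example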
@anchor{dash}
@section dash

@ -264,6 +237,8 @@ ffmpeg -re -i <input> -map 0 -map 0 -c:a libfdk_aac -c:v libx264 \
@end example

@table @option
@item min_seg_duration @var{microseconds}
This is a deprecated option to set the segment length in microseconds, use @var{seg_duration} instead.
@item seg_duration @var{duration}
Set the segment length in seconds (fractional value can be set). The value is
treated as average segment duration when @var{use_template} is enabled and
@ -362,13 +337,12 @@ Ignore IO errors during open and write. Useful for long-duration runs with netwo
|
|||||||
|
|
||||||
@item lhls @var{lhls}
|
@item lhls @var{lhls}
|
||||||
Enable Low-latency HLS(LHLS). Adds #EXT-X-PREFETCH tag with current segment's URI.
|
Enable Low-latency HLS(LHLS). Adds #EXT-X-PREFETCH tag with current segment's URI.
|
||||||
hls.js player folks are trying to standardize an open LHLS spec. The draft spec is available in https://github.com/video-dev/hlsjs-rfcs/blob/lhls-spec/proposals/0001-lhls.md
|
Apple doesn't have an official spec for LHLS. Meanwhile hls.js player folks are
|
||||||
This option tries to comply with the above open spec.
|
trying to standardize a open LHLS spec. The draft spec is available in https://github.com/video-dev/hlsjs-rfcs/blob/lhls-spec/proposals/0001-lhls.md
|
||||||
It enables @var{streaming} and @var{hls_playlist} options automatically.
|
This option will also try to comply with the above open spec, till Apple's spec officially supports it.
|
||||||
|
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
|
||||||
This is an experimental feature.
|
This is an experimental feature.
|
||||||
|
|
||||||
Note: This is not Apple's version LHLS. See @url{https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis}
|
|
||||||
|
|
||||||
@item ldash @var{ldash}
|
@item ldash @var{ldash}
|
||||||
Enable Low-latency Dash by constraining the presence and values of some elements.
|
Enable Low-latency Dash by constraining the presence and values of some elements.
|
||||||
|
|
||||||
@ -406,137 +380,6 @@ adjusting playback latency and buffer occupancy during normal playback by client
|
|||||||
|
|
||||||
@end table
|
@end table
|
||||||
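As an illustrative sketch (output names are placeholders, not taken from the original text), low-latency HLS output through the dash muxer could be requested with:

@example
ffmpeg -re -i INPUT -c:v libx264 -c:a aac -f dash -streaming 1 -hls_playlist 1 -lhls 1 out.mpd
@end example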
@anchor{fifo}
@section fifo

The fifo pseudo-muxer allows the separation of encoding and muxing by using
a first-in-first-out queue and running the actual muxer in a separate thread. This
is especially useful in combination with the @ref{tee} muxer and can be used to
send data to several destinations with different reliability/writing speed/latency.

API users should be aware that callback functions (interrupt_callback,
io_open and io_close) used within its AVFormatContext must be thread-safe.

The behavior of the fifo muxer if the queue fills up or if the output fails is
selectable:

@itemize @bullet

@item
output can be transparently restarted with a configurable delay between retries
based on real time or the time of the processed stream.

@item
encoding can be blocked during temporary failure, or continue transparently
dropping packets in case the fifo queue fills up.

@end itemize

@table @option

@item fifo_format
Specify the format name. Useful if it cannot be guessed from the
output name suffix.

@item queue_size
Specify the size of the queue (number of packets). Default value is 60.

@item format_opts
Specify format options for the underlying muxer. Muxer options can be specified
as a list of @var{key}=@var{value} pairs separated by ':'.

@item drop_pkts_on_overflow @var{bool}
If set to 1 (true), in case the fifo queue fills up, packets will be dropped
rather than blocking the encoder. This makes it possible to continue streaming without
delaying the input, at the cost of omitting part of the stream. By default
this option is set to 0 (false), so in such cases the encoder will be blocked
until the muxer processes some of the packets and none of them is lost.

@item attempt_recovery @var{bool}
If failure occurs, attempt to recover the output. This is especially useful
when used with network output, since it makes it possible to restart streaming transparently.
By default this option is set to 0 (false).

@item max_recovery_attempts
Sets the maximum number of successive unsuccessful recovery attempts after which
the output fails permanently. By default this option is set to 0 (unlimited).

@item recovery_wait_time @var{duration}
Waiting time before the next recovery attempt after a previous unsuccessful
recovery attempt. Default value is 5 seconds.

@item recovery_wait_streamtime @var{bool}
If set to 0 (false), the real time is used when waiting for the recovery
attempt (i.e. the recovery will be attempted after at least
@var{recovery_wait_time} seconds).
If set to 1 (true), the time of the processed stream is taken into account
instead (i.e. the recovery will be attempted after at least @var{recovery_wait_time}
seconds of the stream have been omitted).
By default, this option is set to 0 (false).

@item recover_any_error @var{bool}
If set to 1 (true), recovery will be attempted regardless of the type of error
causing the failure. By default this option is set to 0 (false) and in case of
certain (usually permanent) errors the recovery is not attempted even when
@var{attempt_recovery} is set to 1.

@item restart_with_keyframe @var{bool}
Specify whether to wait for the keyframe after recovering from
queue overflow or failure. This option is set to 0 (false) by default.

@item timeshift @var{duration}
Buffer the specified amount of packets and delay writing the output. Note that
@var{queue_size} must be big enough to store the packets for timeshift. At the
end of the input the fifo buffer is flushed at realtime speed.

@end table

@subsection Examples

@itemize

@item
Stream something to an rtmp server, continue processing the stream at real-time
rate even in case of temporary failure (network outage) and attempt to recover
streaming every second indefinitely.
@example
ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0:a
  -drop_pkts_on_overflow 1 -attempt_recovery 1 -recovery_wait_time 1 rtmp://example.com/live/stream_name
@end example

@end itemize
@section flv

Adobe Flash Video Format muxer.

This muxer accepts the following options:

@table @option

@item flvflags @var{flags}
Possible values:

@table @samp

@item aac_seq_header_detect
Place AAC sequence header based on audio stream data.

@item no_sequence_end
Disable sequence end tag.

@item no_metadata
Disable metadata tag.

@item no_duration_filesize
Disable duration and filesize in metadata when they are equal to zero
at the end of the stream. (Useful for non-seekable live streams.)

@item add_keyframe_index
Used to facilitate seeking; particularly for HTTP pseudo streaming.
@end table
@end table

@anchor{framecrc}
@section framecrc
@ -799,7 +642,15 @@ were recently referenced in the playlist. Default value is 1, meaning segments o
Set output format options using a :-separated list of key=value
parameters. Values containing @code{:} special characters must be
escaped.
@code{hls_ts_options} is deprecated, use @code{hls_segment_options} instead.

@item hls_wrap @var{wrap}
This is a deprecated option, you can use @code{hls_list_size}
and @code{hls_flags delete_segments} instead.

This option is useful to avoid filling the disk with many segment
files, and limits the maximum number of segment files written to disk
to @var{wrap}.


@item hls_start_number_source
Start the playlist sequence number (@code{#EXT-X-MEDIA-SEQUENCE}) according to the specified source.
@ -886,6 +737,9 @@ This example will produce the playlists segment file sets:
@file{vs0/file_000.ts}, @file{vs0/file_001.ts}, @file{vs0/file_002.ts}, etc. and
@file{vs1/file_000.ts}, @file{vs1/file_001.ts}, @file{vs1/file_002.ts}, etc.

@item use_localtime
Same as the strftime option; it will be deprecated.

@item strftime
Use strftime() on @var{filename} to expand the segment filename with localtime.
The segment number is also available in this mode, but to use it, you need to specify second_level_segment_index
@ -903,6 +757,9 @@ ffmpeg -i in.nut -strftime 1 -hls_flags second_level_segment_index -hls_segment_
This example will produce the playlist, @file{out.m3u8}, and segment files:
@file{file-20160215-0001.ts}, @file{file-20160215-0002.ts}, etc.

@item use_localtime_mkdir
Same as the strftime_mkdir option; it will be deprecated.

@item strftime_mkdir
Used together with -strftime_mkdir, it will create all subdirectories which
are expanded in @var{filename}.
@ -920,10 +777,6 @@ This example will create a directory hierarchy 2016/02/15 (if any of them do not
produce the playlist, @file{out.m3u8}, and segment files:
@file{2016/02/15/file-20160215-1455569023.ts}, @file{2016/02/15/file-20160215-1455569024.ts}, etc.

@item hls_segment_options @var{options_list}
Set output format options using a :-separated list of key=value
parameters. Values containing @code{:} special characters must be
escaped.

@item hls_key_info_file @var{key_info_file}
Use the information in @var{key_info_file} for segment encryption. The first
@ -1425,10 +1278,6 @@ overwritten with new images. Default value is 0.
If set to 1, expand the filename with date and time information from
@code{strftime()}. Default value is 0.

@item atomic_writing
Write output to a temporary file, which is renamed to the target filename once
writing is completed. Default is disabled.

@item protocol_opts @var{options_list}
Set protocol options as a :-separated list of key=value parameters. Values
containing the @code{:} special character must be escaped.
@ -1570,15 +1419,15 @@ have no effect if it is not.
@item default_mode
This option controls how the FlagDefault of the output tracks will be set.
It influences which tracks players should play by default. The default mode
is @samp{passthrough}.
is @samp{infer}.
@table @samp
@item infer
Every track with disposition default will have the FlagDefault set.
Additionally, for each type of track (audio, video or subtitle), if no track
with disposition default of this type exists, then the first track of this type
will be marked as default (if existing). This ensures that the default flag
is set in a sensible way even if the input originated from containers that
lack the concept of default tracks.
In this mode, for each type of track (audio, video or subtitle), if there is
a track with disposition default of this type, then the first such track
(i.e. the one with the lowest index) will be marked as default; if no such
track exists, the first track of this type will be marked as default instead
(if existing). This ensures that the default flag is set in a sensible way even
if the input originated from containers that lack the concept of default tracks.
@item infer_no_subs
This mode is the same as infer except that if no subtitle track with
disposition default exists, no subtitle track will be marked as default.
@ -1732,11 +1581,6 @@ Setting value to @samp{pts} is applicable only for a live encoding use case,
where PTS values are set as wallclock time at the source. For example, an
encoding use case with a decklink capture source where @option{video_pts} and
@option{audio_pts} are set to @samp{abs_wallclock}.

@item -movie_timescale @var{scale}
Set the timescale written in the movie header box (@code{mvhd}).
Range is 1 to INT_MAX. Default is 1000.

@end table

@subsection Example

@ -1886,8 +1730,6 @@ Reemit PAT and PMT at each video frame.
Conform to System B (DVB) instead of System A (ATSC).
@item initial_discontinuity
Mark the initial packet of each stream as discontinuity.
@item nit
Emit NIT table.
@end table

@item mpegts_copyts @var{boolean}
@ -1909,11 +1751,8 @@ Maximum time in seconds between PAT/PMT tables. Default is @code{0.1}.
@item sdt_period @var{duration}
Maximum time in seconds between SDT tables. Default is @code{0.5}.

@item nit_period @var{duration}
Maximum time in seconds between NIT tables. Default is @code{0.5}.

@item tables_version @var{integer}
Set PAT, PMT, SDT and NIT version (default @code{0}, valid values are from 0 to 31, inclusively).
Set PAT, PMT and SDT version (default @code{0}, valid values are from 0 to 31, inclusively).
This option allows updating the stream structure so that a standard consumer may
detect the change. To do so, reopen the output @code{AVFormatContext} (in case of API
usage) or restart the @command{ffmpeg} instance, cyclically changing
@ -2025,182 +1864,6 @@ ogg files can be safely chained.

@end table

@anchor{raw muxers}
@section raw muxers

Raw muxers accept a single stream matching the designated codec. They do not store timestamps or metadata.
The recognized extension is the same as the muxer name unless indicated otherwise.

@subsection ac3

Dolby Digital, also known as AC-3, audio.

@subsection adx

CRI Middleware ADX audio.

This muxer will write out the total sample count near the start of the first packet
when the output is seekable and the count can be stored in 32 bits.

@subsection aptx

aptX (Audio Processing Technology for Bluetooth) audio.

@subsection aptx_hd

aptX HD (Audio Processing Technology for Bluetooth) audio.

Extensions: aptxhd

@subsection avs2

AVS2-P2/IEEE1857.4 video.

Extensions: avs, avs2

@subsection cavsvideo

Chinese AVS (Audio Video Standard) video.

Extensions: cavs

@subsection codec2raw

Codec 2 audio.

No extension is registered, so the format name has to be supplied, e.g. with the ffmpeg CLI tool @code{-f codec2raw}.

@subsection data

The data muxer accepts a single stream with any codec of any type.
The input stream has to be selected using the @code{-map} option with the ffmpeg CLI tool.

No extension is registered, so the format name has to be supplied, e.g. with the ffmpeg CLI tool @code{-f data}.

@subsection dirac

BBC Dirac video. The Dirac Pro codec is a subset and is standardized as SMPTE VC-2.

Extensions: drc, vc2

@subsection dnxhd

Avid DNxHD video. It is standardized as SMPTE VC-3. Accepts DNxHR streams.

Extensions: dnxhd, dnxhr

@subsection dts

DTS Coherent Acoustics (DCA) audio.

@subsection eac3

Dolby Digital Plus, also known as Enhanced AC-3, audio.

@subsection g722

ITU-T G.722 audio.

@subsection g723_1

ITU-T G.723.1 audio.

Extensions: tco, rco

@subsection g726

ITU-T G.726 big-endian ("left-justified") audio.

No extension is registered, so the format name has to be supplied, e.g. with the ffmpeg CLI tool @code{-f g726}.

@subsection g726le

ITU-T G.726 little-endian ("right-justified") audio.

No extension is registered, so the format name has to be supplied, e.g. with the ffmpeg CLI tool @code{-f g726le}.

@subsection gsm

Global System for Mobile Communications audio.

@subsection h261

ITU-T H.261 video.

@subsection h263

ITU-T H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2 video.

@subsection h264

ITU-T H.264 / MPEG-4 Part 10 AVC video. The bitstream must be converted to Annex B syntax if it is in length-prefixed mode.

Extensions: h264, 264

@subsection hevc

ITU-T H.265 / MPEG-H Part 2 HEVC video. The bitstream must be converted to Annex B syntax if it is in length-prefixed mode.

Extensions: hevc, h265, 265

@subsection m4v

MPEG-4 Part 2 video.

@subsection mjpeg

Motion JPEG video.

Extensions: mjpg, mjpeg

@subsection mlp

Meridian Lossless Packing, also known as Packed PCM, audio.

@subsection mp2

MPEG-1 Audio Layer II audio.

Extensions: mp2, m2a, mpa

@subsection mpeg1video

MPEG-1 Part 2 video.

Extensions: mpg, mpeg, m1v

@subsection mpeg2video

ITU-T H.262 / MPEG-2 Part 2 video.

Extensions: m2v

@subsection obu

AV1 low overhead Open Bitstream Units muxer. Temporal delimiter OBUs will be inserted in all temporal units of the stream.

@subsection rawvideo

Raw uncompressed video.

Extensions: yuv, rgb

@subsection sbc

Bluetooth SIG low-complexity subband codec audio.

Extensions: sbc, msbc

@subsection truehd

Dolby TrueHD audio.

Extensions: thd

@subsection vc1

SMPTE 421M / VC-1 video.
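As an illustrative sketch (file names are placeholders, not from the original text), dumping an AVC stream from an MP4 file through the raw h264 muxer, converting it to Annex B along the way, could look like:

@example
ffmpeg -i input.mp4 -map 0:v:0 -c:v copy -bsf:v h264_mp4toannexb -f h264 output.h264
@end example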
@anchor{segment}
@section segment, stream_segment, ssegment

@ -2569,6 +2232,106 @@ ffmpeg -i INPUT -f streamhash -hash md5 -

See also the @ref{hash} and @ref{framehash} muxers.
@anchor{fifo}
@section fifo

The fifo pseudo-muxer allows the separation of encoding and muxing by using
a first-in-first-out queue and running the actual muxer in a separate thread. This
is especially useful in combination with the @ref{tee} muxer and can be used to
send data to several destinations with different reliability/writing speed/latency.

API users should be aware that callback functions (interrupt_callback,
io_open and io_close) used within its AVFormatContext must be thread-safe.

The behavior of the fifo muxer if the queue fills up or if the output fails is
selectable:

@itemize @bullet

@item
output can be transparently restarted with a configurable delay between retries
based on real time or the time of the processed stream.

@item
encoding can be blocked during temporary failure, or continue transparently
dropping packets in case the fifo queue fills up.

@end itemize

@table @option

@item fifo_format
Specify the format name. Useful if it cannot be guessed from the
output name suffix.

@item queue_size
Specify the size of the queue (number of packets). Default value is 60.

@item format_opts
Specify format options for the underlying muxer. Muxer options can be specified
as a list of @var{key}=@var{value} pairs separated by ':'.

@item drop_pkts_on_overflow @var{bool}
If set to 1 (true), in case the fifo queue fills up, packets will be dropped
rather than blocking the encoder. This makes it possible to continue streaming without
delaying the input, at the cost of omitting part of the stream. By default
this option is set to 0 (false), so in such cases the encoder will be blocked
until the muxer processes some of the packets and none of them is lost.

@item attempt_recovery @var{bool}
If failure occurs, attempt to recover the output. This is especially useful
when used with network output, since it makes it possible to restart streaming transparently.
By default this option is set to 0 (false).

@item max_recovery_attempts
Sets the maximum number of successive unsuccessful recovery attempts after which
the output fails permanently. By default this option is set to 0 (unlimited).

@item recovery_wait_time @var{duration}
Waiting time before the next recovery attempt after a previous unsuccessful
recovery attempt. Default value is 5 seconds.

@item recovery_wait_streamtime @var{bool}
If set to 0 (false), the real time is used when waiting for the recovery
attempt (i.e. the recovery will be attempted after at least
@var{recovery_wait_time} seconds).
If set to 1 (true), the time of the processed stream is taken into account
instead (i.e. the recovery will be attempted after at least @var{recovery_wait_time}
seconds of the stream have been omitted).
By default, this option is set to 0 (false).

@item recover_any_error @var{bool}
If set to 1 (true), recovery will be attempted regardless of the type of error
causing the failure. By default this option is set to 0 (false) and in case of
certain (usually permanent) errors the recovery is not attempted even when
@var{attempt_recovery} is set to 1.

@item restart_with_keyframe @var{bool}
Specify whether to wait for the keyframe after recovering from
queue overflow or failure. This option is set to 0 (false) by default.

@item timeshift @var{duration}
Buffer the specified amount of packets and delay writing the output. Note that
@var{queue_size} must be big enough to store the packets for timeshift. At the
end of the input the fifo buffer is flushed at realtime speed.

@end table

@subsection Examples

@itemize

@item
Stream something to an rtmp server, continue processing the stream at real-time
rate even in case of temporary failure (network outage) and attempt to recover
streaming every second indefinitely.
@example
ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0:a
  -drop_pkts_on_overflow 1 -attempt_recovery 1 -recovery_wait_time 1 rtmp://example.com/live/stream_name
@end example

@end itemize
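As a further illustrative sketch (the destination address is a placeholder, not from the original text), the @option{timeshift} option can be combined with a larger queue to buffer roughly ten seconds of output before it is written:

@example
ffmpeg -re -i INPUT -c copy -f fifo -fifo_format mpegts -queue_size 1000 -timeshift 10 udp://127.0.0.1:1234
@end example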
@anchor{tee}
@section tee

@ -2701,49 +2464,6 @@ ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac
@end example
@end itemize

@section webm_chunk

WebM Live Chunk Muxer.

This muxer writes out WebM headers and chunks as separate files which can be
consumed by clients that support WebM Live streams via DASH.

@subsection Options

This muxer supports the following options:

@table @option
@item chunk_start_index
Index of the first chunk (defaults to 0).

@item header
Filename of the header where the initialization data will be written.

@item audio_chunk_duration
Duration of each audio chunk in milliseconds (defaults to 5000).
@end table

@subsection Example
@example
ffmpeg -f v4l2 -i /dev/video0 \
  -f alsa -i hw:0 \
  -map 0:0 \
  -c:v libvpx-vp9 \
  -s 640x360 -keyint_min 30 -g 30 \
  -f webm_chunk \
  -header webm_live_video_360.hdr \
  -chunk_start_index 1 \
  webm_live_video_360_%d.chk \
  -map 1:0 \
  -c:a libvorbis \
  -b:a 128k \
  -f webm_chunk \
  -header webm_live_audio_128.hdr \
  -chunk_start_index 1 \
  -audio_chunk_duration 1000 \
  webm_live_audio_128_%d.chk
@end example
@section webm_dash_manifest

WebM DASH Manifest muxer.
@ -2810,4 +2530,47 @@ ffmpeg -f webm_dash_manifest -i video1.webm \
  manifest.xml
@end example

@section webm_chunk

WebM Live Chunk Muxer.

This muxer writes out WebM headers and chunks as separate files which can be
consumed by clients that support WebM Live streams via DASH.

@subsection Options

This muxer supports the following options:

@table @option
@item chunk_start_index
Index of the first chunk (defaults to 0).

@item header
Filename of the header where the initialization data will be written.

@item audio_chunk_duration
Duration of each audio chunk in milliseconds (defaults to 5000).
@end table

@subsection Example
@example
ffmpeg -f v4l2 -i /dev/video0 \
  -f alsa -i hw:0 \
  -map 0:0 \
  -c:v libvpx-vp9 \
  -s 640x360 -keyint_min 30 -g 30 \
  -f webm_chunk \
  -header webm_live_video_360.hdr \
  -chunk_start_index 1 \
  webm_live_video_360_%d.chk \
  -map 1:0 \
  -c:a libvorbis \
  -b:a 128k \
  -f webm_chunk \
  -header webm_live_audio_128.hdr \
  -chunk_start_index 1 \
  -audio_chunk_duration 1000 \
  webm_live_audio_128_%d.chk
@end example

@c man end MUXERS
@ -198,43 +198,13 @@ Amount of time to preroll video in seconds.
Defaults to @option{0.5}.

@item duplex_mode
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
@samp{four_sub_device_half}
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
Defaults to @samp{unset}.

Note: DeckLink SDK 11.0 has replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices, so the DeckLink 8K Pro supports four profiles.

Valid profile modes for the DeckLink 8K Pro (with DeckLink SDK >= 11.0):
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
@samp{four_sub_device_half}

Valid profile modes for the DeckLink Quad 2 and DeckLink Duo 2:
@samp{half}, @samp{full}

@item timing_offset
Sets the genlock timing pixel offset on the used output.
Defaults to @samp{unset}.

@item link
Sets the SDI video link configuration on the used output. Must be
@samp{unset}, @samp{single} link SDI, @samp{dual} link SDI or @samp{quad} link
SDI.
Defaults to @samp{unset}.

@item sqd
Enable Square Division Quad Split mode for Quad-link SDI output.
Must be @samp{unset}, @samp{true} or @samp{false}.
Defaults to @option{unset}.

@item level_a
Enable SMPTE Level A mode on the used output.
Must be @samp{unset}, @samp{true} or @samp{false}.
Defaults to @option{unset}.

@end table

@subsection Examples
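The following is an illustrative sketch only (the device name is a placeholder and this command is not reproduced from the original examples); it plays a file out through a DeckLink device while forcing full duplex mode:

@example
ffmpeg -i INPUT -f decklink -pix_fmt uyvy422 -duplex_mode full 'DeckLink Duo (1)'
@end example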
@ -215,38 +215,6 @@ ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
Note that you may need to escape the character "|" which is special for
many shells.

@section concatf

Physical concatenation protocol using a line-break-delimited list of
resources.

Read and seek from many resources in sequence as if they were
a unique resource.

A URL accepted by this protocol has the syntax:
@example
concatf:@var{URL}
@end example

where @var{URL} is the URL containing a line-break-delimited list of
resources to be concatenated, each one possibly specifying a distinct
protocol. Special characters must be escaped with backslash or single
quotes. See @ref{quoting_and_escaping,,the "Quoting and escaping"
section in the ffmpeg-utils(1) manual,ffmpeg-utils}.

For example, to read a sequence of files @file{split1.mpeg},
@file{split2.mpeg}, @file{split3.mpeg} listed in separate lines within
a file @file{split.txt} with @command{ffplay}, use the command:
@example
ffplay concatf:split.txt
@end example
where @file{split.txt} contains the lines:
@example
split1.mpeg
split2.mpeg
split3.mpeg
@end example
@section crypto

AES-encrypted stream reading protocol.
@ -438,6 +406,9 @@ Set the Referer header. Include 'Referer: URL' header in HTTP request.
Override the User-Agent header. If not specified the protocol will use a
string describing the libavformat build. ("Lavf/<version>")

@item user-agent
This is a deprecated option; you can use user_agent instead.

@item reconnect_at_eof
If set then eof is treated like an error and causes reconnection; this is useful
for live / endless streams.
@ -875,11 +846,6 @@ URL to player swf file, compute hash/size automatically.
@item rtmp_tcurl
URL of the target stream. Defaults to proto://host[:port]/app.

@item tcp_nodelay=@var{1|0}
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.

@emph{Remark: Writing to the socket is currently not optimized to minimize system calls and reduces the efficiency / effect of TCP_NODELAY.}

@end table

For example to read with @command{ffplay} a multimedia resource named
@ -1087,10 +1053,6 @@ set to 1) or to a default remote address (if set to 0).
@item localport=@var{n}
Set the local RTP port to @var{n}.

@item localaddr=@var{addr}
Local IP address of a network interface used for sending packets or joining
multicast groups.

@item timeout=@var{n}
Set timeout (in microseconds) of socket I/O operations to @var{n}.
@ -1202,18 +1164,19 @@ Set minimum local UDP port. Default value is 5000.
@item max_port
Set maximum local UDP port. Default value is 65000.

@item listen_timeout
Set maximum timeout (in seconds) to establish an initial connection. Setting
@option{listen_timeout} > 0 sets @option{rtsp_flags} to @samp{listen}. Default is -1,
which means an infinite timeout when @samp{listen} mode is set.
@item timeout
Set maximum timeout (in seconds) to wait for incoming connections.
A value of -1 means infinite (default). This option implies the
@option{rtsp_flags} set to @samp{listen}.

@item reorder_queue_size
Set number of packets to buffer for handling of reordered packets.

@item timeout
@item stimeout
Set socket TCP I/O timeout in microseconds.

@item user_agent
@item user-agent
Override User-Agent header. If not specified, it defaults to the
libavformat identifier string.
@end table
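As an illustrative sketch (addresses and file names are placeholders, not from the original text), waiting for an incoming RTSP announce in listen mode and recording it could look like:

@example
ffmpeg -rtsp_flags listen -i rtsp://0.0.0.0:8554/live.sdp -c copy output.mkv
@end example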
@ -1500,12 +1463,6 @@ when the old encryption key is decommissioned. Default is -1.
-1 means auto (0x1000 in srt library). The range for
this option is integers in the 0 - @code{INT_MAX}.

@item snddropdelay=@var{microseconds}
The sender's extra delay before dropping packets. This delay is
added to the default drop delay time interval value.

Special value -1: Do not drop packets on the sender at all.

@item payload_size=@var{bytes}
Sets the maximum declared size of a packet transferred
during the single call to the sending function in Live
@ -1605,9 +1562,6 @@ This option doesn’t make sense in Rendezvous connection; the result
might be that simply one side will override the value from the other
side and it’s a matter of luck which one would win

@item srt_streamid=@var{string}
Alias for @samp{streamid} to avoid conflict with the ffmpeg command line option.

@item smoother=@var{live|file}
The type of Smoother used for the transmission for that socket, which
is responsible for the transmission and congestion control. The Smoother
@ -1657,11 +1611,6 @@ Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
seconds in file mode). The range for this option is integers in the
0 - @code{INT_MAX}.

@item tsbpd=@var{1|0}
When true, use Timestamp-based Packet Delivery mode. The default behavior
depends on the transmission type: enabled in live mode, disabled in file
mode.

@end table

For more information see: @url{https://github.com/Haivision/srt}.
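As a minimal sketch (the address is a placeholder and not part of the original text), sending an MPEG-TS stream over SRT in caller mode could look like:

@example
ffmpeg -re -i INPUT -c copy -f mpegts "srt://192.168.1.100:9000?mode=caller"
@end example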
@ -1770,8 +1719,6 @@ Set send buffer size, expressed bytes.
@item tcp_nodelay=@var{1|0}
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.

@emph{Remark: Writing to the socket is currently not optimized to minimize system calls and reduces the efficiency / effect of TCP_NODELAY.}

@item tcp_mss=@var{bytes}
Set maximum segment size for outgoing TCP packets, expressed in bytes.
@end table
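For illustration (address and port are placeholders, not part of the original text), Nagle's algorithm can be disabled on an outgoing TCP connection with:

@example
ffmpeg -i INPUT -f mpegts "tcp://127.0.0.1:1234?tcp_nodelay=1"
@end example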
28
doc/t2h.pm
@ -126,16 +126,8 @@ foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
    texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
}

# determine if texinfo is at least version 6.8
my $program_version_num = version->declare(get_conf('PACKAGE_VERSION'))->numify;
my $program_version_6_8 = $program_version_num >= 6.008000;

# print the TOC where @contents is used
if ($program_version_6_8) {
    set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline');
} else {
    set_from_init_file('INLINE_CONTENTS', 1);
}
set_from_init_file('INLINE_CONTENTS', 1);

# make chapters <h2>
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
@ -192,11 +184,7 @@ EOT

    return $head1 . $head_title . $head2 . $head_title . $head3;
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_begin_file', \&ffmpeg_begin_file);
} else {
    texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
}
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);

sub ffmpeg_program_string($)
{
@ -213,11 +201,7 @@ sub ffmpeg_program_string($)
            $self->gdt('This document was generated automatically.'));
    }
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_program_string', \&ffmpeg_program_string);
} else {
    texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
}
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);

# Customized file ending
sub ffmpeg_end_file($)
@ -236,11 +220,7 @@ EOT
EOT
    return $program_text . $footer;
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_end_file', \&ffmpeg_end_file);
} else {
    texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
}
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);

# Dummy title command
# Ignore title. Title is handled through ffmpeg_begin_file().
@ -1,706 +0,0 @@
The basis transforms used for FFT and various other derived functions are based
on the following unrollings.
The functions can be easily adapted to double precision floats as well.

# Parity permutation
The basis transforms described here all use the following permutation:

``` C
void ff_tx_gen_split_radix_parity_revtab(int *revtab, int len, int inv,
                                         int basis, int dual_stride);
```

Parity means even and odd complex numbers will be split, e.g. the even
coefficients will come first, after which the odd coefficients will be
placed. For example, a 4-point transform's coefficients after reordering:
`z[0].re, z[0].im, z[2].re, z[2].im, z[1].re, z[1].im, z[3].re, z[3].im`

The basis argument is the length of the largest non-composite transform
supported, and also implies that the basis/2 transform is supported as well,
as the split-radix algorithm requires it to be.

The dual_stride argument indicates that both the basis, as well as the
basis/2 transforms support doing two transforms at once, and the coefficients
will be interleaved between each pair in a split-radix like so (stride == 2):
`tx1[0], tx1[2], tx2[0], tx2[2], tx1[1], tx1[3], tx2[1], tx2[3]`
A non-zero number switches this on, with the value indicating the stride
(how many values of 1 transform to put first before switching to the other).
Must be a power of two or 0. Must be less than the basis.
The value will be clipped to the transform size, so for a basis of 16 and a
dual_stride of 8, dual 8-point transforms will be laid out as if dual_stride
was set to 4.
Usually you'll set this to half the complex numbers that fit in a single
register or 0. This allows reusing SSE functions as dual-transform
functions in AVX mode.
If the length is smaller than basis/2 this function will not do anything.
# 4-point FFT transform
The only permutation this transform needs is to swap the `z[1]` and `z[2]`
elements when performing an inverse transform, which in the assembly code is
hardcoded with the function itself being templated and duplicated for each
direction.

``` C
static void fft4(FFTComplex *z)
{
    FFTSample r1 = z[0].re - z[2].re;
    FFTSample r2 = z[0].im - z[2].im;
    FFTSample r3 = z[1].re - z[3].re;
    FFTSample r4 = z[1].im - z[3].im;
    /* r5-r8 second transform */

    FFTSample t1 = z[0].re + z[2].re;
    FFTSample t2 = z[0].im + z[2].im;
    FFTSample t3 = z[1].re + z[3].re;
    FFTSample t4 = z[1].im + z[3].im;
    /* t5-t8 second transform */

    /* 1sub + 1add = 2 instructions */

    /* 2 shufs */
    FFTSample a3 = t1 - t3;
    FFTSample a4 = t2 - t4;
    FFTSample b3 = r1 - r4;
    FFTSample b2 = r2 - r3;

    FFTSample a1 = t1 + t3;
    FFTSample a2 = t2 + t4;
    FFTSample b1 = r1 + r4;
    FFTSample b4 = r2 + r3;
    /* 1 add 1 sub 3 shufs */

    z[0].re = a1;
    z[0].im = a2;
    z[2].re = a3;
    z[2].im = a4;

    z[1].re = b1;
    z[1].im = b2;
    z[3].re = b3;
    z[3].im = b4;
}
```
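For reference, a small standalone driver for the `fft4()` above can be written as follows; this is not part of the original document, and the typedefs are assumptions mirroring the single-precision types used in FFmpeg. Transforming a unit impulse should yield a flat spectrum, i.e. every output bin equal to (1, 0).

``` C
#include <stdio.h>

/* Assumed single-precision definitions; in FFmpeg these come from the FFT headers. */
typedef float FFTSample;
typedef struct FFTComplex { FFTSample re, im; } FFTComplex;

/* fft4() from the listing above is assumed to be pasted here, in the same translation unit. */

int main(void)
{
    FFTComplex z[4] = { { 1, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
    fft4(z); /* forward transform of an impulse: every bin should be (1, 0) */
    for (int i = 0; i < 4; i++)
        printf("z[%d] = %f %+fi\n", i, z[i].re, z[i].im);
    return 0;
}
```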
|
|
||||||
# 8-point AVX FFT transform
|
|
||||||
Input must be pre-permuted using the parity lookup table, generated via
|
|
||||||
`ff_tx_gen_split_radix_parity_revtab`.
|
|
||||||
|
|
||||||
``` C
static void fft8(FFTComplex *z)
{
    FFTSample r1 = z[0].re - z[4].re;
    FFTSample r2 = z[0].im - z[4].im;
    FFTSample r3 = z[1].re - z[5].re;
    FFTSample r4 = z[1].im - z[5].im;

    FFTSample r5 = z[2].re - z[6].re;
    FFTSample r6 = z[2].im - z[6].im;
    FFTSample r7 = z[3].re - z[7].re;
    FFTSample r8 = z[3].im - z[7].im;

    FFTSample q1 = z[0].re + z[4].re;
    FFTSample q2 = z[0].im + z[4].im;
    FFTSample q3 = z[1].re + z[5].re;
    FFTSample q4 = z[1].im + z[5].im;

    FFTSample q5 = z[2].re + z[6].re;
    FFTSample q6 = z[2].im + z[6].im;
    FFTSample q7 = z[3].re + z[7].re;
    FFTSample q8 = z[3].im + z[7].im;

    FFTSample s3 = q1 - q3;
    FFTSample s1 = q1 + q3;
    FFTSample s4 = q2 - q4;
    FFTSample s2 = q2 + q4;

    FFTSample s7 = q5 - q7;
    FFTSample s5 = q5 + q7;
    FFTSample s8 = q6 - q8;
    FFTSample s6 = q6 + q8;

    FFTSample e1 = s1 * -1;
    FFTSample e2 = s2 * -1;
    FFTSample e3 = s3 * -1;
    FFTSample e4 = s4 * -1;

    FFTSample e5 = s5 *  1;
    FFTSample e6 = s6 *  1;
    FFTSample e7 = s7 * -1;
    FFTSample e8 = s8 *  1;

    FFTSample w1 = e5 - e1;
    FFTSample w2 = e6 - e2;
    FFTSample w3 = e8 - e3;
    FFTSample w4 = e7 - e4;

    FFTSample w5 = s1 - e5;
    FFTSample w6 = s2 - e6;
    FFTSample w7 = s3 - e8;
    FFTSample w8 = s4 - e7;

    z[0].re = w1;
    z[0].im = w2;
    z[2].re = w3;
    z[2].im = w4;
    z[4].re = w5;
    z[4].im = w6;
    z[6].re = w7;
    z[6].im = w8;

    FFTSample z1 = r1 - r4;
    FFTSample z2 = r1 + r4;
    FFTSample z3 = r3 - r2;
    FFTSample z4 = r3 + r2;

    FFTSample z5 = r5 - r6;
    FFTSample z6 = r5 + r6;
    FFTSample z7 = r7 - r8;
    FFTSample z8 = r7 + r8;

    z3 *= -1;
    z5 *= -M_SQRT1_2;
    z6 *= -M_SQRT1_2;
    z7 *=  M_SQRT1_2;
    z8 *=  M_SQRT1_2;

    FFTSample t5 = z7 - z6;
    FFTSample t6 = z8 + z5;
    FFTSample t7 = z8 - z5;
    FFTSample t8 = z7 + z6;

    FFTSample u1 = z2 + t5;
    FFTSample u2 = z3 + t6;
    FFTSample u3 = z1 - t7;
    FFTSample u4 = z4 + t8;

    FFTSample u5 = z2 - t5;
    FFTSample u6 = z3 - t6;
    FFTSample u7 = z1 + t7;
    FFTSample u8 = z4 - t8;

    z[1].re = u1;
    z[1].im = u2;
    z[3].re = u3;
    z[3].im = u4;
    z[5].re = u5;
    z[5].im = u6;
    z[7].re = u7;
    z[7].im = u8;
}
```

As you can see, there are two independent paths, one for the even and one for the odd
coefficients. This theme continues throughout the document. Note that in the actual
assembly code the paths are interleaved to improve unit saturation and CPU dependency
tracking, so to see them more clearly you'll need to deinterleave the instructions.

# 8-point SSE/ARM64 FFT transform

Input must be pre-permuted using the parity lookup table, generated via
`ff_tx_gen_split_radix_parity_revtab`.

``` C
static void fft8(FFTComplex *z)
{
    FFTSample r1 = z[0].re - z[4].re;
    FFTSample r2 = z[0].im - z[4].im;
    FFTSample r3 = z[1].re - z[5].re;
    FFTSample r4 = z[1].im - z[5].im;

    FFTSample j1 = z[2].re - z[6].re;
    FFTSample j2 = z[2].im - z[6].im;
    FFTSample j3 = z[3].re - z[7].re;
    FFTSample j4 = z[3].im - z[7].im;

    FFTSample q1 = z[0].re + z[4].re;
    FFTSample q2 = z[0].im + z[4].im;
    FFTSample q3 = z[1].re + z[5].re;
    FFTSample q4 = z[1].im + z[5].im;

    FFTSample k1 = z[2].re + z[6].re;
    FFTSample k2 = z[2].im + z[6].im;
    FFTSample k3 = z[3].re + z[7].re;
    FFTSample k4 = z[3].im + z[7].im;
    /* 2 add 2 sub = 4 */

    /* 2 shufs, 1 add 1 sub = 4 */
    FFTSample s1 = q1 + q3;
    FFTSample s2 = q2 + q4;
    FFTSample g1 = k3 + k1;
    FFTSample g2 = k2 + k4;

    FFTSample s3 = q1 - q3;
    FFTSample s4 = q2 - q4;
    FFTSample g4 = k3 - k1;
    FFTSample g3 = k2 - k4;

    /* 1 unpack + 1 shuffle = 2 */

    /* 1 add */
    FFTSample w1 = s1 + g1;
    FFTSample w2 = s2 + g2;
    FFTSample w3 = s3 + g3;
    FFTSample w4 = s4 + g4;

    /* 1 sub */
    FFTSample h1 = s1 - g1;
    FFTSample h2 = s2 - g2;
    FFTSample h3 = s3 - g3;
    FFTSample h4 = s4 - g4;

    z[0].re = w1;
    z[0].im = w2;
    z[2].re = w3;
    z[2].im = w4;
    z[4].re = h1;
    z[4].im = h2;
    z[6].re = h3;
    z[6].im = h4;

    /* 1 shuf + 1 shuf + 1 xor + 1 addsub */
    FFTSample z1 = r1 + r4;
    FFTSample z2 = r2 - r3;
    FFTSample z3 = r1 - r4;
    FFTSample z4 = r2 + r3;

    /* 1 mult */
    j1 *=  M_SQRT1_2;
    j2 *= -M_SQRT1_2;
    j3 *= -M_SQRT1_2;
    j4 *=  M_SQRT1_2;

    /* 1 shuf + 1 addsub */
    FFTSample l2 = j1 - j2;
    FFTSample l1 = j2 + j1;
    FFTSample l4 = j3 - j4;
    FFTSample l3 = j4 + j3;

    /* 1 shuf + 1 addsub */
    FFTSample t1 = l3 - l2;
    FFTSample t2 = l4 + l1;
    FFTSample t3 = l1 - l4;
    FFTSample t4 = l2 + l3;

    /* 1 add */
    FFTSample u1 = z1 - t1;
    FFTSample u2 = z2 - t2;
    FFTSample u3 = z3 - t3;
    FFTSample u4 = z4 - t4;

    /* 1 sub */
    FFTSample o1 = z1 + t1;
    FFTSample o2 = z2 + t2;
    FFTSample o3 = z3 + t3;
    FFTSample o4 = z4 + t4;

    z[1].re = u1;
    z[1].im = u2;
    z[3].re = u3;
    z[3].im = u4;
    z[5].re = o1;
    z[5].im = o2;
    z[7].re = o3;
    z[7].im = o4;
}
```

Most functions here are highly tuned to use x86's addsub instruction to save on
external sign mask loading.

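For readers unfamiliar with it, here is a scalar model of what SSE3's `addsubps` computes: even lanes get a subtraction, odd lanes an addition. The helper name below is ad hoc; the snippet only exists to make the shuffle/addsub comments in the listings easier to follow on paper.

``` C
#include <stdio.h>

/* Scalar model of addsubps on a 4-float vector. */
static void addsub_ps(const float a[4], const float b[4], float dst[4])
{
    for (int i = 0; i < 4; i++)
        dst[i] = (i & 1) ? a[i] + b[i] : a[i] - b[i];
}

int main(void)
{
    const float a[4] = {  1.0f,  2.0f,  3.0f,  4.0f };
    const float b[4] = { 10.0f, 20.0f, 30.0f, 40.0f };
    float d[4];
    addsub_ps(a, b, d);
    printf("%g %g %g %g\n", d[0], d[1], d[2], d[3]); /* -9 22 -27 44 */
    return 0;
}
```
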
# 16-point AVX FFT transform

This version expects the output of the 8 and 4-point transforms to follow the
even/odd convention established above.

``` C
static void fft16(FFTComplex *z)
{
    FFTSample cos_16_1 = 0.92387950420379638671875f;
    FFTSample cos_16_3 = 0.3826834261417388916015625f;

    fft8(z);
    fft4(z+8);
    fft4(z+10);

    FFTSample s[32];

    /*
    xorps m1, m1 - free
    mulps m0
    shufps m1, m1, m0
    xorps
    addsub
    shufps
    mulps
    mulps
    addps
    or (fma3)
    shufps
    shufps
    mulps
    mulps
    fma
    fma
    */

    s[0] = z[8].re*( 1) - z[8].im*( 0);
    s[1] = z[8].im*( 1) + z[8].re*( 0);
    s[2] = z[9].re*( 1) - z[9].im*(-1);
    s[3] = z[9].im*( 1) + z[9].re*(-1);

    s[4] = z[10].re*( 1) - z[10].im*( 0);
    s[5] = z[10].im*( 1) + z[10].re*( 0);
    s[6] = z[11].re*( 1) - z[11].im*( 1);
    s[7] = z[11].im*( 1) + z[11].re*( 1);

    s[8]  = z[12].re*( cos_16_1) - z[12].im*( -cos_16_3);
    s[9]  = z[12].im*( cos_16_1) + z[12].re*( -cos_16_3);
    s[10] = z[13].re*( cos_16_3) - z[13].im*( -cos_16_1);
    s[11] = z[13].im*( cos_16_3) + z[13].re*( -cos_16_1);

    s[12] = z[14].re*( cos_16_1) - z[14].im*( cos_16_3);
    s[13] = z[14].im*( -cos_16_1) + z[14].re*( -cos_16_3);
    s[14] = z[15].re*( cos_16_3) - z[15].im*( cos_16_1);
    s[15] = z[15].im*( -cos_16_3) + z[15].re*( -cos_16_1);

    s[2] *= M_SQRT1_2;
    s[3] *= M_SQRT1_2;
    s[5] *= -1;
    s[6] *= M_SQRT1_2;
    s[7] *= -M_SQRT1_2;

    FFTSample w5 = s[0] + s[4];
    FFTSample w6 = s[1] - s[5];
    FFTSample x5 = s[2] + s[6];
    FFTSample x6 = s[3] - s[7];

    FFTSample w3 = s[4] - s[0];
    FFTSample w4 = s[5] + s[1];
    FFTSample x3 = s[6] - s[2];
    FFTSample x4 = s[7] + s[3];

    FFTSample y5 = s[8] + s[12];
    FFTSample y6 = s[9] - s[13];
    FFTSample u5 = s[10] + s[14];
    FFTSample u6 = s[11] - s[15];

    FFTSample y3 = s[12] - s[8];
    FFTSample y4 = s[13] + s[9];
    FFTSample u3 = s[14] - s[10];
    FFTSample u4 = s[15] + s[11];

    /* 2xorps, 2vperm2fs, 2 adds, 2 vpermilps = 8 */

    FFTSample o1  = z[0].re + w5;
    FFTSample o2  = z[0].im + w6;
    FFTSample o5  = z[1].re + x5;
    FFTSample o6  = z[1].im + x6;
    FFTSample o9  = z[2].re + w4; //h
    FFTSample o10 = z[2].im + w3;
    FFTSample o13 = z[3].re + x4;
    FFTSample o14 = z[3].im + x3;

    FFTSample o17 = z[0].re - w5;
    FFTSample o18 = z[0].im - w6;
    FFTSample o21 = z[1].re - x5;
    FFTSample o22 = z[1].im - x6;
    FFTSample o25 = z[2].re - w4; //h
    FFTSample o26 = z[2].im - w3;
    FFTSample o29 = z[3].re - x4;
    FFTSample o30 = z[3].im - x3;

    FFTSample o3  = z[4].re + y5;
    FFTSample o4  = z[4].im + y6;
    FFTSample o7  = z[5].re + u5;
    FFTSample o8  = z[5].im + u6;
    FFTSample o11 = z[6].re + y4; //h
    FFTSample o12 = z[6].im + y3;
    FFTSample o15 = z[7].re + u4;
    FFTSample o16 = z[7].im + u3;

    FFTSample o19 = z[4].re - y5;
    FFTSample o20 = z[4].im - y6;
    FFTSample o23 = z[5].re - u5;
    FFTSample o24 = z[5].im - u6;
    FFTSample o27 = z[6].re - y4; //h
    FFTSample o28 = z[6].im - y3;
    FFTSample o31 = z[7].re - u4;
    FFTSample o32 = z[7].im - u3;

    /* This is just deinterleaving, happens separately */
    z[0] = (FFTComplex){ o1,  o2  };
    z[1] = (FFTComplex){ o3,  o4  };
    z[2] = (FFTComplex){ o5,  o6  };
    z[3] = (FFTComplex){ o7,  o8  };
    z[4] = (FFTComplex){ o9,  o10 };
    z[5] = (FFTComplex){ o11, o12 };
    z[6] = (FFTComplex){ o13, o14 };
    z[7] = (FFTComplex){ o15, o16 };

    z[8]  = (FFTComplex){ o17, o18 };
    z[9]  = (FFTComplex){ o19, o20 };
    z[10] = (FFTComplex){ o21, o22 };
    z[11] = (FFTComplex){ o23, o24 };
    z[12] = (FFTComplex){ o25, o26 };
    z[13] = (FFTComplex){ o27, o28 };
    z[14] = (FFTComplex){ o29, o30 };
    z[15] = (FFTComplex){ o31, o32 };
}
```
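
As a quick check, the two constants at the top of `fft16()` appear to be `cos(pi/8)` and `cos(3*pi/8)` rounded to single precision; the snippet below simply prints both for comparison with the literals in the listing.

``` C
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    /* Print the float-rounded values so they can be compared digit by digit
     * with the cos_16_1 and cos_16_3 literals above. */
    printf("cos(pi/8)   = %.25f\n", (double)(float)cos(M_PI / 8.0));
    printf("cos(3*pi/8) = %.25f\n", (double)(float)cos(3.0 * M_PI / 8.0));
    return 0;
}
```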

# AVX split-radix synthesis

To create larger transforms, the following unrolling of the C split-radix
function is used.

``` C
#define BF(x, y, a, b)      \
    do {                    \
        x = (a) - (b);      \
        y = (a) + (b);      \
    } while (0)

#define BUTTERFLIES(a0,a1,a2,a3)        \
    do {                                \
        r0=a0.re;                       \
        i0=a0.im;                       \
        r1=a1.re;                       \
        i1=a1.im;                       \
        BF(q3, q5, q5, q1);             \
        BF(a2.re, a0.re, r0, q5);       \
        BF(a3.im, a1.im, i1, q3);       \
        BF(q4, q6, q2, q6);             \
        BF(a3.re, a1.re, r1, q4);       \
        BF(a2.im, a0.im, i0, q6);       \
    } while (0)

#undef TRANSFORM
#define TRANSFORM(a0,a1,a2,a3,wre,wim)          \
    do {                                        \
        CMUL(q1, q2, a2.re, a2.im, wre, -wim);  \
        CMUL(q5, q6, a3.re, a3.im, wre,  wim);  \
        BUTTERFLIES(a0, a1, a2, a3);            \
    } while (0)

#define CMUL(dre, dim, are, aim, bre, bim)          \
    do {                                            \
        (dre) = (are) * (bre) - (aim) * (bim);      \
        (dim) = (are) * (bim) + (aim) * (bre);      \
    } while (0)

static void recombine(FFTComplex *z, const FFTSample *cos,
                      unsigned int n)
{
    const int o1 = 2*n;
    const int o2 = 4*n;
    const int o3 = 6*n;
    const FFTSample *wim = cos + o1 - 7;
    FFTSample q1, q2, q3, q4, q5, q6, r0, i0, r1, i1;

#if 0
    for (int i = 0; i < n; i += 4) {
#endif

#if 0
    TRANSFORM(z[ 0 + 0], z[ 0 + 4], z[o2 + 0], z[o2 + 2], cos[0], wim[7]);
    TRANSFORM(z[ 0 + 1], z[ 0 + 5], z[o2 + 1], z[o2 + 3], cos[2], wim[5]);
    TRANSFORM(z[ 0 + 2], z[ 0 + 6], z[o2 + 4], z[o2 + 6], cos[4], wim[3]);
    TRANSFORM(z[ 0 + 3], z[ 0 + 7], z[o2 + 5], z[o2 + 7], cos[6], wim[1]);

    TRANSFORM(z[o1 + 0], z[o1 + 4], z[o3 + 0], z[o3 + 2], cos[1], wim[6]);
    TRANSFORM(z[o1 + 1], z[o1 + 5], z[o3 + 1], z[o3 + 3], cos[3], wim[4]);
    TRANSFORM(z[o1 + 2], z[o1 + 6], z[o3 + 4], z[o3 + 6], cos[5], wim[2]);
    TRANSFORM(z[o1 + 3], z[o1 + 7], z[o3 + 5], z[o3 + 7], cos[7], wim[0]);
#else
    FFTSample h[8], j[8], r[8], w[8];
    FFTSample t[8];
    FFTComplex *m0 = &z[0];
    FFTComplex *m1 = &z[4];
    FFTComplex *m2 = &z[o2 + 0];
    FFTComplex *m3 = &z[o2 + 4];

    const FFTSample *t1 = &cos[0];
    const FFTSample *t2 = &wim[0];

    /* 2 loads (tabs) */

    /* 2 vperm2fs, 2 shufs (im), 2 shufs (tabs) */
    /* 1 xor, 1 add, 1 sub, 4 mults OR 2 mults, 2 fmas */
    /* 13 OR 10ish (-2 each for second passovers!) */

    w[0] = m2[0].im*t1[0] - m2[0].re*t2[7];
    w[1] = m2[0].re*t1[0] + m2[0].im*t2[7];
    w[2] = m2[1].im*t1[2] - m2[1].re*t2[5];
    w[3] = m2[1].re*t1[2] + m2[1].im*t2[5];
    w[4] = m3[0].im*t1[4] - m3[0].re*t2[3];
    w[5] = m3[0].re*t1[4] + m3[0].im*t2[3];
    w[6] = m3[1].im*t1[6] - m3[1].re*t2[1];
    w[7] = m3[1].re*t1[6] + m3[1].im*t2[1];

    j[0] = m2[2].im*t1[0] + m2[2].re*t2[7];
    j[1] = m2[2].re*t1[0] - m2[2].im*t2[7];
    j[2] = m2[3].im*t1[2] + m2[3].re*t2[5];
    j[3] = m2[3].re*t1[2] - m2[3].im*t2[5];
    j[4] = m3[2].im*t1[4] + m3[2].re*t2[3];
    j[5] = m3[2].re*t1[4] - m3[2].im*t2[3];
    j[6] = m3[3].im*t1[6] + m3[3].re*t2[1];
    j[7] = m3[3].re*t1[6] - m3[3].im*t2[1];

    /* 1 add + 1 shuf */
    t[1] = j[0] + w[0];
    t[0] = j[1] + w[1];
    t[3] = j[2] + w[2];
    t[2] = j[3] + w[3];
    t[5] = j[4] + w[4];
    t[4] = j[5] + w[5];
    t[7] = j[6] + w[6];
    t[6] = j[7] + w[7];

    /* 1 sub + 1 xor */
    r[0] =  (w[0] - j[0]);
    r[1] = -(w[1] - j[1]);
    r[2] =  (w[2] - j[2]);
    r[3] = -(w[3] - j[3]);
    r[4] =  (w[4] - j[4]);
    r[5] = -(w[5] - j[5]);
    r[6] =  (w[6] - j[6]);
    r[7] = -(w[7] - j[7]);

    /* Min: 2 subs, 2 adds, 2 vperm2fs (OPTIONAL) */
    m2[0].re = m0[0].re - t[0];
    m2[0].im = m0[0].im - t[1];
    m2[1].re = m0[1].re - t[2];
    m2[1].im = m0[1].im - t[3];
    m3[0].re = m0[2].re - t[4];
    m3[0].im = m0[2].im - t[5];
    m3[1].re = m0[3].re - t[6];
    m3[1].im = m0[3].im - t[7];

    m2[2].re = m1[0].re - r[0];
    m2[2].im = m1[0].im - r[1];
    m2[3].re = m1[1].re - r[2];
    m2[3].im = m1[1].im - r[3];
    m3[2].re = m1[2].re - r[4];
    m3[2].im = m1[2].im - r[5];
    m3[3].re = m1[3].re - r[6];
    m3[3].im = m1[3].im - r[7];

    m0[0].re = m0[0].re + t[0];
    m0[0].im = m0[0].im + t[1];
    m0[1].re = m0[1].re + t[2];
    m0[1].im = m0[1].im + t[3];
    m0[2].re = m0[2].re + t[4];
    m0[2].im = m0[2].im + t[5];
    m0[3].re = m0[3].re + t[6];
    m0[3].im = m0[3].im + t[7];

    m1[0].re = m1[0].re + r[0];
    m1[0].im = m1[0].im + r[1];
    m1[1].re = m1[1].re + r[2];
    m1[1].im = m1[1].im + r[3];
    m1[2].re = m1[2].re + r[4];
    m1[2].im = m1[2].im + r[5];
    m1[3].re = m1[3].re + r[6];
    m1[3].im = m1[3].im + r[7];

    /* Identical for below, but with the following parameters */
    m0 = &z[o1];
    m1 = &z[o1 + 4];
    m2 = &z[o3 + 0];
    m3 = &z[o3 + 4];
    t1 = &cos[1];
    t2 = &wim[-1];

    w[0] = m2[0].im*t1[0] - m2[0].re*t2[7];
    w[1] = m2[0].re*t1[0] + m2[0].im*t2[7];
    w[2] = m2[1].im*t1[2] - m2[1].re*t2[5];
    w[3] = m2[1].re*t1[2] + m2[1].im*t2[5];
    w[4] = m3[0].im*t1[4] - m3[0].re*t2[3];
    w[5] = m3[0].re*t1[4] + m3[0].im*t2[3];
    w[6] = m3[1].im*t1[6] - m3[1].re*t2[1];
    w[7] = m3[1].re*t1[6] + m3[1].im*t2[1];

    j[0] = m2[2].im*t1[0] + m2[2].re*t2[7];
    j[1] = m2[2].re*t1[0] - m2[2].im*t2[7];
    j[2] = m2[3].im*t1[2] + m2[3].re*t2[5];
    j[3] = m2[3].re*t1[2] - m2[3].im*t2[5];
    j[4] = m3[2].im*t1[4] + m3[2].re*t2[3];
    j[5] = m3[2].re*t1[4] - m3[2].im*t2[3];
    j[6] = m3[3].im*t1[6] + m3[3].re*t2[1];
    j[7] = m3[3].re*t1[6] - m3[3].im*t2[1];

    /* 1 add + 1 shuf */
    t[1] = j[0] + w[0];
    t[0] = j[1] + w[1];
    t[3] = j[2] + w[2];
    t[2] = j[3] + w[3];
    t[5] = j[4] + w[4];
    t[4] = j[5] + w[5];
    t[7] = j[6] + w[6];
    t[6] = j[7] + w[7];

    /* 1 sub + 1 xor */
    r[0] =  (w[0] - j[0]);
    r[1] = -(w[1] - j[1]);
    r[2] =  (w[2] - j[2]);
    r[3] = -(w[3] - j[3]);
    r[4] =  (w[4] - j[4]);
    r[5] = -(w[5] - j[5]);
    r[6] =  (w[6] - j[6]);
    r[7] = -(w[7] - j[7]);

    /* Min: 2 subs, 2 adds, 2 vperm2fs (OPTIONAL) */
    m2[0].re = m0[0].re - t[0];
    m2[0].im = m0[0].im - t[1];
    m2[1].re = m0[1].re - t[2];
    m2[1].im = m0[1].im - t[3];
    m3[0].re = m0[2].re - t[4];
    m3[0].im = m0[2].im - t[5];
    m3[1].re = m0[3].re - t[6];
    m3[1].im = m0[3].im - t[7];

    m2[2].re = m1[0].re - r[0];
    m2[2].im = m1[0].im - r[1];
    m2[3].re = m1[1].re - r[2];
    m2[3].im = m1[1].im - r[3];
    m3[2].re = m1[2].re - r[4];
    m3[2].im = m1[2].im - r[5];
    m3[3].re = m1[3].re - r[6];
    m3[3].im = m1[3].im - r[7];

    m0[0].re = m0[0].re + t[0];
    m0[0].im = m0[0].im + t[1];
    m0[1].re = m0[1].re + t[2];
    m0[1].im = m0[1].im + t[3];
    m0[2].re = m0[2].re + t[4];
    m0[2].im = m0[2].im + t[5];
    m0[3].re = m0[3].re + t[6];
    m0[3].im = m0[3].im + t[7];

    m1[0].re = m1[0].re + r[0];
    m1[0].im = m1[0].im + r[1];
    m1[1].re = m1[1].re + r[2];
    m1[1].im = m1[1].im + r[3];
    m1[2].re = m1[2].re + r[4];
    m1[2].im = m1[2].im + r[5];
    m1[3].re = m1[3].re + r[6];
    m1[3].im = m1[3].im + r[7];
#endif

#if 0
    z += 4; // !!!
    cos += 2*4;
    wim -= 2*4;
    }
#endif
}
```

The macros used are identical to those in the generic C version, only with all
variable declarations hoisted out into the function body.
An important point here is that the high-frequency registers (m2 and m3) have
their high and low halves swapped in the output. This is intentional, as the
inputs must also have the same layout; therefore the input swapping is only
performed once, for the bottom-most basis transform, with all subsequent
combinations using the already swapped halves.

Also note that this function requires a special way of iterating, because the
coefficients begin to overlap, particularly `[o1]` with `[0]`, after the second
iteration. To iterate further, set `z = &z[16]` via `z += 8` for the second
iteration. After the 4th iteration the layout resets, so repeat the same.

@@ -719,8 +719,6 @@ FL+FR+FC+BL+BR+BC+SL+SR
 FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
 @item downmix
 DL+DR
-@item 22.2
-FL+FR+FC+LFE+BL+BR+FLC+FRC+BC+SL+SR+TC+TFL+TFC+TFR+TBL+TBC+TBR+LFE2+TSL+TSR+BFC+BFL+BFR
 @end table

 A custom channel layout can be specified as a sequence of terms, separated by

@@ -8,9 +8,7 @@ OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
 OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
 OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
 OBJS-$(HAVE_MSA) += $(MSA-OBJS) $(MSA-OBJS-yes)
 OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
-OBJS-$(HAVE_LSX) += $(LSX-OBJS) $(LSX-OBJS-yes)
-OBJS-$(HAVE_LASX) += $(LASX-OBJS) $(LASX-OBJS-yes)

 OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
 OBJS-$(HAVE_VSX) += $(VSX-OBJS) $(VSX-OBJS-yes)
@@ -1,76 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <string.h>
-#include <stdio.h>
-
-int main(int argc, char **argv)
-{
-    const char *name;
-    FILE *input, *output;
-    unsigned int length = 0;
-    unsigned char data;
-
-    if (argc < 3 || argc > 4)
-        return 1;
-
-    input = fopen(argv[1], "rb");
-    if (!input)
-        return -1;
-
-    output = fopen(argv[2], "wb");
-    if (!output)
-        return -1;
-
-    if (argc == 4) {
-        name = argv[3];
-    } else {
-        size_t arglen = strlen(argv[1]);
-        name = argv[1];
-
-        for (int i = 0; i < arglen; i++) {
-            if (argv[1][i] == '.')
-                argv[1][i] = '_';
-            else if (argv[1][i] == '/')
-                name = &argv[1][i+1];
-        }
-    }
-
-    fprintf(output, "const unsigned char ff_%s_data[] = { ", name);
-
-    while (fread(&data, 1, 1, input) > 0) {
-        fprintf(output, "0x%02x, ", data);
-        length++;
-    }
-
-    fprintf(output, "0x00 };\n");
-    fprintf(output, "const unsigned int ff_%s_len = %u;\n", name, length);
-
-    fclose(output);
-
-    if (ferror(input) || !feof(input))
-        return -1;
-
-    fclose(input);
-
-    return 0;
-}
@@ -12,13 +12,10 @@ endif

 ifndef SUBDIR

-BIN2CEXE = ffbuild/bin2c$(HOSTEXESUF)
-BIN2C = $(BIN2CEXE)
-
 ifndef V
 Q = @
 ECHO = printf "$(1)\t%s\n" $(2)
-BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C
+BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC
 SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM

 MSG = $@

@@ -29,7 +26,7 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
 $(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
 endif

-ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
+ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample

 # NASM requires -I path terminated with /
 IFLAGS := -I. -I$(SRC_LINK)/

@@ -59,8 +56,6 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
 COMPILE_NVCC = $(call COMPILE,NVCC)
 COMPILE_MMI = $(call COMPILE,CC,MMIFLAGS)
 COMPILE_MSA = $(call COMPILE,CC,MSAFLAGS)
-COMPILE_LSX = $(call COMPILE,CC,LSXFLAGS)
-COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)

 %_mmi.o: %_mmi.c
 	$(COMPILE_MMI)

@@ -68,12 +63,6 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
 %_msa.o: %_msa.c
 	$(COMPILE_MSA)

-%_lsx.o: %_lsx.c
-	$(COMPILE_LSX)
-
-%_lasx.o: %_lasx.c
-	$(COMPILE_LASX)
-
 %.o: %.c
 	$(COMPILE_C)

@@ -109,35 +98,11 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
 %.h.c:
 	$(Q)echo '#include "$*.h"' >$@

-$(BIN2CEXE): ffbuild/bin2c_host.o
-	$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS)
-
-%.metal.air: %.metal
-	$(METALCC) $< -o $@
-
-%.metallib: %.metal.air
-	$(METALLIB) --split-module-without-linking $< -o $@
-
-%.metallib.c: %.metallib $(BIN2CEXE)
-	$(BIN2C) $< $@ $(subst .,_,$(basename $(notdir $@)))
-
 %.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h
 	$(COMPILE_NVCC)

-ifdef CONFIG_PTX_COMPRESSION
-%.ptx.gz: TAG = GZIP
-%.ptx.gz: %.ptx
-	$(M)gzip -c9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@
-
-%.ptx.c: %.ptx.gz $(BIN2CEXE)
-	$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
-else
-%.ptx.c: %.ptx $(BIN2CEXE)
-	$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
-endif
-
-clean::
-	$(RM) $(BIN2CEXE)
+%.ptx.c: %.ptx
+	$(Q)sh $(SRC_PATH)/compat/cuda/ptx2c.sh $@ $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)

 %.c %.h %.pc %.ver %.version: TAG = GEN

@@ -157,8 +122,6 @@ include $(SRC_PATH)/ffbuild/arch.mak

 OBJS += $(OBJS-yes)
 SLIBOBJS += $(SLIBOBJS-yes)
-SHLIBOBJS += $(SHLIBOBJS-yes)
-STLIBOBJS += $(STLIBOBJS-yes)
 FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
 TESTPROGS += $(TESTPROGS-yes)

@@ -167,8 +130,6 @@ FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=

 OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
 SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
-SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%))
-STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%))
 TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
 TESTPROGS := $(TESTPROGS:%=$(SUBDIR)tests/%$(EXESUF))
 HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)

@@ -190,7 +151,7 @@ HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
 PTXOBJS = $(filter %.ptx.o,$(OBJS))
 $(HOBJS): CCFLAGS += $(CFLAGS_HEADERS)
 checkheaders: $(HOBJS)
-.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=.gz) $(PTXOBJS:.o=)
+.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=)

 alltools: $(TOOLS)

@@ -204,14 +165,12 @@ $(OBJS): | $(sort $(dir $(OBJS)))
 $(HOBJS): | $(sort $(dir $(HOBJS)))
 $(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
 $(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
-$(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS)))
-$(STLIBOBJS): | $(sort $(dir $(STLIBOBJS)))
 $(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
 $(TOOLOBJS): | tools

-OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
+OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))

-CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
+CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
 LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a

 define RULES

@@ -221,4 +180,4 @@ endef

 $(eval $(RULES))

--include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
+-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
@@ -14,26 +14,10 @@ INSTHEADERS := $(INSTHEADERS) $(HEADERS:%=$(SUBDIR)%)
 all-$(CONFIG_STATIC): $(SUBDIR)$(LIBNAME) $(SUBDIR)lib$(FULLNAME).pc
 all-$(CONFIG_SHARED): $(SUBDIR)$(SLIBNAME) $(SUBDIR)lib$(FULLNAME).pc

-LIBOBJS := $(OBJS) $(SHLIBOBJS) $(STLIBOBJS) $(SUBDIR)%.h.o $(TESTOBJS)
+LIBOBJS := $(OBJS) $(SUBDIR)%.h.o $(TESTOBJS)
 $(LIBOBJS) $(LIBOBJS:.o=.s) $(LIBOBJS:.o=.i): CPPFLAGS += -DHAVE_AV_CONFIG_H

-ifdef CONFIG_SHARED
-# In case both shared libs and static libs are enabled, it can happen
-# that a user might want to link e.g. libavformat statically, but
-# libavcodec and the other libs dynamically. In this case
-# libavformat won't be able to access libavcodec's internal symbols,
-# so that they have to be duplicated into the archive just like
-# for purely shared builds.
-# Test programs are always statically linked against their library
-# to be able to access their library's internals, even with shared builds.
-# Yet linking against dependend libraries still uses dynamic linking.
-# This means that we are in the scenario described above.
-# In case only static libs are used, the linker will only use
-# one of these copies; this depends on the duplicated object files
-# containing exactly the same symbols.
-OBJS += $(SHLIBOBJS)
-endif
-$(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS)
+$(SUBDIR)$(LIBNAME): $(OBJS)
 	$(RM) $@
 	$(AR) $(ARFLAGS) $(AR_O) $^
 	$(RANLIB) $@

@@ -64,7 +48,7 @@ $(SUBDIR)lib$(NAME).ver: $(SUBDIR)lib$(NAME).v $(OBJS)
 $(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR)
 	$(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME)

-$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
+$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
 	$(SLIB_CREATE_DEF_CMD)
 	$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS)
 	$(SLIB_EXTRA_CMD)
|
@ -10,6 +10,11 @@ ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
|
|||||||
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
||||||
|
|
||||||
OBJS-ffmpeg += fftools/ffmpeg_opt.o fftools/ffmpeg_filter.o fftools/ffmpeg_hw.o
|
OBJS-ffmpeg += fftools/ffmpeg_opt.o fftools/ffmpeg_filter.o fftools/ffmpeg_hw.o
|
||||||
|
OBJS-ffmpeg-$(CONFIG_LIBMFX) += fftools/ffmpeg_qsv.o
|
||||||
|
ifndef CONFIG_VIDEOTOOLBOX
|
||||||
|
OBJS-ffmpeg-$(CONFIG_VDA) += fftools/ffmpeg_videotoolbox.o
|
||||||
|
endif
|
||||||
|
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += fftools/ffmpeg_videotoolbox.o
|
||||||
|
|
||||||
define DOFFTOOL
|
define DOFFTOOL
|
||||||
OBJS-$(1) += fftools/cmdutils.o fftools/$(1).o $(OBJS-$(1)-yes)
|
OBJS-$(1) += fftools/cmdutils.o fftools/$(1).o $(OBJS-$(1)-yes)
|
||||||
|
@@ -34,6 +34,7 @@
 #include "libavformat/avformat.h"
 #include "libavfilter/avfilter.h"
 #include "libavdevice/avdevice.h"
+#include "libavresample/avresample.h"
 #include "libswscale/swscale.h"
 #include "libswresample/swresample.h"
 #include "libpostproc/postprocess.h"

@@ -41,7 +42,6 @@
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/bprint.h"
-#include "libavutil/channel_layout.h"
 #include "libavutil/display.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/imgutils.h"

@@ -54,7 +54,6 @@
 #include "libavutil/cpu.h"
 #include "libavutil/ffversion.h"
 #include "libavutil/version.h"
-#include "libavcodec/bsf.h"
 #include "cmdutils.h"
 #if HAVE_SYS_RESOURCE_H
 #include <sys/time.h>

@@ -68,7 +67,7 @@ static int init_report(const char *env);
 AVDictionary *sws_dict;
 AVDictionary *swr_opts;
-AVDictionary *format_opts, *codec_opts;
+AVDictionary *format_opts, *codec_opts, *resample_opts;

 static FILE *report_file;
 static int report_file_level = AV_LOG_DEBUG;

@@ -80,12 +79,18 @@ enum show_muxdemuxers {
     SHOW_MUXERS,
 };

+void init_opts(void)
+{
+    av_dict_set(&sws_dict, "flags", "bicubic", 0);
+}
+
 void uninit_opts(void)
 {
     av_dict_free(&swr_opts);
     av_dict_free(&sws_dict);
     av_dict_free(&format_opts);
     av_dict_free(&codec_opts);
+    av_dict_free(&resample_opts);
 }

 void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)

@@ -540,6 +545,9 @@ int opt_default(void *optctx, const char *opt, const char *arg)
     char opt_stripped[128];
     const char *p;
     const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class();
+#if CONFIG_AVRESAMPLE
+    const AVClass *rc = avresample_get_class();
+#endif
 #if CONFIG_SWSCALE
     const AVClass *sc = sws_get_class();
 #endif

@@ -609,6 +617,13 @@ int opt_default(void *optctx, const char *opt, const char *arg)
         consumed = 1;
     }
 #endif
+#if CONFIG_AVRESAMPLE
+    if ((o=opt_find(&rc, opt, NULL, 0,
+                    AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
+        av_dict_set(&resample_opts, opt, arg, FLAGS);
+        consumed = 1;
+    }
+#endif

     if (consumed)
         return 0;

@@ -656,11 +671,14 @@ static void finish_group(OptionParseContext *octx, int group_idx,
     g->swr_opts = swr_opts;
     g->codec_opts = codec_opts;
     g->format_opts = format_opts;
+    g->resample_opts = resample_opts;

     codec_opts = NULL;
     format_opts = NULL;
+    resample_opts = NULL;
     sws_dict = NULL;
     swr_opts = NULL;
+    init_opts();

     memset(&octx->cur_group, 0, sizeof(octx->cur_group));
 }

@@ -689,7 +707,7 @@ static void init_parse_context(OptionParseContext *octx,
     memset(octx, 0, sizeof(*octx));

     octx->nb_groups = nb_groups;
-    octx->groups    = av_calloc(octx->nb_groups, sizeof(*octx->groups));
+    octx->groups    = av_mallocz_array(octx->nb_groups, sizeof(*octx->groups));
     if (!octx->groups)
         exit_program(1);

@@ -698,6 +716,8 @@ static void init_parse_context(OptionParseContext *octx,

     octx->global_opts.group_def = &global_group;
     octx->global_opts.arg       = "";
+
+    init_opts();
 }

 void uninit_parse_context(OptionParseContext *octx)

@@ -711,6 +731,7 @@ void uninit_parse_context(OptionParseContext *octx)
         av_freep(&l->groups[j].opts);
         av_dict_free(&l->groups[j].codec_opts);
         av_dict_free(&l->groups[j].format_opts);
+        av_dict_free(&l->groups[j].resample_opts);

         av_dict_free(&l->groups[j].sws_dict);
         av_dict_free(&l->groups[j].swr_opts);

@@ -822,7 +843,7 @@ do { \
         return AVERROR_OPTION_NOT_FOUND;
     }

-    if (octx->cur_group.nb_opts || codec_opts || format_opts)
+    if (octx->cur_group.nb_opts || codec_opts || format_opts || resample_opts)
         av_log(NULL, AV_LOG_WARNING, "Trailing option(s) found in the "
                "command: may be ignored.\n");

@@ -843,32 +864,6 @@ int opt_cpuflags(void *optctx, const char *opt, const char *arg)
     return 0;
 }

-int opt_cpucount(void *optctx, const char *opt, const char *arg)
-{
-    int ret;
-    int count;
-
-    static const AVOption opts[] = {
-        {"count", NULL, 0, AV_OPT_TYPE_INT, { .i64 = -1}, -1, INT_MAX},
-        {NULL},
-    };
-    static const AVClass class = {
-        .class_name = "cpucount",
-        .item_name  = av_default_item_name,
-        .option     = opts,
-        .version    = LIBAVUTIL_VERSION_INT,
-    };
-    const AVClass *pclass = &class;
-
-    ret = av_opt_eval_int(&pclass, opts, arg, &count);
-
-    if (!ret) {
-        av_cpu_force_count(count);
-    }
-
-    return ret;
-}
-
 int opt_loglevel(void *optctx, const char *opt, const char *arg)
 {
     const struct { const char *name; int level; } log_levels[] = {
@@ -899,18 +894,20 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
         if (!i && !cmd) {
             flags = 0;  /* missing relative prefix, build absolute value */
         }
-        if (av_strstart(token, "repeat", &arg)) {
+        if (!strncmp(token, "repeat", 6)) {
             if (cmd == '-') {
                 flags |= AV_LOG_SKIP_REPEATED;
             } else {
                 flags &= ~AV_LOG_SKIP_REPEATED;
             }
-        } else if (av_strstart(token, "level", &arg)) {
+            arg = token + 6;
+        } else if (!strncmp(token, "level", 5)) {
             if (cmd == '-') {
                 flags &= ~AV_LOG_PRINT_LEVEL;
             } else {
                 flags |= AV_LOG_PRINT_LEVEL;
             }
+            arg = token + 5;
         } else {
             break;
         }

@@ -1137,6 +1134,7 @@ static void print_all_libs_info(int flags, int level)
     PRINT_LIB_INFO(avformat, AVFORMAT, flags, level);
     PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
     PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
+    PRINT_LIB_INFO(avresample, AVRESAMPLE, flags, level);
     PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
     PRINT_LIB_INFO(swresample, SWRESAMPLE, flags, level);
     PRINT_LIB_INFO(postproc, POSTPROC, flags, level);

@@ -1345,9 +1343,9 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
             break;
         last_name = name;

-        printf(" %c%c %-15s %s\n",
-               decode ? 'D' : ' ',
-               encode ? 'E' : ' ',
+        printf(" %s%s %-15s %s\n",
+               decode ? "D" : " ",
+               encode ? "E" : " ",
                name,
                long_name ? long_name:" ");
     }

@@ -1399,6 +1397,8 @@ static void print_codec(const AVCodec *c)
         printf("horizband ");
     if (c->capabilities & AV_CODEC_CAP_DR1)
         printf("dr1 ");
+    if (c->capabilities & AV_CODEC_CAP_TRUNCATED)
+        printf("trunc ");
     if (c->capabilities & AV_CODEC_CAP_DELAY)
         printf("delay ");
     if (c->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)

@@ -1701,14 +1701,12 @@ int show_filters(void *optctx, const char *opt, const char *arg)
     while ((filter = av_filter_iterate(&opaque))) {
         descr_cur = descr;
         for (i = 0; i < 2; i++) {
-            unsigned nb_pads;
             if (i) {
                 *(descr_cur++) = '-';
                 *(descr_cur++) = '>';
             }
             pad = i ? filter->outputs : filter->inputs;
-            nb_pads = avfilter_filter_pad_count(filter, i);
-            for (j = 0; j < nb_pads; j++) {
+            for (j = 0; pad && avfilter_pad_get_name(pad, j); j++) {
                 if (descr_cur >= descr + sizeof(descr) - 4)
                     break;
                 *(descr_cur++) = get_media_type_char(avfilter_pad_get_type(pad, j));

@@ -1754,7 +1752,7 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)
            "..H.. = Hardware accelerated format\n"
            "...P. = Paletted format\n"
            "....B = Bitstream format\n"
-           "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL BIT_DEPTHS\n"
+           "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
            "-----\n");

 #if !CONFIG_SWSCALE

@@ -1764,7 +1762,7 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)

     while ((pix_desc = av_pix_fmt_desc_next(pix_desc))) {
         enum AVPixelFormat av_unused pix_fmt = av_pix_fmt_desc_get_id(pix_desc);
-        printf("%c%c%c%c%c %-16s %d %3d %d",
+        printf("%c%c%c%c%c %-16s %d %2d\n",
                sws_isSupportedInput (pix_fmt) ? 'I' : '.',
                sws_isSupportedOutput(pix_fmt) ? 'O' : '.',
                pix_desc->flags & AV_PIX_FMT_FLAG_HWACCEL ? 'H' : '.',

@@ -1772,12 +1770,7 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)
                pix_desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ? 'B' : '.',
                pix_desc->name,
                pix_desc->nb_components,
-               av_get_bits_per_pixel(pix_desc),
-               pix_desc->comp[0].depth);
-
-        for (unsigned i = 1; i < pix_desc->nb_components; i++)
-            printf("-%d", pix_desc->comp[i].depth);
-        printf("\n");
+               av_get_bits_per_pixel(pix_desc));
     }
     return 0;
 }

@@ -1820,16 +1813,6 @@ int show_sample_fmts(void *optctx, const char *opt, const char *arg)
     return 0;
 }

-int show_dispositions(void *optctx, const char *opt, const char *arg)
-{
-    for (int i = 0; i < 32; i++) {
-        const char *str = av_disposition_to_string(1U << i);
-        if (str)
-            printf("%s\n", str);
-    }
-    return 0;
-}
-
 static void show_help_codec(const char *name, int encoder)
 {
     const AVCodecDescriptor *desc;
@ -1958,7 +1941,7 @@ static void show_help_filter(const char *name)
|
|||||||
printf(" slice threading supported\n");
|
printf(" slice threading supported\n");
|
||||||
|
|
||||||
printf(" Inputs:\n");
|
printf(" Inputs:\n");
|
||||||
count = avfilter_filter_pad_count(f, 0);
|
count = avfilter_pad_count(f->inputs);
|
||||||
for (i = 0; i < count; i++) {
|
for (i = 0; i < count; i++) {
|
||||||
printf(" #%d: %s (%s)\n", i, avfilter_pad_get_name(f->inputs, i),
|
printf(" #%d: %s (%s)\n", i, avfilter_pad_get_name(f->inputs, i),
|
||||||
media_type_string(avfilter_pad_get_type(f->inputs, i)));
|
media_type_string(avfilter_pad_get_type(f->inputs, i)));
|
||||||
@ -1969,7 +1952,7 @@ static void show_help_filter(const char *name)
|
|||||||
printf(" none (source filter)\n");
|
printf(" none (source filter)\n");
|
||||||
|
|
||||||
printf(" Outputs:\n");
|
printf(" Outputs:\n");
|
||||||
count = avfilter_filter_pad_count(f, 1);
|
count = avfilter_pad_count(f->outputs);
|
||||||
for (i = 0; i < count; i++) {
|
for (i = 0; i < count; i++) {
|
||||||
printf(" #%d: %s (%s)\n", i, avfilter_pad_get_name(f->outputs, i),
|
printf(" #%d: %s (%s)\n", i, avfilter_pad_get_name(f->outputs, i),
|
||||||
media_type_string(avfilter_pad_get_type(f->outputs, i)));
|
media_type_string(avfilter_pad_get_type(f->outputs, i)));
|
||||||
@ -2121,7 +2104,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
|||||||
AVFormatContext *s, AVStream *st, const AVCodec *codec)
|
AVFormatContext *s, AVStream *st, const AVCodec *codec)
|
||||||
{
|
{
|
||||||
AVDictionary *ret = NULL;
|
AVDictionary *ret = NULL;
|
||||||
const AVDictionaryEntry *t = NULL;
|
AVDictionaryEntry *t = NULL;
|
||||||
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
|
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
|
||||||
: AV_OPT_FLAG_DECODING_PARAM;
|
: AV_OPT_FLAG_DECODING_PARAM;
|
||||||
char prefix = 0;
|
char prefix = 0;
|
||||||
@ -2183,11 +2166,11 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
|||||||
|
|
||||||
if (!s->nb_streams)
|
if (!s->nb_streams)
|
||||||
return NULL;
|
return NULL;
|
||||||
opts = av_calloc(s->nb_streams, sizeof(*opts));
|
opts = av_mallocz_array(s->nb_streams, sizeof(*opts));
|
||||||
if (!opts) {
|
if (!opts) {
|
||||||
av_log(NULL, AV_LOG_ERROR,
|
av_log(NULL, AV_LOG_ERROR,
|
||||||
"Could not alloc memory for stream options.\n");
|
"Could not alloc memory for stream options.\n");
|
||||||
exit_program(1);
|
return NULL;
|
||||||
}
|
}
|
||||||
for (i = 0; i < s->nb_streams; i++)
|
for (i = 0; i < s->nb_streams; i++)
|
||||||
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
|
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
|
||||||
@ -2214,23 +2197,13 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
|||||||
return array;
|
return array;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
|
double get_rotation(AVStream *st)
|
||||||
{
|
|
||||||
void *new_elem;
|
|
||||||
|
|
||||||
if (!(new_elem = av_mallocz(elem_size)) ||
|
|
||||||
av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0) {
|
|
||||||
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
|
|
||||||
exit_program(1);
|
|
||||||
}
|
|
||||||
return new_elem;
|
|
||||||
}
|
|
||||||
|
|
||||||
double get_rotation(int32_t *displaymatrix)
|
|
||||||
{
|
{
|
||||||
|
uint8_t* displaymatrix = av_stream_get_side_data(st,
|
||||||
|
AV_PKT_DATA_DISPLAYMATRIX, NULL);
|
||||||
double theta = 0;
|
double theta = 0;
|
||||||
if (displaymatrix)
|
if (displaymatrix)
|
||||||
theta = -round(av_display_rotation_get((int32_t*) displaymatrix));
|
theta = -av_display_rotation_get((int32_t*) displaymatrix);
|
||||||
|
|
||||||
theta -= 360*floor(theta/360 + 0.9/360);
|
theta -= 360*floor(theta/360 + 0.9/360);
|
||||||
|
|
||||||
@@ -2244,62 +2217,60 @@ double get_rotation(int32_t *displaymatrix)
 }

 #if CONFIG_AVDEVICE
-static void print_device_list(const AVDeviceInfoList *device_list)
+static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
 {
-    // print devices
-    for (int i = 0; i < device_list->nb_devices; i++) {
-        const AVDeviceInfo *device = device_list->devices[i];
-        printf("%c %s [%s] (", device_list->default_device == i ? '*' : ' ',
-               device->device_name, device->device_description);
-        if (device->nb_media_types > 0) {
-            for (int j = 0; j < device->nb_media_types; ++j) {
-                const char* media_type = av_get_media_type_string(device->media_types[j]);
-                if (j > 0)
-                    printf(", ");
-                printf("%s", media_type ? media_type : "unknown");
-            }
-        } else {
-            printf("none");
-        }
-        printf(")\n");
-    }
-}
-
-static int print_device_sources(const AVInputFormat *fmt, AVDictionary *opts)
-{
-    int ret;
+    int ret, i;
     AVDeviceInfoList *device_list = NULL;

     if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
         return AVERROR(EINVAL);

     printf("Auto-detected sources for %s:\n", fmt->name);
-    if ((ret = avdevice_list_input_sources(fmt, NULL, opts, &device_list)) < 0) {
-        printf("Cannot list sources: %s\n", av_err2str(ret));
+    if (!fmt->get_device_list) {
+        ret = AVERROR(ENOSYS);
+        printf("Cannot list sources. Not implemented.\n");
         goto fail;
     }

-    print_device_list(device_list);
+    if ((ret = avdevice_list_input_sources(fmt, NULL, opts, &device_list)) < 0) {
+        printf("Cannot list sources.\n");
+        goto fail;
+    }

+    for (i = 0; i < device_list->nb_devices; i++) {
+        printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
+               device_list->devices[i]->device_name, device_list->devices[i]->device_description);
+    }

 fail:
     avdevice_free_list_devices(&device_list);
     return ret;
 }

-static int print_device_sinks(const AVOutputFormat *fmt, AVDictionary *opts)
+static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
 {
-    int ret;
+    int ret, i;
     AVDeviceInfoList *device_list = NULL;

     if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
         return AVERROR(EINVAL);

     printf("Auto-detected sinks for %s:\n", fmt->name);
-    if ((ret = avdevice_list_output_sinks(fmt, NULL, opts, &device_list)) < 0) {
-        printf("Cannot list sinks: %s\n", av_err2str(ret));
+    if (!fmt->get_device_list) {
+        ret = AVERROR(ENOSYS);
+        printf("Cannot list sinks. Not implemented.\n");
        goto fail;
     }

-    print_device_list(device_list);
+    if ((ret = avdevice_list_output_sinks(fmt, NULL, opts, &device_list)) < 0) {
+        printf("Cannot list sinks.\n");
+        goto fail;
+    }

+    for (i = 0; i < device_list->nb_devices; i++) {
+        printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
+               device_list->devices[i]->device_name, device_list->devices[i]->device_description);
+    }

 fail:
     avdevice_free_list_devices(&device_list);
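Note: the two helpers above back ffmpeg's -sources and -sinks command-line options. A typical invocation looks like the following (the device names are only examples; availability depends on how avdevice was built):

    ffmpeg -sources alsa      # list auto-detected capture sources of the alsa input device
    ffmpeg -sinks pulse       # list auto-detected playback sinks of the pulse output device

The default device, if any, is marked with '*' in the printed listing.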
@@ -2330,7 +2301,7 @@ static int show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionar

 int show_sources(void *optctx, const char *opt, const char *arg)
 {
-    const AVInputFormat *fmt = NULL;
+    AVInputFormat *fmt = NULL;
     char *dev = NULL;
     AVDictionary *opts = NULL;
     int ret = 0;
@@ -2368,7 +2339,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)

 int show_sinks(void *optctx, const char *opt, const char *arg)
 {
-    const AVOutputFormat *fmt = NULL;
+    AVOutputFormat *fmt = NULL;
     char *dev = NULL;
     AVDictionary *opts = NULL;
     int ret = 0;
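Note: a minimal sketch of how the get_rotation(AVStream *st) helper restored above is typically called from the fftools. The opened AVFormatContext, the stream index and the threshold check are assumptions made up for this illustration, not part of the commit:

    /* Sketch only: query a stream's display rotation via the restored helper. */
    #include <math.h>
    #include "libavformat/avformat.h"
    #include "cmdutils.h"

    static int needs_rotation(AVFormatContext *ic, int video_stream_index)
    {
        double theta = get_rotation(ic->streams[video_stream_index]);
        /* get_rotation() normalizes the angle into [0, 360) */
        return fabs(theta) > 1.0 && fabs(theta - 360) > 1.0;
    }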
fftools/cmdutils.h
@@ -48,7 +48,7 @@ extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
 extern AVFormatContext *avformat_opts;
 extern AVDictionary *sws_dict;
 extern AVDictionary *swr_opts;
-extern AVDictionary *format_opts, *codec_opts;
+extern AVDictionary *format_opts, *codec_opts, *resample_opts;
 extern int hide_banner;

 /**
@@ -88,11 +88,6 @@ void log_callback_help(void* ptr, int level, const char* fmt, va_list vl);
  */
 int opt_cpuflags(void *optctx, const char *opt, const char *arg);

-/**
- * Override the cpucount.
- */
-int opt_cpucount(void *optctx, const char *opt, const char *arg);
-
 /**
  * Fallback for options that are not explicitly handled, these will be
  * parsed through AVOptions.
@@ -238,14 +233,12 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
     { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" }, \
     { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" }, \
     { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" }, \
-    { "dispositions", OPT_EXIT, { .func_arg = show_dispositions}, "show available stream dispositions" }, \
     { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" }, \
     { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
     { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
     { "report", 0, { .func_arg = opt_report }, "generate a report" }, \
     { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" }, \
     { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" }, \
-    { "cpucount", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpucount }, "force specific cpu count", "count" }, \
     { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" }, \
     CMDUTILS_COMMON_OPTIONS_AVDEVICE \

@@ -324,6 +317,7 @@ typedef struct OptionGroup {

     AVDictionary *codec_opts;
     AVDictionary *format_opts;
+    AVDictionary *resample_opts;
     AVDictionary *sws_dict;
     AVDictionary *swr_opts;
 } OptionGroup;
@@ -430,8 +424,8 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
  * Each dictionary will contain the options from codec_opts which can
  * be applied to the corresponding stream codec context.
  *
- * @return pointer to the created array of dictionaries.
- * Calls exit() on failure.
+ * @return pointer to the created array of dictionaries, NULL if it
+ * cannot be created
  */
 AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
                                            AVDictionary *codec_opts);
@@ -578,11 +572,6 @@ int show_layouts(void *optctx, const char *opt, const char *arg);
  */
 int show_sample_fmts(void *optctx, const char *opt, const char *arg);

-/**
- * Print a listing containing all supported stream dispositions.
- */
-int show_dispositions(void *optctx, const char *opt, const char *arg);
-
 /**
  * Print a listing containing all the color names and values recognized
  * by the program.
@@ -628,28 +617,11 @@ FILE *get_preset_file(char *filename, size_t filename_size,
  */
 void *grow_array(void *array, int elem_size, int *size, int new_size);

-/**
- * Atomically add a new element to an array of pointers, i.e. allocate
- * a new entry, reallocate the array of pointers and make the new last
- * member of this array point to the newly allocated buffer.
- * Calls exit() on failure.
- *
- * @param array array of pointers to reallocate
- * @param elem_size size of the new element to allocate
- * @param nb_elems pointer to the number of elements of the array array;
- *                 *nb_elems will be incremented by one by this function.
- * @return pointer to the newly allocated entry
- */
-void *allocate_array_elem(void *array, size_t elem_size, int *nb_elems);
-
 #define media_type_string av_get_media_type_string

 #define GROW_ARRAY(array, nb_elems)\
     array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)

-#define ALLOC_ARRAY_ELEM(array, nb_elems)\
-    allocate_array_elem(&array, sizeof(*array[0]), &nb_elems)
-
 #define GET_PIX_FMT_NAME(pix_fmt)\
     const char *name = av_get_pix_fmt_name(pix_fmt);

@@ -671,6 +643,6 @@ void *allocate_array_elem(void *array, size_t elem_size, int *nb_elems);
     char name[128];\
     av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);

-double get_rotation(int32_t *displaymatrix);
+double get_rotation(AVStream *st);

 #endif /* FFTOOLS_CMDUTILS_H */
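Note: the GROW_ARRAY()/grow_array() pair kept by this header is the allocation helper the fftools use for growing parallel arrays. A minimal usage sketch follows; the array and counter names are invented for this illustration only:

    /* Sketch only: append one element to a dynamically grown array.
     * grow_array() zero-fills the new slot, updates the counter and
     * calls exit_program() on allocation failure. */
    #include "cmdutils.h"

    static AVRational *frame_rates;
    static int         nb_frame_rates;

    static void add_frame_rate(AVRational r)
    {
        GROW_ARRAY(frame_rates, nb_frame_rates);
        frame_rates[nb_frame_rates - 1] = r;
    }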
426 fftools/ffmpeg.c
@@ -535,7 +535,6 @@ static void ffmpeg_cleanup(int ret)
                 av_frame_free(&frame);
             }
             av_fifo_freep(&ifilter->frame_queue);
-            av_freep(&ifilter->displaymatrix);
             if (ist->sub2video.sub_queue) {
                 while (av_fifo_size(ist->sub2video.sub_queue)) {
                     AVSubtitle sub;
@@ -555,6 +554,9 @@ static void ffmpeg_cleanup(int ret)

             avfilter_inout_free(&ofilter->out_tmp);
             av_freep(&ofilter->name);
+            av_freep(&ofilter->formats);
+            av_freep(&ofilter->channel_layouts);
+            av_freep(&ofilter->sample_rates);
             av_freep(&fg->outputs[j]);
         }
         av_freep(&fg->outputs);
@@ -630,6 +632,7 @@ static void ffmpeg_cleanup(int ret)
         InputStream *ist = input_streams[i];

         av_frame_free(&ist->decoded_frame);
+        av_frame_free(&ist->filter_frame);
         av_packet_free(&ist->pkt);
         av_dict_free(&ist->decoder_opts);
         avsubtitle_free(&ist->prev_sub.subtitle);
@@ -650,7 +653,6 @@ static void ffmpeg_cleanup(int ret)
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);
-   av_freep(&filter_nbthreads);

    av_freep(&input_streams);
    av_freep(&input_files);
@@ -673,7 +675,7 @@ static void ffmpeg_cleanup(int ret)

 void remove_avoptions(AVDictionary **a, AVDictionary *b)
 {
-    const AVDictionaryEntry *t = NULL;
+    AVDictionaryEntry *t = NULL;

     while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
         av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
@@ -682,7 +684,7 @@ void remove_avoptions(AVDictionary **a, AVDictionary *b)

 void assert_avoptions(AVDictionary *m)
 {
-    const AVDictionaryEntry *t;
+    AVDictionaryEntry *t;
     if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
         av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
         exit_program(1);
@@ -750,13 +752,14 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
         AVPacket *tmp_pkt;
         /* the muxer is not initialized yet, buffer the packet */
         if (!av_fifo_space(ost->muxing_queue)) {
-            size_t cur_size = av_fifo_size(ost->muxing_queue);
             unsigned int are_we_over_size =
                 (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
-            size_t limit = are_we_over_size ? ost->max_muxing_queue_size : INT_MAX;
-            size_t new_size = FFMIN(2 * cur_size, limit);
+            int new_size = are_we_over_size ?
+                           FFMIN(2 * av_fifo_size(ost->muxing_queue),
+                                 ost->max_muxing_queue_size) :
+                           2 * av_fifo_size(ost->muxing_queue);

-            if (new_size <= cur_size) {
+            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                 av_log(NULL, AV_LOG_ERROR,
                        "Too many packets buffered for output stream %d:%d.\n",
                        ost->file_index, ost->st->index);
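Note: the restored branch above sizes the muxing FIFO with a simple policy: keep doubling, and only clamp to max_muxing_queue_size once the buffered bytes exceed muxing_queue_data_threshold. A standalone sketch of that arithmetic (the helper name and parameters are invented for illustration):

    /* Sketch only: next capacity for the muxing queue. */
    #include "libavutil/common.h"   /* FFMIN */

    static int next_queue_size(int cur_size, int over_data_threshold, int max_size)
    {
        return over_data_threshold ? FFMIN(2 * cur_size, max_size)
                                   : 2 * cur_size;
    }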
@@ -866,16 +869,16 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
         main_return_code = 1;
         close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
     }
+    av_packet_unref(pkt);
 }

 static void close_output_stream(OutputStream *ost)
 {
     OutputFile *of = output_files[ost->file_index];
-    AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;

     ost->finished |= ENCODER_FINISHED;
     if (of->shortest) {
-        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
+        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
         of->recording_time = FFMIN(of->recording_time, end);
     }
 }
@@ -1029,6 +1032,7 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
         goto error;

     while (1) {
+        av_packet_unref(pkt);
         ret = avcodec_receive_packet(enc, pkt);
         if (ret == AVERROR(EAGAIN))
             break;
@@ -1138,12 +1142,11 @@ static void do_subtitle_out(OutputFile *of,
     }
 }

-/* May modify/reset next_picture */
 static void do_video_out(OutputFile *of,
                          OutputStream *ost,
                          AVFrame *next_picture)
 {
-    int ret;
+    int ret, format_video_sync;
     AVPacket *pkt = ost->pkt;
     AVCodecContext *enc = ost->enc_ctx;
     AVRational frame_rate;
@@ -1165,7 +1168,7 @@ static void do_video_out(OutputFile *of,
     if (frame_rate.num > 0 && frame_rate.den > 0)
         duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

-    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
+    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
         duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

     if (!ost->filters_script &&
@@ -1190,10 +1193,28 @@ static void do_video_out(OutputFile *of,
     nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
     nb_frames = 1;

+    format_video_sync = video_sync_method;
+    if (format_video_sync == VSYNC_AUTO) {
+        if(!strcmp(of->ctx->oformat->name, "avi")) {
+            format_video_sync = VSYNC_VFR;
+        } else
+            format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
+        if ( ist
+            && format_video_sync == VSYNC_CFR
+            && input_files[ist->file_index]->ctx->nb_streams == 1
+            && input_files[ist->file_index]->input_ts_offset == 0) {
+            format_video_sync = VSYNC_VSCFR;
+        }
+        if (format_video_sync == VSYNC_CFR && copy_ts) {
+            format_video_sync = VSYNC_VSCFR;
+        }
+    }
+    ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

     if (delta0 < 0 &&
         delta > 0 &&
-        ost->vsync_method != VSYNC_PASSTHROUGH &&
-        ost->vsync_method != VSYNC_DROP) {
+        format_video_sync != VSYNC_PASSTHROUGH &&
+        format_video_sync != VSYNC_DROP) {
         if (delta0 < -0.6) {
             av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
         } else
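Note: for readability, the container-driven part of the restored selection above can be restated as a small helper. This is purely an illustration with an invented function name; the CFR-to-VSCFR upgrades that depend on the input file and copy_ts are omitted:

    /* Sketch only: map the output muxer onto a default video sync mode. */
    static int pick_video_sync(const AVOutputFormat *ofmt)
    {
        if (!strcmp(ofmt->name, "avi"))
            return VSYNC_VFR;
        if (!(ofmt->flags & AVFMT_VARIABLE_FPS))
            return VSYNC_CFR;          /* fixed-fps container: duplicate/drop frames */
        return (ofmt->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR;
    }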
@@ -1203,7 +1224,7 @@ static void do_video_out(OutputFile *of,
         delta0 = 0;
     }

-    switch (ost->vsync_method) {
+    switch (format_video_sync) {
     case VSYNC_VSCFR:
         if (ost->frame_number == 0 && delta0 >= 0.5) {
             av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
@@ -1266,7 +1287,6 @@ static void do_video_out(OutputFile *of,
         }
     }
     ost->last_dropped = nb_frames == nb0_frames && next_picture;
-    ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

     /* duplicates frame if needed */
     for (i = 0; i < nb_frames; i++) {
@@ -1274,7 +1294,7 @@ static void do_video_out(OutputFile *of,
         int forced_keyframe = 0;
         double pts_time;

-        if (i < nb0_frames && ost->last_frame->buf[0]) {
+        if (i < nb0_frames && ost->last_frame) {
             in_picture = ost->last_frame;
         } else
             in_picture = next_picture;
@@ -1327,11 +1347,6 @@ static void do_video_out(OutputFile *of,
                    && in_picture->key_frame==1
                    && !i) {
             forced_keyframe = 1;
-        } else if ( ost->forced_keyframes
-                   && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
-                   && !i) {
-            forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
-            ost->dropped_keyframe = 0;
         }

         if (forced_keyframe) {
@@ -1356,6 +1371,7 @@ static void do_video_out(OutputFile *of,
             av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);

         while (1) {
+            av_packet_unref(pkt);
             ret = avcodec_receive_packet(enc, pkt);
             update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
             if (ret == AVERROR(EAGAIN))
@@ -1402,9 +1418,13 @@ static void do_video_out(OutputFile *of,
             do_video_stats(ost, frame_size);
     }

+    if (!ost->last_frame)
+        ost->last_frame = av_frame_alloc();
     av_frame_unref(ost->last_frame);
-    if (next_picture)
-        av_frame_move_ref(ost->last_frame, next_picture);
+    if (next_picture && ost->last_frame)
+        av_frame_ref(ost->last_frame, next_picture);
+    else
+        av_frame_free(&ost->last_frame);

     return;
 error:
@@ -1463,13 +1483,13 @@ static void do_video_stats(OutputStream *ost, int frame_size)
 static void finish_output_stream(OutputStream *ost)
 {
     OutputFile *of = output_files[ost->file_index];
-    AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;
+    int i;

     ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

     if (of->shortest) {
-        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
-        of->recording_time = FFMIN(of->recording_time, end);
+        for (i = 0; i < of->ctx->nb_streams; i++)
+            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
     }
 }

@@ -1508,6 +1528,12 @@ static int reap_filters(int flush)
         if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
             init_output_stream_wrapper(ost, NULL, 1);

+        if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
+            return AVERROR(ENOMEM);
+        }
+        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+            return AVERROR(ENOMEM);
+        }
         filtered_frame = ost->filtered_frame;

         while (1) {
@ -1948,6 +1974,9 @@ static void flush_encoders(void)
|
|||||||
AVPacket *pkt = ost->pkt;
|
AVPacket *pkt = ost->pkt;
|
||||||
int pkt_size;
|
int pkt_size;
|
||||||
|
|
||||||
|
if (!pkt)
|
||||||
|
break;
|
||||||
|
|
||||||
switch (enc->codec_type) {
|
switch (enc->codec_type) {
|
||||||
case AVMEDIA_TYPE_AUDIO:
|
case AVMEDIA_TYPE_AUDIO:
|
||||||
desc = "audio";
|
desc = "audio";
|
||||||
@ -1961,6 +1990,7 @@ static void flush_encoders(void)
|
|||||||
|
|
||||||
update_benchmark(NULL);
|
update_benchmark(NULL);
|
||||||
|
|
||||||
|
av_packet_unref(pkt);
|
||||||
while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
|
while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
|
||||||
ret = avcodec_send_frame(enc, NULL);
|
ret = avcodec_send_frame(enc, NULL);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
@ -2055,17 +2085,19 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (f->recording_time != INT64_MAX) {
|
if (f->recording_time != INT64_MAX) {
|
||||||
start_time = 0;
|
start_time = f->ctx->start_time;
|
||||||
if (copy_ts) {
|
if (f->start_time != AV_NOPTS_VALUE && copy_ts)
|
||||||
start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
|
start_time += f->start_time;
|
||||||
start_time += start_at_zero ? 0 : f->ctx->start_time;
|
|
||||||
}
|
|
||||||
if (ist->pts >= f->recording_time + start_time) {
|
if (ist->pts >= f->recording_time + start_time) {
|
||||||
close_output_stream(ost);
|
close_output_stream(ost);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* force the input stream PTS */
|
||||||
|
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||||
|
ost->sync_opts++;
|
||||||
|
|
||||||
if (av_packet_ref(opkt, pkt) < 0)
|
if (av_packet_ref(opkt, pkt) < 0)
|
||||||
exit_program(1);
|
exit_program(1);
|
||||||
|
|
||||||
@ -2089,8 +2121,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
|||||||
|
|
||||||
opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
|
opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
|
||||||
|
|
||||||
ost->sync_opts += opkt->duration;
|
|
||||||
|
|
||||||
output_packet(of, opkt, ost, 0);
|
output_packet(of, opkt, ost, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2144,15 +2174,10 @@ static int ifilter_has_all_input_formats(FilterGraph *fg)
|
|||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
|
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
|
||||||
{
|
{
|
||||||
FilterGraph *fg = ifilter->graph;
|
FilterGraph *fg = ifilter->graph;
|
||||||
AVFrameSideData *sd;
|
int need_reinit, ret, i;
|
||||||
int need_reinit, ret;
|
|
||||||
int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
|
|
||||||
|
|
||||||
if (keep_reference)
|
|
||||||
buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
|
|
||||||
|
|
||||||
/* determine if the parameters for this input changed */
|
/* determine if the parameters for this input changed */
|
||||||
need_reinit = ifilter->format != frame->format;
|
need_reinit = ifilter->format != frame->format;
|
||||||
@ -2176,12 +2201,6 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_ref
|
|||||||
(ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
|
(ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
|
||||||
need_reinit = 1;
|
need_reinit = 1;
|
||||||
|
|
||||||
if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) {
|
|
||||||
if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
|
|
||||||
need_reinit = 1;
|
|
||||||
} else if (ifilter->displaymatrix)
|
|
||||||
need_reinit = 1;
|
|
||||||
|
|
||||||
if (need_reinit) {
|
if (need_reinit) {
|
||||||
ret = ifilter_parameters_from_frame(ifilter, frame);
|
ret = ifilter_parameters_from_frame(ifilter, frame);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
@ -2190,20 +2209,23 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_ref
|
|||||||
|
|
||||||
/* (re)init the graph if possible, otherwise buffer the frame and return */
|
/* (re)init the graph if possible, otherwise buffer the frame and return */
|
||||||
if (need_reinit || !fg->graph) {
|
if (need_reinit || !fg->graph) {
|
||||||
if (!ifilter_has_all_input_formats(fg)) {
|
for (i = 0; i < fg->nb_inputs; i++) {
|
||||||
AVFrame *tmp = av_frame_clone(frame);
|
if (!ifilter_has_all_input_formats(fg)) {
|
||||||
if (!tmp)
|
AVFrame *tmp = av_frame_clone(frame);
|
||||||
return AVERROR(ENOMEM);
|
if (!tmp)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
av_frame_unref(frame);
|
||||||
|
|
||||||
if (!av_fifo_space(ifilter->frame_queue)) {
|
if (!av_fifo_space(ifilter->frame_queue)) {
|
||||||
ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
|
ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
av_frame_free(&tmp);
|
av_frame_free(&tmp);
|
||||||
return ret;
|
return ret;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = reap_filters(1);
|
ret = reap_filters(1);
|
||||||
@ -2219,7 +2241,7 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_ref
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
|
ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
if (ret != AVERROR_EOF)
|
if (ret != AVERROR_EOF)
|
||||||
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
|
av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
|
||||||
@ -2282,10 +2304,18 @@ static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacke
|
|||||||
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
|
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
|
||||||
{
|
{
|
||||||
int i, ret;
|
int i, ret;
|
||||||
|
AVFrame *f;
|
||||||
|
|
||||||
av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
|
av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
|
||||||
for (i = 0; i < ist->nb_filters; i++) {
|
for (i = 0; i < ist->nb_filters; i++) {
|
||||||
ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
|
if (i < ist->nb_filters - 1) {
|
||||||
|
f = ist->filter_frame;
|
||||||
|
ret = av_frame_ref(f, decoded_frame);
|
||||||
|
if (ret < 0)
|
||||||
|
break;
|
||||||
|
} else
|
||||||
|
f = decoded_frame;
|
||||||
|
ret = ifilter_send_frame(ist->filters[i], f);
|
||||||
if (ret == AVERROR_EOF)
|
if (ret == AVERROR_EOF)
|
||||||
ret = 0; /* ignore */
|
ret = 0; /* ignore */
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
@ -2300,11 +2330,17 @@ static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
|
|||||||
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
||||||
int *decode_failed)
|
int *decode_failed)
|
||||||
{
|
{
|
||||||
AVFrame *decoded_frame = ist->decoded_frame;
|
AVFrame *decoded_frame;
|
||||||
AVCodecContext *avctx = ist->dec_ctx;
|
AVCodecContext *avctx = ist->dec_ctx;
|
||||||
int ret, err = 0;
|
int ret, err = 0;
|
||||||
AVRational decoded_frame_tb;
|
AVRational decoded_frame_tb;
|
||||||
|
|
||||||
|
if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
decoded_frame = ist->decoded_frame;
|
||||||
|
|
||||||
update_benchmark(NULL);
|
update_benchmark(NULL);
|
||||||
ret = decode(avctx, decoded_frame, got_output, pkt);
|
ret = decode(avctx, decoded_frame, got_output, pkt);
|
||||||
update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
|
update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
|
||||||
@ -2341,11 +2377,6 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
|||||||
decoded_frame->pts = ist->dts;
|
decoded_frame->pts = ist->dts;
|
||||||
decoded_frame_tb = AV_TIME_BASE_Q;
|
decoded_frame_tb = AV_TIME_BASE_Q;
|
||||||
}
|
}
|
||||||
if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
|
|
||||||
pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
|
|
||||||
ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
|
|
||||||
if (pkt)
|
|
||||||
ist->prev_pkt_pts = pkt->pts;
|
|
||||||
if (decoded_frame->pts != AV_NOPTS_VALUE)
|
if (decoded_frame->pts != AV_NOPTS_VALUE)
|
||||||
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
|
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
|
||||||
(AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
|
(AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
|
||||||
@ -2353,6 +2384,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
|||||||
ist->nb_samples = decoded_frame->nb_samples;
|
ist->nb_samples = decoded_frame->nb_samples;
|
||||||
err = send_frame_to_filters(ist, decoded_frame);
|
err = send_frame_to_filters(ist, decoded_frame);
|
||||||
|
|
||||||
|
av_frame_unref(ist->filter_frame);
|
||||||
av_frame_unref(decoded_frame);
|
av_frame_unref(decoded_frame);
|
||||||
return err < 0 ? err : ret;
|
return err < 0 ? err : ret;
|
||||||
}
|
}
|
||||||
@ -2360,7 +2392,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
|||||||
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
|
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
|
||||||
int *decode_failed)
|
int *decode_failed)
|
||||||
{
|
{
|
||||||
AVFrame *decoded_frame = ist->decoded_frame;
|
AVFrame *decoded_frame;
|
||||||
int i, ret = 0, err = 0;
|
int i, ret = 0, err = 0;
|
||||||
int64_t best_effort_timestamp;
|
int64_t best_effort_timestamp;
|
||||||
int64_t dts = AV_NOPTS_VALUE;
|
int64_t dts = AV_NOPTS_VALUE;
|
||||||
@ -2371,6 +2403,11 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
|
|||||||
if (!eof && pkt && pkt->size == 0)
|
if (!eof && pkt && pkt->size == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
decoded_frame = ist->decoded_frame;
|
||||||
if (ist->dts != AV_NOPTS_VALUE)
|
if (ist->dts != AV_NOPTS_VALUE)
|
||||||
dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
|
dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
|
||||||
if (pkt) {
|
if (pkt) {
|
||||||
@ -2478,6 +2515,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
|
|||||||
err = send_frame_to_filters(ist, decoded_frame);
|
err = send_frame_to_filters(ist, decoded_frame);
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
|
av_frame_unref(ist->filter_frame);
|
||||||
av_frame_unref(decoded_frame);
|
av_frame_unref(decoded_frame);
|
||||||
return err < 0 ? err : ret;
|
return err < 0 ? err : ret;
|
||||||
}
|
}
|
||||||
@ -2546,6 +2584,8 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
|
|||||||
for (i = 0; i < nb_output_streams; i++) {
|
for (i = 0; i < nb_output_streams; i++) {
|
||||||
OutputStream *ost = output_streams[i];
|
OutputStream *ost = output_streams[i];
|
||||||
|
|
||||||
|
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
|
||||||
|
exit_program(1);
|
||||||
if (!check_output_constraints(ist, ost) || !ost->encoding_needed
|
if (!check_output_constraints(ist, ost) || !ost->encoding_needed
|
||||||
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
|
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
|
||||||
continue;
|
continue;
|
||||||
@ -2581,14 +2621,16 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
|
|||||||
int repeating = 0;
|
int repeating = 0;
|
||||||
int eof_reached = 0;
|
int eof_reached = 0;
|
||||||
|
|
||||||
AVPacket *avpkt = ist->pkt;
|
AVPacket *avpkt;
|
||||||
|
|
||||||
|
if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
avpkt = ist->pkt;
|
||||||
|
|
||||||
if (!ist->saw_first_ts) {
|
if (!ist->saw_first_ts) {
|
||||||
ist->first_dts =
|
|
||||||
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
|
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
|
||||||
ist->pts = 0;
|
ist->pts = 0;
|
||||||
if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
||||||
ist->first_dts =
|
|
||||||
ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
|
ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||||
ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
|
ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
|
||||||
}
|
}
|
||||||
@ -2752,6 +2794,8 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
|
|||||||
for (i = 0; i < nb_output_streams; i++) {
|
for (i = 0; i < nb_output_streams; i++) {
|
||||||
OutputStream *ost = output_streams[i];
|
OutputStream *ost = output_streams[i];
|
||||||
|
|
||||||
|
if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
|
||||||
|
exit_program(1);
|
||||||
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
|
if (!check_output_constraints(ist, ost) || ost->encoding_needed)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
@ -2761,17 +2805,17 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
|
|||||||
return !eof_reached;
|
return !eof_reached;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int print_sdp(void)
|
static void print_sdp(void)
|
||||||
{
|
{
|
||||||
char sdp[16384];
|
char sdp[16384];
|
||||||
int i;
|
int i;
|
||||||
int j, ret;
|
int j;
|
||||||
AVIOContext *sdp_pb;
|
AVIOContext *sdp_pb;
|
||||||
AVFormatContext **avc;
|
AVFormatContext **avc;
|
||||||
|
|
||||||
for (i = 0; i < nb_output_files; i++) {
|
for (i = 0; i < nb_output_files; i++) {
|
||||||
if (!output_files[i]->header_written)
|
if (!output_files[i]->header_written)
|
||||||
return 0;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
||||||
@ -2784,34 +2828,26 @@ static int print_sdp(void)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!j) {
|
if (!j)
|
||||||
av_log(NULL, AV_LOG_ERROR, "No output streams in the SDP.\n");
|
|
||||||
ret = AVERROR(EINVAL);
|
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
|
||||||
|
|
||||||
ret = av_sdp_create(avc, j, sdp, sizeof(sdp));
|
av_sdp_create(avc, j, sdp, sizeof(sdp));
|
||||||
if (ret < 0)
|
|
||||||
goto fail;
|
|
||||||
|
|
||||||
if (!sdp_filename) {
|
if (!sdp_filename) {
|
||||||
printf("SDP:\n%s\n", sdp);
|
printf("SDP:\n%s\n", sdp);
|
||||||
fflush(stdout);
|
fflush(stdout);
|
||||||
} else {
|
} else {
|
||||||
ret = avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL);
|
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
|
||||||
if (ret < 0) {
|
|
||||||
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
||||||
goto fail;
|
} else {
|
||||||
|
avio_print(sdp_pb, sdp);
|
||||||
|
avio_closep(&sdp_pb);
|
||||||
|
av_freep(&sdp_filename);
|
||||||
}
|
}
|
||||||
|
|
||||||
avio_print(sdp_pb, sdp);
|
|
||||||
avio_closep(&sdp_pb);
|
|
||||||
av_freep(&sdp_filename);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
av_freep(&avc);
|
av_freep(&avc);
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
|
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
|
||||||
@ -2841,7 +2877,12 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (config && config->device_type == ist->hwaccel_device_type) {
|
if (config) {
|
||||||
|
if (config->device_type != ist->hwaccel_device_type) {
|
||||||
|
// Different hwaccel offered, ignore.
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
ret = hwaccel_decode_init(s);
|
ret = hwaccel_decode_init(s);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
if (ist->hwaccel_id == HWACCEL_GENERIC) {
|
if (ist->hwaccel_id == HWACCEL_GENERIC) {
|
||||||
@ -2854,15 +2895,57 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat
|
|||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
const HWAccel *hwaccel = NULL;
|
||||||
|
int i;
|
||||||
|
for (i = 0; hwaccels[i].name; i++) {
|
||||||
|
if (hwaccels[i].pix_fmt == *p) {
|
||||||
|
hwaccel = &hwaccels[i];
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!hwaccel) {
|
||||||
|
// No hwaccel supporting this pixfmt.
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (hwaccel->id != ist->hwaccel_id) {
|
||||||
|
// Does not match requested hwaccel.
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
ist->hwaccel_pix_fmt = *p;
|
ret = hwaccel->init(s);
|
||||||
break;
|
if (ret < 0) {
|
||||||
|
av_log(NULL, AV_LOG_FATAL,
|
||||||
|
"%s hwaccel requested for input stream #%d:%d, "
|
||||||
|
"but cannot be initialized.\n", hwaccel->name,
|
||||||
|
ist->file_index, ist->st->index);
|
||||||
|
return AV_PIX_FMT_NONE;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ist->hw_frames_ctx) {
|
||||||
|
s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
|
||||||
|
if (!s->hw_frames_ctx)
|
||||||
|
return AV_PIX_FMT_NONE;
|
||||||
|
}
|
||||||
|
|
||||||
|
ist->hwaccel_pix_fmt = *p;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return *p;
|
return *p;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||||
|
{
|
||||||
|
InputStream *ist = s->opaque;
|
||||||
|
|
||||||
|
if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
|
||||||
|
return ist->hwaccel_get_buffer(s, frame, flags);
|
||||||
|
|
||||||
|
return avcodec_default_get_buffer2(s, frame, flags);
|
||||||
|
}
|
||||||
|
|
||||||
static int init_input_stream(int ist_index, char *error, int error_len)
|
static int init_input_stream(int ist_index, char *error, int error_len)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
@ -2878,10 +2961,9 @@ static int init_input_stream(int ist_index, char *error, int error_len)
|
|||||||
|
|
||||||
ist->dec_ctx->opaque = ist;
|
ist->dec_ctx->opaque = ist;
|
||||||
ist->dec_ctx->get_format = get_format;
|
ist->dec_ctx->get_format = get_format;
|
||||||
|
ist->dec_ctx->get_buffer2 = get_buffer;
|
||||||
#if LIBAVCODEC_VERSION_MAJOR < 60
|
#if LIBAVCODEC_VERSION_MAJOR < 60
|
||||||
FF_DISABLE_DEPRECATION_WARNINGS
|
|
||||||
ist->dec_ctx->thread_safe_callbacks = 1;
|
ist->dec_ctx->thread_safe_callbacks = 1;
|
||||||
FF_ENABLE_DEPRECATION_WARNINGS
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
|
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
|
||||||
@ -2891,6 +2973,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
|||||||
av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
|
av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
|
||||||
|
|
||||||
/* Useful for subtitles retiming by lavf (FIXME), skipping samples in
|
/* Useful for subtitles retiming by lavf (FIXME), skipping samples in
|
||||||
* audio, and video decoders such as cuvid or mediacodec */
|
* audio, and video decoders such as cuvid or mediacodec */
|
||||||
ist->dec_ctx->pkt_timebase = ist->st->time_base;
|
ist->dec_ctx->pkt_timebase = ist->st->time_base;
|
||||||
@ -2967,13 +3051,8 @@ static int check_init_output_file(OutputFile *of, int file_index)
|
|||||||
av_dump_format(of->ctx, file_index, of->ctx->url, 1);
|
av_dump_format(of->ctx, file_index, of->ctx->url, 1);
|
||||||
nb_output_dumped++;
|
nb_output_dumped++;
|
||||||
|
|
||||||
if (sdp_filename || want_sdp) {
|
if (sdp_filename || want_sdp)
|
||||||
ret = print_sdp();
|
print_sdp();
|
||||||
if (ret < 0) {
|
|
||||||
av_log(NULL, AV_LOG_ERROR, "Error writing the SDP.\n");
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* flush the muxing queues */
|
/* flush the muxing queues */
|
||||||
for (i = 0; i < of->ctx->nb_streams; i++) {
|
for (i = 0; i < of->ctx->nb_streams; i++) {
|
||||||
@ -3068,28 +3147,23 @@ static int init_output_stream_streamcopy(OutputStream *ost)
|
|||||||
|
|
||||||
if (!ost->frame_rate.num)
|
if (!ost->frame_rate.num)
|
||||||
ost->frame_rate = ist->framerate;
|
ost->frame_rate = ist->framerate;
|
||||||
|
ost->st->avg_frame_rate = ost->frame_rate;
|
||||||
if (ost->frame_rate.num)
|
|
||||||
ost->st->avg_frame_rate = ost->frame_rate;
|
|
||||||
else
|
|
||||||
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
|
|
||||||
|
|
||||||
ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
|
ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
// copy timebase while removing common factors
|
// copy timebase while removing common factors
|
||||||
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
|
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
|
||||||
if (ost->frame_rate.num)
|
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
|
||||||
ost->st->time_base = av_inv_q(ost->frame_rate);
|
|
||||||
else
|
|
||||||
ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy estimated duration as a hint to the muxer
|
// copy estimated duration as a hint to the muxer
|
||||||
if (ost->st->duration <= 0 && ist->st->duration > 0)
|
if (ost->st->duration <= 0 && ist->st->duration > 0)
|
||||||
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
|
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
|
||||||
|
|
||||||
|
// copy disposition
|
||||||
|
ost->st->disposition = ist->st->disposition;
|
||||||
|
|
||||||
if (ist->st->nb_side_data) {
|
if (ist->st->nb_side_data) {
|
||||||
for (i = 0; i < ist->st->nb_side_data; i++) {
|
for (i = 0; i < ist->st->nb_side_data; i++) {
|
||||||
const AVPacketSideData *sd_src = &ist->st->side_data[i];
|
const AVPacketSideData *sd_src = &ist->st->side_data[i];
|
||||||
@ -3145,7 +3219,7 @@ static int init_output_stream_streamcopy(OutputStream *ost)
|
|||||||
|
|
||||||
static void set_encoder_id(OutputFile *of, OutputStream *ost)
|
static void set_encoder_id(OutputFile *of, OutputStream *ost)
|
||||||
{
|
{
|
||||||
const AVDictionaryEntry *e;
|
AVDictionaryEntry *e;
|
||||||
|
|
||||||
uint8_t *encoder_string;
|
uint8_t *encoder_string;
|
||||||
int encoder_string_len;
|
int encoder_string_len;
|
||||||
@ -3277,17 +3351,40 @@ static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
|
|||||||
AVCodecContext *enc_ctx = ost->enc_ctx;
|
AVCodecContext *enc_ctx = ost->enc_ctx;
|
||||||
AVCodecContext *dec_ctx = NULL;
|
AVCodecContext *dec_ctx = NULL;
|
||||||
AVFormatContext *oc = output_files[ost->file_index]->ctx;
|
AVFormatContext *oc = output_files[ost->file_index]->ctx;
|
||||||
int ret;
|
int j, ret;
|
||||||
|
|
||||||
set_encoder_id(output_files[ost->file_index], ost);
|
set_encoder_id(output_files[ost->file_index], ost);
|
||||||
|
|
||||||
|
// Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
|
||||||
|
// hand, the legacy API makes demuxers set "rotate" metadata entries,
|
||||||
|
// which have to be filtered out to prevent leaking them to output files.
|
||||||
|
av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
|
||||||
|
|
||||||
if (ist) {
|
if (ist) {
|
||||||
|
ost->st->disposition = ist->st->disposition;
|
||||||
|
|
||||||
dec_ctx = ist->dec_ctx;
|
dec_ctx = ist->dec_ctx;
|
||||||
|
|
||||||
|
enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
|
||||||
|
} else {
|
||||||
|
for (j = 0; j < oc->nb_streams; j++) {
|
||||||
|
AVStream *st = oc->streams[j];
|
||||||
|
if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (j == oc->nb_streams)
|
||||||
|
if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
|
||||||
|
ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||||
|
ost->st->disposition = AV_DISPOSITION_DEFAULT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||||
if (!ost->frame_rate.num)
|
if (!ost->frame_rate.num)
|
||||||
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
|
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
|
||||||
|
if (ist && !ost->frame_rate.num)
|
||||||
|
ost->frame_rate = ist->framerate;
|
||||||
|
if (ist && !ost->frame_rate.num)
|
||||||
|
ost->frame_rate = ist->st->r_frame_rate;
|
||||||
if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
|
if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
|
||||||
ost->frame_rate = (AVRational){25, 1};
|
ost->frame_rate = (AVRational){25, 1};
|
||||||
av_log(NULL, AV_LOG_WARNING,
|
av_log(NULL, AV_LOG_WARNING,
|
||||||
@ -3317,16 +3414,13 @@ static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
|
|||||||
switch (enc_ctx->codec_type) {
|
switch (enc_ctx->codec_type) {
|
||||||
case AVMEDIA_TYPE_AUDIO:
|
case AVMEDIA_TYPE_AUDIO:
|
||||||
enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
|
enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
|
||||||
|
if (dec_ctx)
|
||||||
|
enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
|
||||||
|
av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
|
||||||
enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
|
enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
|
||||||
enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
|
enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
|
||||||
enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
|
enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
|
||||||
|
|
||||||
if (ost->bits_per_raw_sample)
|
|
||||||
enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
|
|
||||||
else if (dec_ctx && ost->filter->graph->is_meta)
|
|
||||||
enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
|
|
||||||
av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
|
|
||||||
|
|
||||||
init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
|
init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -3349,10 +3443,7 @@ static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
|
|||||||
av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
|
av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
|
||||||
|
|
||||||
enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
|
        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
-        if (ost->bits_per_raw_sample)
-            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
-        else if (dec_ctx && ost->filter->graph->is_meta)
+        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

@@ -3368,6 +3459,13 @@ static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)

        ost->st->avg_frame_rate = ost->frame_rate;

+        if (!dec_ctx ||
+            enc_ctx->width   != dec_ctx->width  ||
+            enc_ctx->height  != dec_ctx->height ||
+            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
+            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
+        }
+
        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
@@ -3456,6 +3554,11 @@ static int init_output_stream(OutputStream *ost, AVFrame *frame,
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
+        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+            !codec->defaults &&
+            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
+            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
+            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
@@ -3558,6 +3661,40 @@ static int init_output_stream(OutputStream *ost, AVFrame *frame,
            return ret;
    }

+    // parse user provided disposition, and update stream values
+    if (ost->disposition) {
+        static const AVOption opts[] = {
+            { "disposition"      , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+            { "default"          , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT          }, .unit = "flags" },
+            { "dub"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB              }, .unit = "flags" },
+            { "original"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL         }, .unit = "flags" },
+            { "comment"          , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT          }, .unit = "flags" },
+            { "lyrics"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS           }, .unit = "flags" },
+            { "karaoke"          , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE          }, .unit = "flags" },
+            { "forced"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED           }, .unit = "flags" },
+            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
+            { "visual_impaired"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED  }, .unit = "flags" },
+            { "clean_effects"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS    }, .unit = "flags" },
+            { "attached_pic"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC     }, .unit = "flags" },
+            { "captions"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS         }, .unit = "flags" },
+            { "descriptions"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS     }, .unit = "flags" },
+            { "dependent"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT        }, .unit = "flags" },
+            { "metadata"         , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA         }, .unit = "flags" },
+            { NULL },
+        };
+        static const AVClass class = {
+            .class_name = "",
+            .item_name  = av_default_item_name,
+            .option     = opts,
+            .version    = LIBAVUTIL_VERSION_INT,
+        };
+        const AVClass *pclass = &class;
+
+        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
+        if (ret < 0)
+            return ret;
+    }
+
    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
@@ -3615,7 +3752,7 @@ static int transcode_init(void)
    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
-        if (ifile->readrate || ifile->rate_emu)
+        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }
@@ -3809,10 +3946,10 @@ static OutputStream *choose_output(void)

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
-        int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
-                       av_rescale_q(ost->last_mux_dts, ost->st->time_base,
+        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
+                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
-        if (ost->last_mux_dts == AV_NOPTS_VALUE)
+        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG,
                "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
@@ -3852,10 +3989,8 @@ static int check_keyboard_interaction(int64_t cur_time)
        last_time = cur_time;
    }else
        key = -1;
-    if (key == 'q') {
-        av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
+    if (key == 'q')
        return AVERROR_EXIT;
-    }
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
@@ -4077,20 +4212,12 @@ static int get_input_packet_mt(InputFile *f, AVPacket **pkt)

static int get_input_packet(InputFile *f, AVPacket **pkt)
{
-    if (f->readrate || f->rate_emu) {
+    if (f->rate_emu) {
        int i;
-        int64_t file_start = copy_ts * (
-                              (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
-                              (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
-                             );
-        float scale = f->rate_emu ? 1.0 : f->readrate;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
-            int64_t stream_ts_offset, pts, now;
-            if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
-            stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
-            pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
-            now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
+            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
+            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
@@ -4699,7 +4826,7 @@ static int transcode(void)

    term_exit();

-    /* write the trailer if needed */
+    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
@@ -4719,18 +4846,6 @@ static int transcode(void)
    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

-    /* close the output files */
-    for (i = 0; i < nb_output_files; i++) {
-        os = output_files[i]->ctx;
-        if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
-            if ((ret = avio_closep(&os->pb)) < 0) {
-                av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
-                if (exit_on_error)
-                    exit_program(1);
-            }
-        }
-    }
-
    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
@@ -4786,6 +4901,7 @@ static int transcode(void)
            av_dict_free(&ost->encoder_opts);
            av_dict_free(&ost->sws_dict);
            av_dict_free(&ost->swr_opts);
+            av_dict_free(&ost->resample_opts);
        }
    }
}
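The disposition block restored above is a self-contained use of av_opt_eval_flags(): a throwaway AVClass/AVOption table maps flag names such as "default" or "forced" onto AV_DISPOSITION_* bits. A minimal sketch of the same pattern follows; the helper name parse_disposition and the reduced two-entry table are illustrative assumptions, not code from this tree, but the libavutil/libavformat symbols used are the real ones.

    #include <stdint.h>
    #include <libavformat/avformat.h>   /* AV_DISPOSITION_* */
    #include <libavutil/opt.h>

    /* Hypothetical helper: turn a string like "default+forced" into AV_DISPOSITION_* bits. */
    static int parse_disposition(const char *str, int *out)
    {
        static const AVOption opts[] = {
            { "disposition", NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "forced",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED  }, .unit = "flags" },
            { NULL },
        };
        static const AVClass klass = {
            .class_name = "disposition",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &klass;

        /* av_opt_eval_flags() resolves the '+'-separated names against the CONST entries. */
        return av_opt_eval_flags(&pclass, &opts[0], str, out);
    }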
@@ -31,7 +31,6 @@
#include "libavformat/avio.h"

#include "libavcodec/avcodec.h"
-#include "libavcodec/bsf.h"

#include "libavfilter/avfilter.h"

@@ -47,14 +46,12 @@

#include "libswresample/swresample.h"

-enum VideoSyncMethod {
-    VSYNC_AUTO = -1,
-    VSYNC_PASSTHROUGH,
-    VSYNC_CFR,
-    VSYNC_VFR,
-    VSYNC_VSCFR,
-    VSYNC_DROP,
-};
+#define VSYNC_AUTO       -1
+#define VSYNC_PASSTHROUGH 0
+#define VSYNC_CFR         1
+#define VSYNC_VFR         2
+#define VSYNC_VSCFR       0xfe
+#define VSYNC_DROP        0xff

#define MAX_STREAMS 1024    /* arbitrary sanity check value */

@@ -62,8 +59,17 @@ enum HWAccelID {
    HWACCEL_NONE = 0,
    HWACCEL_AUTO,
    HWACCEL_GENERIC,
+    HWACCEL_VIDEOTOOLBOX,
+    HWACCEL_QSV,
};

+typedef struct HWAccel {
+    const char *name;
+    int (*init)(AVCodecContext *s);
+    enum HWAccelID id;
+    enum AVPixelFormat pix_fmt;
+} HWAccel;
+
typedef struct HWDevice {
    const char *name;
    enum AVHWDeviceType type;
@@ -113,7 +119,6 @@ typedef struct OptionsContext {
    int64_t input_ts_offset;
    int loop;
    int rate_emu;
-    float readrate;
    int accurate_seek;
    int thread_queue_size;

@@ -230,8 +235,6 @@ typedef struct OptionsContext {
    int nb_enc_time_bases;
    SpecifierOpt *autoscale;
    int nb_autoscale;
-    SpecifierOpt *bits_per_raw_sample;
-    int nb_bits_per_raw_sample;
} OptionsContext;

typedef struct InputFilter {
@@ -254,7 +257,6 @@ typedef struct InputFilter {
    uint64_t channel_layout;

    AVBufferRef *hw_frames_ctx;
-    int32_t *displaymatrix;

    int eof;
} InputFilter;
@@ -277,10 +279,9 @@ typedef struct OutputFilter {
    uint64_t channel_layout;

    // those are only set if no format is specified and the encoder gives us multiple options
-    // They point directly to the relevant lists of the encoder.
-    const int *formats;
-    const uint64_t *channel_layouts;
-    const int *sample_rates;
+    int *formats;
+    uint64_t *channel_layouts;
+    int *sample_rates;
} OutputFilter;

typedef struct FilterGraph {
@@ -289,9 +290,6 @@ typedef struct FilterGraph {

    AVFilterGraph *graph;
    int reconfiguration;
-    // true when the filtergraph contains only meta filters
-    // that do not modify the frame data
-    int is_meta;

    InputFilter **inputs;
    int nb_inputs;
@@ -311,14 +309,13 @@ typedef struct InputStream {
    AVCodecContext *dec_ctx;
    const AVCodec *dec;
    AVFrame *decoded_frame;
+    AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
    AVPacket *pkt;

-    int64_t prev_pkt_pts;
    int64_t start;     /* time when read started */
    /* predicted dts of the next packet read for this stream or (when there are
     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
    int64_t next_dts;
-    int64_t first_dts; ///< dts of the first packet read for this stream (in AV_TIME_BASE units)
    int64_t dts;       ///< dts of the last packet read for this stream (in AV_TIME_BASE units)

    int64_t next_pts;  ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
@@ -361,6 +358,8 @@ typedef struct InputStream {
        unsigned int initialize; ///< marks if sub2video_update should force an initialization
    } sub2video;

+    int dr1;
+
    /* decoded data from this stream goes into all those filters
     * currently video and audio only */
    InputFilter **filters;
@@ -377,9 +376,11 @@ typedef struct InputStream {
    /* hwaccel context */
    void *hwaccel_ctx;
    void (*hwaccel_uninit)(AVCodecContext *s);
+    int (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags);
    int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
    enum AVPixelFormat hwaccel_pix_fmt;
    enum AVPixelFormat hwaccel_retrieved_pix_fmt;
+    AVBufferRef *hw_frames_ctx;

    /* stats */
    // combined size of all the packets read
@@ -410,12 +411,12 @@ typedef struct InputFile {
    int64_t ts_offset;
    int64_t last_ts;
    int64_t start_time;   /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
+    int seek_timestamp;
    int64_t recording_time;
    int nb_streams;       /* number of stream that ffmpeg is aware of; may be different
                             from ctx.nb_streams if new streams appear during av_read_frame() */
    int nb_streams_warn;  /* number of streams that the user was warned of */
    int rate_emu;
-    float readrate;
    int accurate_seek;

    AVPacket *pkt;
@@ -485,13 +486,11 @@ typedef struct OutputStream {
    /* video only */
    AVRational frame_rate;
    AVRational max_frame_rate;
-    enum VideoSyncMethod vsync_method;
    int is_cfr;
    int force_fps;
    int top_field_first;
    int rotate_overridden;
    int autoscale;
-    int bits_per_raw_sample;
    double rotate_override_value;

    AVRational frame_aspect_ratio;
@@ -504,7 +503,6 @@ typedef struct OutputStream {
    char *forced_keyframes;
    AVExpr *forced_keyframes_pexpr;
    double forced_keyframes_expr_const_values[FKF_NB];
-    int dropped_keyframe;

    /* audio only */
    int *audio_channels_map;   /* list of the channels id to pick from the source stream */
@@ -521,6 +519,7 @@ typedef struct OutputStream {
    AVDictionary *encoder_opts;
    AVDictionary *sws_dict;
    AVDictionary *swr_opts;
+    AVDictionary *resample_opts;
    char *apad;
    OSTFinished finished;      /* no more packets should be written for this stream */
    int unavailable;           /* true if the steram is unavailable (possibly temporarily) */
@@ -608,7 +607,7 @@ extern float dts_error_threshold;

extern int audio_volume;
extern int audio_sync_method;
-extern enum VideoSyncMethod video_sync_method;
+extern int video_sync_method;
extern float frame_drop_threshold;
extern int do_benchmark;
extern int do_benchmark_all;
@@ -628,8 +627,9 @@ extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern float max_error_rate;
+extern char *videotoolbox_pixfmt;

-extern char *filter_nbthreads;
+extern int filter_nbthreads;
extern int filter_complex_nbthreads;
extern int vstats_version;
extern int auto_conversion_filters;
@@ -637,6 +637,7 @@ extern int auto_conversion_filters;
extern const AVIOInterruptCB int_cb;

extern const OptionDef options[];
+extern const HWAccel hwaccels[];
#if CONFIG_QSV
extern char *qsv_device;
#endif
@@ -26,6 +26,8 @@
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

+#include "libavresample/avresample.h"
+
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
@@ -37,16 +39,22 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"

-// FIXME: YUV420P etc. are actually supported with full color range,
-// yet the latter information isn't available here.
-static const enum AVPixelFormat *get_compliance_normal_pix_fmts(const AVCodec *codec, const enum AVPixelFormat default_formats[])
+static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
{
    static const enum AVPixelFormat mjpeg_formats[] =
        { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
+          AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
          AV_PIX_FMT_NONE };
+    static const enum AVPixelFormat ljpeg_formats[] =
+        { AV_PIX_FMT_BGR24   , AV_PIX_FMT_BGRA    , AV_PIX_FMT_BGR0,
+          AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+          AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
+          AV_PIX_FMT_NONE};

-    if (!strcmp(codec->name, "mjpeg")) {
+    if (codec_id == AV_CODEC_ID_MJPEG) {
        return mjpeg_formats;
+    } else if (codec_id == AV_CODEC_ID_LJPEG) {
+        return ljpeg_formats;
    } else {
        return default_formats;
    }
@@ -62,8 +70,8 @@ static enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx
    int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
    enum AVPixelFormat best= AV_PIX_FMT_NONE;

-    if (enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
-        p = get_compliance_normal_pix_fmts(codec, p);
+    if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+        p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
    }
    for (; *p != AV_PIX_FMT_NONE; p++) {
        best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
@@ -83,13 +91,10 @@ static enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx
        return target;
    }
}

-/* May return NULL (no pixel format found), a static string or a string
- * backed by the bprint. Nothing has been written to the AVBPrint in case
- * NULL is returned. The AVBPrint provided should be clean. */
-static const char *choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint)
+static char *choose_pix_fmts(OutputFilter *ofilter)
{
    OutputStream *ost = ofilter->ost;
-    const AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
+    AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
    if (strict_dict)
        // used by choose_pixel_fmt() and below
        av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
@@ -99,91 +104,105 @@ static const char *choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint)
                                     AVFILTER_AUTO_CONVERT_NONE);
        if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
            return NULL;
-        return av_get_pix_fmt_name(ost->enc_ctx->pix_fmt);
+        return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
    }
    if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
-        return av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt));
+        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
    } else if (ost->enc && ost->enc->pix_fmts) {
        const enum AVPixelFormat *p;
+        AVIOContext *s = NULL;
+        uint8_t *ret;
+        int len;
+
+        if (avio_open_dyn_buf(&s) < 0)
+            exit_program(1);

        p = ost->enc->pix_fmts;
-        if (ost->enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
-            p = get_compliance_normal_pix_fmts(ost->enc, p);
+        if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+            p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
        }

        for (; *p != AV_PIX_FMT_NONE; p++) {
            const char *name = av_get_pix_fmt_name(*p);
-            av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
+            avio_printf(s, "%s|", name);
        }
-        if (!av_bprint_is_complete(bprint))
-            exit_program(1);
-        return bprint->str;
+        len = avio_close_dyn_buf(s, &ret);
+        ret[len - 1] = 0;
+        return ret;
    } else
        return NULL;
}

-/* Define a function for appending a list of allowed formats
- * to an AVBPrint. If nonempty, the list will have a header. */
-#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
-static void choose_ ## name (OutputFilter *ofilter, AVBPrint *bprint) \
+/* Define a function for building a string containing a list of
+ * allowed formats. */
+#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
+static char *choose_ ## suffix (OutputFilter *ofilter) \
{ \
-    if (ofilter->var == none && !ofilter->supported_list) \
-        return; \
-    av_bprintf(bprint, #name "="); \
    if (ofilter->var != none) { \
-        av_bprintf(bprint, printf_format, get_name(ofilter->var)); \
-    } else { \
+        get_name(ofilter->var); \
+        return av_strdup(name); \
+    } else if (ofilter->supported_list) { \
        const type *p; \
+        AVIOContext *s = NULL; \
+        uint8_t *ret; \
+        int len; \
+ \
+        if (avio_open_dyn_buf(&s) < 0) \
+            exit_program(1); \
 \
        for (p = ofilter->supported_list; *p != none; p++) { \
-            av_bprintf(bprint, printf_format "|", get_name(*p)); \
+            get_name(*p); \
+            avio_printf(s, "%s|", name); \
        } \
-        if (bprint->len > 0) \
-            bprint->str[--bprint->len] = '\0'; \
-    } \
-    av_bprint_chars(bprint, ':', 1); \
+        len = avio_close_dyn_buf(s, &ret); \
+        ret[len - 1] = 0; \
+        return ret; \
+    } else \
+        return NULL; \
}

//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
//                  GET_PIX_FMT_NAME)

DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
-                  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)
+                  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)

DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
-                  "%d", )
+                  GET_SAMPLE_RATE_NAME)

DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
-                  "0x%"PRIx64, )
+                  GET_CH_LAYOUT_NAME)

int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
    FilterGraph *fg = av_mallocz(sizeof(*fg));
-    OutputFilter *ofilter;
-    InputFilter  *ifilter;

    if (!fg)
        exit_program(1);
    fg->index = nb_filtergraphs;

-    ofilter = ALLOC_ARRAY_ELEM(fg->outputs, fg->nb_outputs);
-    ofilter->ost    = ost;
-    ofilter->graph  = fg;
-    ofilter->format = -1;
+    GROW_ARRAY(fg->outputs, fg->nb_outputs);
+    if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
+        exit_program(1);
+    fg->outputs[0]->ost    = ost;
+    fg->outputs[0]->graph  = fg;
+    fg->outputs[0]->format = -1;

-    ost->filter = ofilter;
+    ost->filter = fg->outputs[0];

-    ifilter = ALLOC_ARRAY_ELEM(fg->inputs, fg->nb_inputs);
-    ifilter->ist    = ist;
-    ifilter->graph  = fg;
-    ifilter->format = -1;
+    GROW_ARRAY(fg->inputs, fg->nb_inputs);
+    if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
+        exit_program(1);
+    fg->inputs[0]->ist    = ist;
+    fg->inputs[0]->graph  = fg;
+    fg->inputs[0]->format = -1;

-    ifilter->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
-    if (!ifilter->frame_queue)
+    fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+    if (!fg->inputs[0]->frame_queue)
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
-    ist->filters[ist->nb_filters - 1] = ifilter;
+    ist->filters[ist->nb_filters - 1] = fg->inputs[0];

    GROW_ARRAY(filtergraphs, nb_filtergraphs);
    filtergraphs[nb_filtergraphs - 1] = fg;
@@ -196,15 +215,17 @@ static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
    AVFilterContext *ctx = inout->filter_ctx;
    AVFilterPad *pads = in ? ctx->input_pads  : ctx->output_pads;
    int       nb_pads = in ? ctx->nb_inputs   : ctx->nb_outputs;
-    char *res;
+    AVIOContext *pb;
+    uint8_t *res = NULL;

-    if (nb_pads > 1)
-        res = av_strdup(ctx->filter->name);
-    else
-        res = av_asprintf("%s:%s", ctx->filter->name,
-                          avfilter_pad_get_name(pads, inout->pad_idx));
-    if (!res)
+    if (avio_open_dyn_buf(&pb) < 0)
        exit_program(1);

+    avio_printf(pb, "%s", ctx->filter->name);
+    if (nb_pads > 1)
+        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
+    avio_w8(pb, 0);
+    avio_close_dyn_buf(pb, &res);
    return res;
}

@@ -212,7 +233,6 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
-    InputFilter *ifilter;
    int i;

    // TODO: support other filter types
@@ -279,19 +299,21 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
    ist->decoding_needed |= DECODING_FOR_FILTER;
    ist->st->discard = AVDISCARD_NONE;

-    ifilter = ALLOC_ARRAY_ELEM(fg->inputs, fg->nb_inputs);
-    ifilter->ist    = ist;
-    ifilter->graph  = fg;
-    ifilter->format = -1;
-    ifilter->type   = ist->st->codecpar->codec_type;
-    ifilter->name   = describe_filter_link(fg, in, 1);
+    GROW_ARRAY(fg->inputs, fg->nb_inputs);
+    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
+        exit_program(1);
+    fg->inputs[fg->nb_inputs - 1]->ist    = ist;
+    fg->inputs[fg->nb_inputs - 1]->graph  = fg;
+    fg->inputs[fg->nb_inputs - 1]->format = -1;
+    fg->inputs[fg->nb_inputs - 1]->type   = ist->st->codecpar->codec_type;
+    fg->inputs[fg->nb_inputs - 1]->name   = describe_filter_link(fg, in, 1);

-    ifilter->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
-    if (!ifilter->frame_queue)
+    fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+    if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
-    ist->filters[ist->nb_filters - 1] = ifilter;
+    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}

int init_complex_filtergraph(FilterGraph *fg)
@@ -315,15 +337,18 @@ int init_complex_filtergraph(FilterGraph *fg)
        init_input_filter(fg, cur);

    for (cur = outputs; cur;) {
-        OutputFilter *const ofilter = ALLOC_ARRAY_ELEM(fg->outputs, fg->nb_outputs);
+        GROW_ARRAY(fg->outputs, fg->nb_outputs);
+        fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
+        if (!fg->outputs[fg->nb_outputs - 1])
+            exit_program(1);

-        ofilter->graph   = fg;
-        ofilter->out_tmp = cur;
-        ofilter->type    = avfilter_pad_get_type(cur->filter_ctx->output_pads,
+        fg->outputs[fg->nb_outputs - 1]->graph   = fg;
+        fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
+        fg->outputs[fg->nb_outputs - 1]->type    = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                                          cur->pad_idx);
-        ofilter->name    = describe_filter_link(fg, cur, 0);
+        fg->outputs[fg->nb_outputs - 1]->name    = describe_filter_link(fg, cur, 0);
        cur = cur->next;
-        ofilter->out_tmp->next = NULL;
+        fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
    }

fail:
@@ -407,13 +432,12 @@ static int insert_filter(AVFilterContext **last_filter, int *pad_idx,

static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
+    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVFilterContext *last_filter = out->filter_ctx;
-    AVBPrint bprint;
    int pad_idx = out->pad_idx;
    int ret;
-    const char *pix_fmts;
    char name[255];

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
@@ -427,7 +451,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
    if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
        char args[255];
        AVFilterContext *filter;
-        const AVDictionaryEntry *e = NULL;
+        AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofilter->width, ofilter->height);
@@ -449,14 +473,14 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
        pad_idx = 0;
    }

-    av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
-    if ((pix_fmts = choose_pix_fmts(ofilter, &bprint))) {
+    if ((pix_fmts = choose_pix_fmts(ofilter))) {
        AVFilterContext *filter;
+        snprintf(name, sizeof(name), "format_out_%d_%d",
+                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", pix_fmts, NULL, fg->graph);
-        av_bprint_finalize(&bprint, NULL);
+        av_freep(&pix_fmts);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
@@ -507,7 +531,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
    AVCodecContext *codec  = ost->enc_ctx;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
-    AVBPrint args;
+    char *sample_fmts, *sample_rates, *channel_layouts;
    char name[255];
    int ret;

@@ -530,58 +554,72 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
                                       avfilter_get_by_name(filter_name), \
                                       filter_name, arg, NULL, fg->graph); \
    if (ret < 0) \
-        goto fail; \
+        return ret; \
 \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
    if (ret < 0) \
-        goto fail; \
+        return ret; \
 \
    last_filter = filt_ctx; \
    pad_idx = 0; \
} while (0)
-    av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
    if (ost->audio_channels_mapped) {
        int i;
-        av_bprintf(&args, "0x%"PRIx64,
+        AVBPrint pan_buf;
+        av_bprint_init(&pan_buf, 256, 8192);
+        av_bprintf(&pan_buf, "0x%"PRIx64,
                   av_get_default_channel_layout(ost->audio_channels_mapped));
        for (i = 0; i < ost->audio_channels_mapped; i++)
            if (ost->audio_channels_map[i] != -1)
-                av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
+                av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);

-        AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
-        av_bprint_clear(&args);
+        AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
+        av_bprint_finalize(&pan_buf, NULL);
    }

    if (codec->channels && !codec->channel_layout)
        codec->channel_layout = av_get_default_channel_layout(codec->channels);

-    choose_sample_fmts(ofilter, &args);
-    choose_sample_rates(ofilter, &args);
-    choose_channel_layouts(ofilter, &args);
-    if (!av_bprint_is_complete(&args)) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
-    }
-    if (args.len) {
+    sample_fmts     = choose_sample_fmts(ofilter);
+    sample_rates    = choose_sample_rates(ofilter);
+    channel_layouts = choose_channel_layouts(ofilter);
+    if (sample_fmts || sample_rates || channel_layouts) {
        AVFilterContext *format;
+        char args[256];
+        args[0] = 0;
+
+        if (sample_fmts)
+            av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
+                        sample_fmts);
+        if (sample_rates)
+            av_strlcatf(args, sizeof(args), "sample_rates=%s:",
+                        sample_rates);
+        if (channel_layouts)
+            av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
+                        channel_layouts);
+
+        av_freep(&sample_fmts);
+        av_freep(&sample_rates);
+        av_freep(&channel_layouts);
+
        snprintf(name, sizeof(name), "format_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
-                                          name, args.str, NULL, fg->graph);
+                                          name, args, NULL, fg->graph);
        if (ret < 0)
-            goto fail;
+            return ret;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
-            goto fail;
+            return ret;

        last_filter = format;
        pad_idx = 0;
    }

    if (ost->apad && of->shortest) {
+        char args[256];
        int i;

        for (i=0; i<of->ctx->nb_streams; i++)
@@ -589,7 +627,8 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
            break;

        if (i<of->ctx->nb_streams) {
-            AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
+            snprintf(args, sizeof(args), "%s", ost->apad);
+            AUTO_INSERT_FILTER("-apad", "apad", args);
        }
    }

@@ -598,14 +637,12 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
-        goto fail;
+        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
-        goto fail;
-fail:
-    av_bprint_finalize(&args, NULL);
+        return ret;

-    return ret;
+    return 0;
}

static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
@@ -619,7 +656,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
-    default: av_assert0(0); return 0;
+    default: av_assert0(0);
    }
}

@@ -690,7 +727,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
{
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
-    const AVPixFmtDescriptor *desc;
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
@@ -748,46 +784,44 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
    av_freep(&par);
    last_filter = ifilter->filter;

-    desc = av_pix_fmt_desc_get(ifilter->format);
-    av_assert0(desc);
+    if (ist->autorotate) {
+        double theta = get_rotation(ist->st);

-    // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
-    if (ist->autorotate && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
-        int32_t *displaymatrix = ifilter->displaymatrix;
-        double theta;
-
-        if (!displaymatrix)
-            displaymatrix = (int32_t *)av_stream_get_side_data(ist->st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
-        theta = get_rotation(displaymatrix);
-
        if (fabs(theta - 90) < 1.0) {
-            ret = insert_filter(&last_filter, &pad_idx, "transpose",
-                                displaymatrix[3] > 0 ? "cclock_flip" : "clock");
+            ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
        } else if (fabs(theta - 180) < 1.0) {
-            if (displaymatrix[0] < 0) {
-                ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
-                if (ret < 0)
-                    return ret;
-            }
-            if (displaymatrix[4] < 0) {
-                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
-            }
+            ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
+            if (ret < 0)
+                return ret;
+            ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
-            ret = insert_filter(&last_filter, &pad_idx, "transpose",
-                                displaymatrix[3] < 0 ? "clock_flip" : "cclock");
+            ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
-        } else if (fabs(theta) < 1.0) {
-            if (displaymatrix && displaymatrix[4] < 0) {
-                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
-            }
        }
        if (ret < 0)
            return ret;
    }

+    if (do_deinterlace) {
+        AVFilterContext *yadif;
+
+        snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
+                 ist->file_index, ist->st->index);
+        if ((ret = avfilter_graph_create_filter(&yadif,
+                                                avfilter_get_by_name("yadif"),
+                                                name, "", NULL,
+                                                fg->graph)) < 0)
+            return ret;
+
+        if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
+            return ret;
+
+        last_filter = yadif;
+    }
+
    snprintf(name, sizeof(name), "trim_in_%d_%d",
             ist->file_index, ist->st->index);
    if (copy_ts) {
@@ -932,7 +966,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
-    default: av_assert0(0); return 0;
+    default: av_assert0(0);
    }
}

@@ -946,30 +980,6 @@ static void cleanup_filtergraph(FilterGraph *fg)
    avfilter_graph_free(&fg->graph);
}

-static int filter_is_buffersrc(const AVFilterContext *f)
-{
-    return f->nb_inputs == 0 &&
-           (!strcmp(f->filter->name, "buffersrc") ||
-            !strcmp(f->filter->name, "abuffersrc"));
-}
-
-static int graph_is_meta(AVFilterGraph *graph)
-{
-    for (unsigned i = 0; i < graph->nb_filters; i++) {
-        const AVFilterContext *f = graph->filters[i];
-
-        /* in addition to filters flagged as meta, also
-         * disregard sinks and buffersources (but not other sources,
-         * since they introduce data we are not aware of)
-         */
-        if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
-              f->nb_outputs == 0 ||
-              filter_is_buffersrc(f)))
-            return 0;
-    }
-    return 1;
-}
-
int configure_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
@@ -984,31 +994,20 @@ int configure_filtergraph(FilterGraph *fg)
    if (simple) {
        OutputStream *ost = fg->outputs[0]->ost;
        char args[512];
-        const AVDictionaryEntry *e = NULL;
+        AVDictionaryEntry *e = NULL;

-        if (filter_nbthreads) {
-            ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
-            if (ret < 0)
-                goto fail;
-        } else {
-            e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
-            if (e)
-                av_opt_set(fg->graph, "threads", e->value, 0);
-        }
+        fg->graph->nb_threads = filter_nbthreads;

        args[0] = 0;
-        e = NULL;
        while ((e = av_dict_get(ost->sws_dict, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
        }
-        if (strlen(args)) {
+        if (strlen(args))
            args[strlen(args)-1] = 0;
        fg->graph->scale_sws_opts = av_strdup(args);
-        }

        args[0] = 0;
-        e = NULL;
        while ((e = av_dict_get(ost->swr_opts, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
@@ -1016,6 +1015,18 @@ int configure_filtergraph(FilterGraph *fg)
        if (strlen(args))
            args[strlen(args)-1] = 0;
        av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
+
+        args[0] = '\0';
+        while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
+                                AV_DICT_IGNORE_SUFFIX))) {
+            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
+        }
+        if (strlen(args))
+            args[strlen(args) - 1] = '\0';
+
+        e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
+        if (e)
+            av_opt_set(fg->graph, "threads", e->value, 0);
    } else {
        fg->graph->nb_threads = filter_complex_nbthreads;
    }
@@ -1070,8 +1081,6 @@ int configure_filtergraph(FilterGraph *fg)
    if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
        goto fail;

-    fg->is_meta = graph_is_meta(fg->graph);
-
    /* limit the lists of allowed formats to the ones selected, to
     * make sure they stay the same if the filtergraph is reconfigured later */
    for (i = 0; i < fg->nb_outputs; i++) {
@@ -1147,8 +1156,6 @@ fail:

int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
-    AVFrameSideData *sd;
-
    av_buffer_unref(&ifilter->hw_frames_ctx);

    ifilter->format = frame->format;
@@ -1161,11 +1168,6 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
    ifilter->channels = frame->channels;
    ifilter->channel_layout = frame->channel_layout;

-    av_freep(&ifilter->displaymatrix);
-    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
-    if (sd)
-        ifilter->displaymatrix = av_memdup(sd->data, sizeof(int32_t) * 9);
-
    if (frame->hw_frames_ctx) {
        ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
        if (!ifilter->hw_frames_ctx)
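The choose_pix_fmts()/DEF_CHOOSE_FORMAT code restored above relies on libavformat's dynamic-buffer API to accumulate a '|'-separated list and then overwrite the trailing separator. A standalone sketch of that pattern, outside the ffmpeg_filter.c context: the function name join_pix_fmt_names is illustrative only, while avio_open_dyn_buf(), avio_printf(), avio_close_dyn_buf() and av_get_pix_fmt_name() are the actual library calls.

    #include <libavformat/avio.h>
    #include <libavutil/pixdesc.h>
    #include <libavutil/pixfmt.h>

    /* Build "yuv420p|yuv422p|..." from an AV_PIX_FMT_NONE-terminated list.
     * The returned buffer is owned by the caller and freed with av_free(). */
    static uint8_t *join_pix_fmt_names(const enum AVPixelFormat *fmts)
    {
        AVIOContext *s = NULL;
        uint8_t *buf;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            return NULL;
        for (; *fmts != AV_PIX_FMT_NONE; fmts++)
            avio_printf(s, "%s|", av_get_pix_fmt_name(*fmts));
        len = avio_close_dyn_buf(s, &buf);
        if (len > 0)
            buf[len - 1] = 0;   /* drop the trailing '|' */
        return buf;
    }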
@@ -93,8 +93,6 @@ static char *hw_device_default_name(enum AVHWDeviceType type)

int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
{
-    // "type=name"
-    // "type=name,key=value,key2=value2"
    // "type=name:device,key=value,key2=value2"
    // "type:device,key=value,key2=value2"
    // -> av_hwdevice_ctx_create()
@@ -126,7 +124,7 @@ int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
    }

    if (*p == '=') {
-        k = strcspn(p + 1, ":@,");
+        k = strcspn(p + 1, ":@");

        name = av_strndup(p + 1, k);
        if (!name) {
@@ -192,18 +190,6 @@ int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
                                           src->device_ref, 0);
        if (err < 0)
            goto fail;
-    } else if (*p == ',') {
-        err = av_dict_parse_string(&options, p + 1, "=", ",", 0);
-
-        if (err < 0) {
-            errmsg = "failed to parse options";
-            goto invalid;
-        }
-
-        err = av_hwdevice_ctx_create(&device_ref, type,
-                                     NULL, options, 0);
-        if (err < 0)
-            goto fail;
    } else {
        errmsg = "parse error";
        goto invalid;
@@ -353,18 +339,6 @@ int hw_device_setup_for_decode(InputStream *ist)
    } else if (ist->hwaccel_id == HWACCEL_GENERIC) {
        type = ist->hwaccel_device_type;
        dev = hw_device_get_by_type(type);
-
-        // When "-qsv_device device" is used, an internal QSV device named
-        // as "__qsv_device" is created. Another QSV device is created too
-        // if "-init_hw_device qsv=name:device" is used. There are 2 QSV devices
-        // if both "-qsv_device device" and "-init_hw_device qsv=name:device"
-        // are used, hw_device_get_by_type(AV_HWDEVICE_TYPE_QSV) returns NULL.
-        // To keep back-compatibility with the removed ad-hoc libmfx setup code,
-        // call hw_device_get_by_name("__qsv_device") to select the internal QSV
-        // device.
-        if (!dev && type == AV_HWDEVICE_TYPE_QSV)
-            dev = hw_device_get_by_name("__qsv_device");
-
        if (!dev)
            err = hw_device_init_from_type(type, NULL, &dev);
    } else {
@@ -553,21 +527,15 @@ int hw_device_setup_for_filter(FilterGraph *fg)
    HWDevice *dev;
    int i;

-    // Pick the last hardware device if the user doesn't pick the device for
-    // filters explicitly with the filter_hw_device option.
+    // If the user has supplied exactly one hardware device then just
+    // give it straight to every filter for convenience. If more than
+    // one device is available then the user needs to pick one explcitly
+    // with the filter_hw_device option.
    if (filter_hw_device)
        dev = filter_hw_device;
-    else if (nb_hw_devices > 0) {
-        dev = hw_devices[nb_hw_devices - 1];
-        if (nb_hw_devices > 1)
-            av_log(NULL, AV_LOG_WARNING, "There are %d hardware devices. device "
-                   "%s of type %s is picked for filters by default. Set hardware "
-                   "device explicitly with the filter_hw_device option if device "
-                   "%s is not usable for filters.\n",
-                   nb_hw_devices, dev->name,
-                   av_hwdevice_get_type_name(dev->type), dev->name);
-    } else
+    else if (nb_hw_devices == 1)
+        dev = hw_devices[0];
+    else
        dev = NULL;

    if (dev) {
@@ -27,14 +27,12 @@
 #include "libavformat/avformat.h"

 #include "libavcodec/avcodec.h"
-#include "libavcodec/bsf.h"

 #include "libavfilter/avfilter.h"

 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/avutil.h"
-#include "libavutil/bprint.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/fifo.h"
@@ -80,7 +78,7 @@ static const char *const opt_name_inter_matrices[] = {"inter_matrix",
 static const char *const opt_name_chroma_intra_matrices[] = {"chroma_intra_matrix", NULL};
 static const char *const opt_name_top_field_first[] = {"top", NULL};
 static const char *const opt_name_presets[] = {"pre", "apre", "vpre", "spre", NULL};
-static const char *const opt_name_copy_initial_nonkeyframes[] = {"copyinkf", NULL};
+static const char *const opt_name_copy_initial_nonkeyframes[] = {"copyinkfr", NULL};
 static const char *const opt_name_copy_prior_start[] = {"copypriorss", NULL};
 static const char *const opt_name_filters[] = {"filter", "af", "vf", NULL};
 static const char *const opt_name_filter_scripts[] = {"filter_script", NULL};
@@ -97,7 +95,6 @@ static const char *const opt_name_discard[] = {"discard", NULL
 static const char *const opt_name_disposition[] = {"disposition", NULL};
 static const char *const opt_name_time_bases[] = {"time_base", NULL};
 static const char *const opt_name_enc_time_bases[] = {"enc_time_base", NULL};
-static const char *const opt_name_bits_per_raw_sample[] = {"bits_per_raw_sample", NULL};

 #define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\
 {\
@@ -136,6 +133,15 @@ static const char *const opt_name_bits_per_raw_sample[] = {"bits_per_raw_s
     }\
 }

+const HWAccel hwaccels[] = {
+#if CONFIG_VIDEOTOOLBOX
+    { "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
+#endif
+#if CONFIG_LIBMFX
+    { "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV },
+#endif
+    { 0 },
+};
 HWDevice *filter_hw_device;

 char *vstats_filename;
@@ -147,8 +153,9 @@ float dts_error_threshold = 3600*30;

 int audio_volume = 256;
 int audio_sync_method = 0;
-enum VideoSyncMethod video_sync_method = VSYNC_AUTO;
+int video_sync_method = VSYNC_AUTO;
 float frame_drop_threshold = 0;
+int do_deinterlace = 0;
 int do_benchmark = 0;
 int do_benchmark_all = 0;
 int do_hex_dump = 0;
@@ -162,21 +169,23 @@ int abort_on_flags = 0;
 int print_stats = -1;
 int qp_hist = 0;
 int stdin_interaction = 1;
+int frame_bits_per_raw_sample = 0;
 float max_error_rate = 2.0/3;
-char *filter_nbthreads;
+int filter_nbthreads = 0;
 int filter_complex_nbthreads = 0;
 int vstats_version = 2;
 int auto_conversion_filters = 1;
 int64_t stats_period = 500000;


+static int intra_only = 0;
 static int file_overwrite = 0;
 static int no_file_overwrite = 0;
 static int do_psnr = 0;
+static int input_sync;
 static int input_stream_potentially_available = 0;
 static int ignore_unknown_streams = 0;
 static int copy_unknown_streams = 0;
-static int recast_media = 0;
 static int find_stream_info = 1;

 static void uninit_options(OptionsContext *o)
@@ -241,7 +250,7 @@ static int show_hwaccels(void *optctx, const char *opt, const char *arg)
 /* return a copy of the input with the stream specifiers removed from the keys */
 static AVDictionary *strip_specifiers(AVDictionary *dict)
 {
-    const AVDictionaryEntry *e = NULL;
+    AVDictionaryEntry *e = NULL;
     AVDictionary *ret = NULL;

     while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
@@ -256,13 +265,6 @@ static AVDictionary *strip_specifiers(AVDictionary *dict)
     return ret;
 }

-static int opt_filter_threads(void *optctx, const char *opt, const char *arg)
-{
-    av_free(filter_nbthreads);
-    filter_nbthreads = av_strdup(arg);
-    return 0;
-}

 static int opt_abort_on(void *optctx, const char *opt, const char *arg)
 {
     static const AVOption opts[] = {
@@ -297,6 +299,27 @@ static int opt_stats_period(void *optctx, const char *opt, const char *arg)
     return 0;
 }

+static int opt_sameq(void *optctx, const char *opt, const char *arg)
+{
+    av_log(NULL, AV_LOG_ERROR, "Option '%s' was removed. "
+           "If you are looking for an option to preserve the quality (which is not "
+           "what -%s was for), use -qscale 0 or an equivalent quality factor option.\n",
+           opt, opt);
+    return AVERROR(EINVAL);
+}

+static int opt_video_channel(void *optctx, const char *opt, const char *arg)
+{
+    av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n");
+    return opt_default(optctx, "channel", arg);
+}

+static int opt_video_standard(void *optctx, const char *opt, const char *arg)
+{
+    av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n");
+    return opt_default(optctx, "standard", arg);
+}

 static int opt_audio_codec(void *optctx, const char *opt, const char *arg)
 {
     OptionsContext *o = optctx;
@@ -546,23 +569,6 @@ static int opt_vaapi_device(void *optctx, const char *opt, const char *arg)
 }
 #endif

-#if CONFIG_QSV
-static int opt_qsv_device(void *optctx, const char *opt, const char *arg)
-{
-    const char *prefix = "qsv=__qsv_device:hw_any,child_device=";
-    int err;
-    char *tmp = av_asprintf("%s%s", prefix, arg);

-    if (!tmp)
-        return AVERROR(ENOMEM);

-    err = hw_device_init_from_string(tmp, NULL);
-    av_free(tmp);

-    return err;
-}
-#endif

 static int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
 {
     if (!strcmp(arg, "list")) {
@@ -731,11 +737,11 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
     return 0;
 }

-static const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
+static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
 {
     const AVCodecDescriptor *desc;
     const char *codec_string = encoder ? "encoder" : "decoder";
-    const AVCodec *codec;
+    AVCodec *codec;

     codec = encoder ?
         avcodec_find_encoder_by_name(name) :
@@ -753,7 +759,7 @@ static const AVCodec *find_codec_or_die(const char *name, enum AVMediaType type,
         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
         exit_program(1);
     }
-    if (codec->type != type && !recast_media) {
+    if (codec->type != type) {
         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
         exit_program(1);
     }
@@ -768,8 +774,6 @@ static const AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVSt
     if (codec_name) {
         const AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
         st->codecpar->codec_id = codec->id;
-        if (recast_media && st->codecpar->codec_type != codec->type)
-            st->codecpar->codec_type = codec->type;
         return codec;
     } else
         return avcodec_find_decoder(st->codecpar->codec_id);
@@ -792,8 +796,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         char *next;
         char *discard_str = NULL;
         const AVClass *cc = avcodec_get_class();
-        const AVOption *discard_opt = av_opt_find(&cc, "skip_frame", NULL,
-                                                  0, AV_OPT_SEARCH_FAKE_OBJ);
+        const AVOption *discard_opt = av_opt_find(&cc, "skip_frame", NULL, 0, 0);

         if (!ist)
             exit_program(1);
@@ -806,7 +809,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         ist->discard = 1;
         st->discard = AVDISCARD_ALL;
         ist->nb_samples = 0;
-        ist->first_dts = AV_NOPTS_VALUE;
         ist->min_pts = INT64_MAX;
         ist->max_pts = INT64_MIN;

@@ -846,7 +848,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         }

         ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
-        ist->prev_pkt_pts = AV_NOPTS_VALUE;

         ist->dec_ctx = avcodec_alloc_context3(ist->dec);
         if (!ist->dec_ctx) {
@@ -860,14 +861,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             exit_program(1);
         }

-        ist->decoded_frame = av_frame_alloc();
-        if (!ist->decoded_frame)
-            exit_program(1);

-        ist->pkt = av_packet_alloc();
-        if (!ist->pkt)
-            exit_program(1);

         if (o->bitexact)
             ist->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT;

@@ -900,12 +893,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                     "with old commandlines. This behaviour is DEPRECATED and will be removed "
                     "in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n");
                 ist->hwaccel_output_format = AV_PIX_FMT_CUDA;
-            } else if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "qsv")) {
-                av_log(NULL, AV_LOG_WARNING,
-                    "WARNING: defaulting hwaccel_output_format to qsv for compatibility "
-                    "with old commandlines. This behaviour is DEPRECATED and will be removed "
-                    "in the future. Please explicitly set \"-hwaccel_output_format qsv\".\n");
-                ist->hwaccel_output_format = AV_PIX_FMT_QSV;
             } else if (hwaccel_output_format) {
                 ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
                 if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) {
@@ -926,10 +913,21 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             else if (!strcmp(hwaccel, "auto"))
                 ist->hwaccel_id = HWACCEL_AUTO;
             else {
-                enum AVHWDeviceType type = av_hwdevice_find_type_by_name(hwaccel);
-                if (type != AV_HWDEVICE_TYPE_NONE) {
-                    ist->hwaccel_id = HWACCEL_GENERIC;
-                    ist->hwaccel_device_type = type;
+                enum AVHWDeviceType type;
+                int i;
+                for (i = 0; hwaccels[i].name; i++) {
+                    if (!strcmp(hwaccels[i].name, hwaccel)) {
+                        ist->hwaccel_id = hwaccels[i].id;
+                        break;
+                    }
+                }

+                if (!ist->hwaccel_id) {
+                    type = av_hwdevice_find_type_by_name(hwaccel);
+                    if (type != AV_HWDEVICE_TYPE_NONE) {
+                        ist->hwaccel_id = HWACCEL_GENERIC;
+                        ist->hwaccel_device_type = type;
+                    }
                 }

                 if (!ist->hwaccel_id) {
@@ -1038,7 +1036,7 @@ static void dump_attachment(AVStream *st, const char *filename)
 {
     int ret;
     AVIOContext *out = NULL;
-    const AVDictionaryEntry *e;
+    AVDictionaryEntry *e;

     if (!st->codecpar->extradata_size) {
         av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
@@ -1070,11 +1068,11 @@ static int open_input_file(OptionsContext *o, const char *filename)
 {
     InputFile *f;
     AVFormatContext *ic;
-    const AVInputFormat *file_iformat = NULL;
+    AVInputFormat *file_iformat = NULL;
     int err, i, ret;
     int64_t timestamp;
     AVDictionary *unused_opts = NULL;
-    const AVDictionaryEntry *e = NULL;
+    AVDictionaryEntry *e = NULL;
     char * video_codec_name = NULL;
     char * audio_codec_name = NULL;
     char *subtitle_codec_name = NULL;
@@ -1119,22 +1117,20 @@ static int open_input_file(OptionsContext *o, const char *filename)
         av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
     }
     if (o->nb_audio_channels) {
-        const AVClass *priv_class;
         /* because we set audio_channels based on both the "ac" and
          * "channel_layout" options, we need to check that the specified
          * demuxer actually has the "channels" option before setting it */
-        if (file_iformat && (priv_class = file_iformat->priv_class) &&
-            av_opt_find(&priv_class, "channels", NULL, 0,
+        if (file_iformat && file_iformat->priv_class &&
+            av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
                         AV_OPT_SEARCH_FAKE_OBJ)) {
             av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
         }
     }
     if (o->nb_frame_rates) {
-        const AVClass *priv_class;
         /* set the format-level framerate option;
          * this is important for video grabbers, e.g. x11 */
-        if (file_iformat && (priv_class = file_iformat->priv_class) &&
-            av_opt_find(&priv_class, "framerate", NULL, 0,
+        if (file_iformat && file_iformat->priv_class &&
+            av_opt_find(&file_iformat->priv_class, "framerate", NULL, 0,
                         AV_OPT_SEARCH_FAKE_OBJ)) {
             av_dict_set(&o->g->format_opts, "framerate",
                         o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
@@ -1266,7 +1262,11 @@ static int open_input_file(OptionsContext *o, const char *filename)
     /* dump the file content */
     av_dump_format(ic, nb_input_files, filename, 0);

-    f = ALLOC_ARRAY_ELEM(input_files, nb_input_files);
+    GROW_ARRAY(input_files, nb_input_files);
+    f = av_mallocz(sizeof(*f));
+    if (!f)
+        exit_program(1);
+    input_files[nb_input_files - 1] = f;

     f->ctx = ic;
     f->ist_index = nb_input_streams - ic->nb_streams;
@@ -1280,17 +1280,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
     f->loop = o->loop;
     f->duration = 0;
     f->time_base = (AVRational){ 1, 1 };

-    f->readrate = o->readrate ? o->readrate : 0.0;
-    if (f->readrate < 0.0f) {
-        av_log(NULL, AV_LOG_ERROR, "Option -readrate for Input #%d is %0.3f; it must be non-negative.\n", nb_input_files, f->readrate);
-        exit_program(1);
-    }
-    if (f->readrate && f->rate_emu) {
-        av_log(NULL, AV_LOG_WARNING, "Both -readrate and -re set for Input #%d. Using -readrate %0.3f.\n", nb_input_files, f->readrate);
-        f->rate_emu = 0;
-    }

     f->pkt = av_packet_alloc();
     if (!f->pkt)
         exit_program(1);
@@ -1352,18 +1341,23 @@ static int open_input_file(OptionsContext *o, const char *filename)
     return 0;
 }

-static char *get_line(AVIOContext *s, AVBPrint *bprint)
+static uint8_t *get_line(AVIOContext *s)
 {
+    AVIOContext *line;
+    uint8_t *buf;
     char c;

-    while ((c = avio_r8(s)) && c != '\n')
-        av_bprint_chars(bprint, c, 1);
+    if (avio_open_dyn_buf(&line) < 0) {

-    if (!av_bprint_is_complete(bprint)) {
         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
         exit_program(1);
     }
-    return bprint->str;
+    while ((c = avio_r8(s)) && c != '\n')
+        avio_w8(line, c);
+    avio_w8(line, 0);
+    avio_close_dyn_buf(line, &buf);

+    return buf;
 }

 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
@@ -1476,14 +1470,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
         exit_program(1);
     }

-    ost->filtered_frame = av_frame_alloc();
-    if (!ost->filtered_frame)
-        exit_program(1);

-    ost->pkt = av_packet_alloc();
-    if (!ost->pkt)
-        exit_program(1);

     if (ost->enc) {
         AVIOContext *s = NULL;
         char *buf = NULL, *arg = NULL, *preset = NULL;
@@ -1494,21 +1480,20 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
         ost->autoscale = 1;
         MATCH_PER_STREAM_OPT(autoscale, i, ost->autoscale, oc, st);
         if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
-            AVBPrint bprint;
-            av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
             do {
-                av_bprint_clear(&bprint);
-                buf = get_line(s, &bprint);
-                if (!buf[0] || buf[0] == '#')
+                buf = get_line(s);
+                if (!buf[0] || buf[0] == '#') {
+                    av_free(buf);
                     continue;
+                }
                 if (!(arg = strchr(buf, '='))) {
                     av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
                     exit_program(1);
                 }
                 *arg++ = 0;
                 av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
+                av_free(buf);
             } while (!s->eof_reached);
-            av_bprint_finalize(&bprint, NULL);
             avio_closep(&s);
         }
         if (ret) {
@@ -1589,7 +1574,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e

     ost->max_muxing_queue_size = 128;
     MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
-    ost->max_muxing_queue_size = FFMIN(ost->max_muxing_queue_size, INT_MAX / sizeof(ost->pkt));
     ost->max_muxing_queue_size *= sizeof(ost->pkt);

     ost->muxing_queue_data_size = 0;
@@ -1597,9 +1581,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
     ost->muxing_queue_data_threshold = 50*1024*1024;
     MATCH_PER_STREAM_OPT(muxing_queue_data_threshold, i, ost->muxing_queue_data_threshold, oc, st);

-    MATCH_PER_STREAM_OPT(bits_per_raw_sample, i, ost->bits_per_raw_sample,
-                         oc, st);

     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

@@ -1609,6 +1590,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
     if (ost->enc && av_get_exact_bits_per_sample(ost->enc->id) == 24)
         av_dict_set(&ost->swr_opts, "output_sample_bits", "24", 0);

+    av_dict_copy(&ost->resample_opts, o->g->resample_opts, 0);

     ost->source_index = source_index;
     if (source_index >= 0) {
         ost->sync_ist = input_streams[source_index];
@@ -1642,26 +1625,29 @@ static void parse_matrix_coeffs(uint16_t *dest, const char *str)
 }

 /* read file contents into a string */
-static char *read_file(const char *filename)
+static uint8_t *read_file(const char *filename)
 {
     AVIOContext *pb = NULL;
+    AVIOContext *dyn_buf = NULL;
     int ret = avio_open(&pb, filename, AVIO_FLAG_READ);
-    AVBPrint bprint;
-    char *str;
+    uint8_t buf[1024], *str;

     if (ret < 0) {
         av_log(NULL, AV_LOG_ERROR, "Error opening file %s.\n", filename);
         return NULL;
     }

-    av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
-    ret = avio_read_to_bprint(pb, &bprint, SIZE_MAX);
-    avio_closep(&pb);
+    ret = avio_open_dyn_buf(&dyn_buf);
     if (ret < 0) {
-        av_bprint_finalize(&bprint, NULL);
+        avio_closep(&pb);
         return NULL;
     }
-    ret = av_bprint_finalize(&bprint, &str);
+    while ((ret = avio_read(pb, buf, sizeof(buf))) > 0)
+        avio_write(dyn_buf, buf, ret);
+    avio_w8(dyn_buf, 0);
+    avio_closep(&pb);

+    ret = avio_close_dyn_buf(dyn_buf, &str);
     if (ret < 0)
         return NULL;
     return str;
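The two helpers above are moved back from AVBPrint to AVIOContext dynamic buffers. For reference, a minimal self-contained sketch of that dynamic-buffer idiom (an illustration, not code from this patch): bytes are written into a growing buffer, NUL-terminated, and handed back as a malloc'ed string that the caller frees with av_free().

#include <libavformat/avio.h>
#include <libavutil/mem.h>

/* Sketch only: duplicate an arbitrary byte range as a NUL-terminated string. */
static char *dup_as_string_example(const uint8_t *data, int size)
{
    AVIOContext *dyn = NULL;
    uint8_t *buf = NULL;

    if (avio_open_dyn_buf(&dyn) < 0)
        return NULL;
    avio_write(dyn, data, size);   /* accumulate the payload */
    avio_w8(dyn, 0);               /* terminating NUL, as get_line()/read_file() do */
    avio_close_dyn_buf(dyn, &buf); /* returns ownership of the accumulated buffer */
    return (char *)buf;            /* caller releases it with av_free() */
}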
@@ -1731,7 +1717,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in

     if ((frame_rate || max_frame_rate) &&
         video_sync_method == VSYNC_PASSTHROUGH)
-        av_log(NULL, AV_LOG_ERROR, "Using -vsync passthrough and -r/-fpsmax can produce invalid output files\n");
+        av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r/-fpsmax can produce invalid output files\n");

     MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
     if (frame_aspect_ratio) {
@@ -1762,6 +1748,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
             exit_program(1);
         }

+        video_enc->bits_per_raw_sample = frame_bits_per_raw_sample;
         MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
         if (frame_pix_fmt && *frame_pix_fmt == '+') {
             ost->keep_pix_fmt = 1;
@@ -1774,6 +1761,8 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
         }
         st->sample_aspect_ratio = video_enc->sample_aspect_ratio;

+        if (intra_only)
+            video_enc->gop_size = 0;
         MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
         if (intra_matrix) {
             if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
@@ -1895,38 +1884,10 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
         ost->top_field_first = -1;
         MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);

-        ost->vsync_method = video_sync_method;
-        if (ost->vsync_method == VSYNC_AUTO) {
-            if (!strcmp(oc->oformat->name, "avi")) {
-                ost->vsync_method = VSYNC_VFR;
-            } else {
-                ost->vsync_method = (oc->oformat->flags & AVFMT_VARIABLE_FPS) ?
-                                    ((oc->oformat->flags & AVFMT_NOTIMESTAMPS) ?
-                                    VSYNC_PASSTHROUGH : VSYNC_VFR) :
-                                    VSYNC_CFR;
-            }

-            if (ost->source_index >= 0 && ost->vsync_method == VSYNC_CFR) {
-                const InputStream *ist = input_streams[ost->source_index];
-                const InputFile *ifile = input_files[ist->file_index];

-                if (ifile->nb_streams == 1 && ifile->input_ts_offset == 0)
-                    ost->vsync_method = VSYNC_VSCFR;
-            }

-            if (ost->vsync_method == VSYNC_CFR && copy_ts) {
-                ost->vsync_method = VSYNC_VSCFR;
-            }
-        }
-        ost->is_cfr = (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR);

         ost->avfilter = get_ost_filters(o, oc, ost);
         if (!ost->avfilter)
             exit_program(1);

-        ost->last_frame = av_frame_alloc();
-        if (!ost->last_frame)
-            exit_program(1);
     } else {
         MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
     }
@@ -2137,72 +2098,6 @@ static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
     return 0;
 }

-static int set_dispositions(OutputFile *of)
-{
-    int nb_streams[AVMEDIA_TYPE_NB] = { 0 };
-    int have_default[AVMEDIA_TYPE_NB] = { 0 };
-    int have_manual = 0;

-    // first, copy the input dispositions
-    for (int i = 0; i< of->ctx->nb_streams; i++) {
-        OutputStream *ost = output_streams[of->ost_index + i];

-        nb_streams[ost->st->codecpar->codec_type]++;

-        have_manual |= !!ost->disposition;

-        if (ost->source_index >= 0) {
-            ost->st->disposition = input_streams[ost->source_index]->st->disposition;

-            if (ost->st->disposition & AV_DISPOSITION_DEFAULT)
-                have_default[ost->st->codecpar->codec_type] = 1;
-        }
-    }

-    if (have_manual) {
-        // process manually set dispositions - they override the above copy
-        for (int i = 0; i< of->ctx->nb_streams; i++) {
-            OutputStream *ost = output_streams[of->ost_index + i];
-            int ret;

-            if (!ost->disposition)
-                continue;

-#if LIBAVFORMAT_VERSION_MAJOR >= 60
-            ret = av_opt_set(ost->st, "disposition", ost->disposition, 0);
-#else
-            {
-                const AVClass *class = av_stream_get_class();
-                const AVOption *o = av_opt_find(&class, "disposition", NULL, 0, AV_OPT_SEARCH_FAKE_OBJ);

-                av_assert0(o);
-                ret = av_opt_eval_flags(&class, o, ost->disposition, &ost->st->disposition);
-            }
-#endif

-            if (ret < 0)
-                return ret;
-        }
-    } else {
-        // For each media type with more than one stream, find a suitable stream to
-        // mark as default, unless one is already marked default.
-        // "Suitable" means the first of that type, skipping attached pictures.
-        for (int i = 0; i< of->ctx->nb_streams; i++) {
-            OutputStream *ost = output_streams[of->ost_index + i];
-            enum AVMediaType type = ost->st->codecpar->codec_type;

-            if (nb_streams[type] < 2 || have_default[type] ||
-                ost->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
-                continue;

-            ost->st->disposition |= AV_DISPOSITION_DEFAULT;
-            have_default[type] = 1;
-        }
-    }

-    return 0;
-}

 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
                                AVFormatContext *oc)
 {
@@ -2217,6 +2112,7 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
         exit_program(1);
     }

+    ost->source_index = -1;
     ost->filter = ofilter;

     ofilter->ost = ost;
@@ -2264,7 +2160,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
     OutputStream *ost;
     InputStream *ist;
     AVDictionary *unused_opts = NULL;
-    const AVDictionaryEntry *e = NULL;
+    AVDictionaryEntry *e = NULL;
+    int format_flags = 0;

     if (o->stop_time != INT64_MAX && o->recording_time != INT64_MAX) {
         o->stop_time = INT64_MAX;
@@ -2281,7 +2178,11 @@ static int open_output_file(OptionsContext *o, const char *filename)
         }
     }

-    of = ALLOC_ARRAY_ELEM(output_files, nb_output_files);
+    GROW_ARRAY(output_files, nb_output_files);
+    of = av_mallocz(sizeof(*of));
+    if (!of)
+        exit_program(1);
+    output_files[nb_output_files - 1] = of;

     of->ost_index = nb_output_streams;
     of->recording_time = o->recording_time;
@@ -2305,7 +2206,13 @@ static int open_output_file(OptionsContext *o, const char *filename)

     oc->interrupt_callback = int_cb;

+    e = av_dict_get(o->g->format_opts, "fflags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(oc, "fflags", NULL, 0, 0);
+        av_opt_eval_flags(oc, o, e->value, &format_flags);
+    }
     if (o->bitexact) {
+        format_flags |= AVFMT_FLAG_BITEXACT;
         oc->flags |= AVFMT_FLAG_BITEXACT;
     }

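The restored block mirrors the user's "fflags" value into a local format_flags variable through av_opt_find()/av_opt_eval_flags(). A small sketch of that idiom, with a hard-coded "+bitexact" string standing in for user input (illustrative only, not taken from this patch):

#include <libavformat/avformat.h>
#include <libavutil/opt.h>

/* Sketch only: evaluate an fflags string against AVFormatContext's option table. */
static int eval_fflags_example(AVFormatContext *oc, int *flags_out)
{
    const AVOption *opt = av_opt_find(oc, "fflags", NULL, 0, 0);
    if (!opt)
        return AVERROR_OPTION_NOT_FOUND;
    return av_opt_eval_flags(oc, opt, "+bitexact", flags_out);
}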
@@ -2335,35 +2242,23 @@ static int open_output_file(OptionsContext *o, const char *filename)
         if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
             int best_score = 0, idx = -1;
             int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
-            for (j = 0; j < nb_input_files; j++) {
-                InputFile *ifile = input_files[j];
-                int file_best_score = 0, file_best_idx = -1;
-                for (i = 0; i < ifile->nb_streams; i++) {
-                    int score;
-                    ist = input_streams[ifile->ist_index + i];
-                    score = ist->st->codecpar->width * ist->st->codecpar->height
-                               + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
-                               + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
-                    if (ist->user_set_discard == AVDISCARD_ALL)
+            for (i = 0; i < nb_input_streams; i++) {
+                int score;
+                ist = input_streams[i];
+                score = ist->st->codecpar->width * ist->st->codecpar->height
+                           + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
+                           + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
+                if (ist->user_set_discard == AVDISCARD_ALL)
+                    continue;
+                if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+                    score = 1;
+                if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
+                    score > best_score) {
+                    if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
                         continue;
-                    if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
-                        score = 1;
-                    if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
-                        score > file_best_score) {
-                        if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
-                            continue;
-                        file_best_score = score;
-                        file_best_idx = ifile->ist_index + i;
-                    }
+                    best_score = score;
+                    idx = i;
                 }
-                if (file_best_idx >= 0) {
-                    if((qcr == MKTAG('A', 'P', 'I', 'C')) || !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
-                        file_best_score -= 5000000*!!(input_streams[file_best_idx]->st->disposition & AV_DISPOSITION_DEFAULT);
-                    if (file_best_score > best_score) {
-                        best_score = file_best_score;
-                        idx = file_best_idx;
-                    }
-                }
             }
             if (idx >= 0)
                 new_video_stream(o, oc, idx);
@@ -2372,30 +2267,18 @@ static int open_output_file(OptionsContext *o, const char *filename)
         /* audio: most channels */
         if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
             int best_score = 0, idx = -1;
-            for (j = 0; j < nb_input_files; j++) {
-                InputFile *ifile = input_files[j];
-                int file_best_score = 0, file_best_idx = -1;
-                for (i = 0; i < ifile->nb_streams; i++) {
-                    int score;
-                    ist = input_streams[ifile->ist_index + i];
-                    score = ist->st->codecpar->channels
-                            + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
-                            + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
-                    if (ist->user_set_discard == AVDISCARD_ALL)
-                        continue;
-                    if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
-                        score > file_best_score) {
-                        file_best_score = score;
-                        file_best_idx = ifile->ist_index + i;
-                    }
+            for (i = 0; i < nb_input_streams; i++) {
+                int score;
+                ist = input_streams[i];
+                score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames
+                        + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
+                if (ist->user_set_discard == AVDISCARD_ALL)
+                    continue;
+                if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
+                    score > best_score) {
+                    best_score = score;
+                    idx = i;
                 }
-                if (file_best_idx >= 0) {
-                    file_best_score -= 5000000*!!(input_streams[file_best_idx]->st->disposition & AV_DISPOSITION_DEFAULT);
-                    if (file_best_score > best_score) {
-                        best_score = file_best_score;
-                        idx = file_best_idx;
-                    }
-                }
             }
             if (idx >= 0)
                 new_audio_stream(o, oc, idx);
@@ -2557,6 +2440,19 @@ loop_end:
         avio_closep(&pb);
     }

+#if FF_API_LAVF_AVCTX
+    for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
+        AVDictionaryEntry *e;
+        ost = output_streams[i];

+        if ((ost->stream_copy || ost->attachment_filename)
+            && (e = av_dict_get(o->g->codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
+            && (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
+            if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
+                exit_program(1);
+    }
+#endif

     if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
         av_dump_format(oc, nb_output_files - 1, oc->url, 1);
         av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", nb_output_files - 1);
@@ -2629,6 +2525,7 @@ loop_end:
         /* set the filter output constraints */
         if (ost->filter) {
             OutputFilter *f = ost->filter;
+            int count;
             switch (ost->enc_ctx->codec_type) {
             case AVMEDIA_TYPE_VIDEO:
                 f->frame_rate = ost->frame_rate;
@@ -2636,25 +2533,51 @@
                 f->height = ost->enc_ctx->height;
                 if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
                     f->format = ost->enc_ctx->pix_fmt;
-                } else {
-                    f->formats = ost->enc->pix_fmts;
+                } else if (ost->enc->pix_fmts) {
+                    count = 0;
+                    while (ost->enc->pix_fmts[count] != AV_PIX_FMT_NONE)
+                        count++;
+                    f->formats = av_mallocz_array(count + 1, sizeof(*f->formats));
+                    if (!f->formats)
+                        exit_program(1);
+                    memcpy(f->formats, ost->enc->pix_fmts, (count + 1) * sizeof(*f->formats));
                 }
                 break;
             case AVMEDIA_TYPE_AUDIO:
                 if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
                     f->format = ost->enc_ctx->sample_fmt;
-                } else {
-                    f->formats = ost->enc->sample_fmts;
+                } else if (ost->enc->sample_fmts) {
+                    count = 0;
+                    while (ost->enc->sample_fmts[count] != AV_SAMPLE_FMT_NONE)
+                        count++;
+                    f->formats = av_mallocz_array(count + 1, sizeof(*f->formats));
+                    if (!f->formats)
+                        exit_program(1);
+                    memcpy(f->formats, ost->enc->sample_fmts, (count + 1) * sizeof(*f->formats));
                 }
                 if (ost->enc_ctx->sample_rate) {
                     f->sample_rate = ost->enc_ctx->sample_rate;
-                } else {
-                    f->sample_rates = ost->enc->supported_samplerates;
+                } else if (ost->enc->supported_samplerates) {
+                    count = 0;
+                    while (ost->enc->supported_samplerates[count])
+                        count++;
+                    f->sample_rates = av_mallocz_array(count + 1, sizeof(*f->sample_rates));
+                    if (!f->sample_rates)
+                        exit_program(1);
+                    memcpy(f->sample_rates, ost->enc->supported_samplerates,
+                           (count + 1) * sizeof(*f->sample_rates));
                 }
                 if (ost->enc_ctx->channels) {
                     f->channel_layout = av_get_default_channel_layout(ost->enc_ctx->channels);
-                } else {
-                    f->channel_layouts = ost->enc->channel_layouts;
+                } else if (ost->enc->channel_layouts) {
+                    count = 0;
+                    while (ost->enc->channel_layouts[count])
+                        count++;
+                    f->channel_layouts = av_mallocz_array(count + 1, sizeof(*f->channel_layouts));
+                    if (!f->channel_layouts)
+                        exit_program(1);
+                    memcpy(f->channel_layouts, ost->enc->channel_layouts,
+                           (count + 1) * sizeof(*f->channel_layouts));
                 }
                 break;
             }
@@ -2878,12 +2801,6 @@ loop_end:
         }
     }

-    err = set_dispositions(of);
-    if (err < 0) {
-        av_log(NULL, AV_LOG_FATAL, "Error setting output stream dispositions\n");
-        exit_program(1);
-    }

     return 0;
 }

@@ -3215,11 +3132,8 @@ static int opt_vsync(void *optctx, const char *opt, const char *arg)
     else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
     else if (!av_strcasecmp(arg, "drop")) video_sync_method = VSYNC_DROP;

-    if (video_sync_method == VSYNC_AUTO) {
+    if (video_sync_method == VSYNC_AUTO)
         video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
-        av_log(NULL, AV_LOG_WARNING, "Passing a number to -vsync is deprecated,"
-               " use a string argument as described in the manual.\n");
-    }
     return 0;
 }

@@ -3281,12 +3195,12 @@ static int opt_audio_qscale(void *optctx, const char *opt, const char *arg)

 static int opt_filter_complex(void *optctx, const char *opt, const char *arg)
 {
-    FilterGraph *fg;
-    ALLOC_ARRAY_ELEM(filtergraphs, nb_filtergraphs);
-    fg = filtergraphs[nb_filtergraphs - 1];
-    fg->index = nb_filtergraphs - 1;
-    fg->graph_desc = av_strdup(arg);
-    if (!fg->graph_desc)
+    GROW_ARRAY(filtergraphs, nb_filtergraphs);
+    if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
+        return AVERROR(ENOMEM);
+    filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
+    filtergraphs[nb_filtergraphs - 1]->graph_desc = av_strdup(arg);
+    if (!filtergraphs[nb_filtergraphs - 1]->graph_desc)
         return AVERROR(ENOMEM);

     input_stream_potentially_available = 1;
@@ -3296,14 +3210,15 @@ static int opt_filter_complex(void *optctx, const char *opt, const char *arg)

 static int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
 {
-    FilterGraph *fg;
-    char *graph_desc = read_file(arg);
+    uint8_t *graph_desc = read_file(arg);
     if (!graph_desc)
         return AVERROR(EINVAL);

-    fg = ALLOC_ARRAY_ELEM(filtergraphs, nb_filtergraphs);
-    fg->index = nb_filtergraphs - 1;
-    fg->graph_desc = graph_desc;
+    GROW_ARRAY(filtergraphs, nb_filtergraphs);
+    if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
+        return AVERROR(ENOMEM);
+    filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
+    filtergraphs[nb_filtergraphs - 1]->graph_desc = graph_desc;

     input_stream_potentially_available = 1;

@@ -3523,8 +3438,6 @@ const OptionDef options[] = {
         "Ignore unknown stream types" },
     { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
         "Copy unknown stream types" },
-    { "recast_media", OPT_BOOL | OPT_EXPERT, { &recast_media },
-        "allow recasting stream type in order to force a decoder of different media type" },
     { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
           OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
         "codec name", "codec" },
@@ -3598,10 +3511,7 @@ const OptionDef options[] = {
         "when dumping packets, also dump the payload" },
     { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
            OPT_INPUT, { .off = OFFSET(rate_emu) },
-        "read input at native frame rate; equivalent to -readrate 1", "" },
-    { "readrate", HAS_ARG | OPT_FLOAT | OPT_OFFSET |
-                  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(readrate) },
-        "read input at specified rate", "speed" },
+        "read input at native frame rate", "" },
     { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
         "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
         "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
@@ -3656,7 +3566,7 @@ const OptionDef options[] = {
         "set profile", "profile" },
     { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
         "set stream filtergraph", "filter_graph" },
-    { "filter_threads", HAS_ARG, { .func_arg = opt_filter_threads },
+    { "filter_threads", HAS_ARG | OPT_INT, { &filter_nbthreads },
        "number of non-complex filter threads" },
    { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
        "read stream filtergraph description from a file", "filename" },
@@ -3699,9 +3609,6 @@ const OptionDef options[] = {
        "set the maximum number of queued packets from the demuxer" },
    { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },
-    { "bits_per_raw_sample", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT,
-        { .off = OFFSET(bits_per_raw_sample) },
-        "set the number of bits per raw sample", "number" },

    /* video options */
    { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
@@ -3721,6 +3628,10 @@ const OptionDef options[] = {
    { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
                 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
        "set pixel format", "format" },
+    { "bits_per_raw_sample", OPT_VIDEO | OPT_INT | HAS_ARG, { &frame_bits_per_raw_sample },
+        "set the number of bits per raw sample", "number" },
+    { "intra", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &intra_only },
+        "deprecated use -g 1" },
    { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
        "disable video" },
    { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
@@ -3729,6 +3640,10 @@ const OptionDef options[] = {
    { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
                OPT_OUTPUT, { .func_arg = opt_video_codec },
        "force video codec ('copy' to copy stream)", "codec" },
+    { "sameq", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
+        "Removed" },
+    { "same_quant", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
+        "Removed" },
    { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
        "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
    { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
@ -3736,6 +3651,8 @@ const OptionDef options[] = {
|
|||||||
{ "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
|
{ "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
|
||||||
OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
|
OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
|
||||||
"select two pass log file name prefix", "prefix" },
|
"select two pass log file name prefix", "prefix" },
|
||||||
|
{ "deinterlace", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_deinterlace },
|
||||||
|
"this option is deprecated, use the yadif filter instead" },
|
||||||
{ "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
|
{ "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
|
||||||
"calculate PSNR of compressed frames" },
|
"calculate PSNR of compressed frames" },
|
||||||
{ "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
|
{ "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
|
||||||
@ -3785,6 +3702,9 @@ const OptionDef options[] = {
|
|||||||
{ "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
{ "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||||
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
|
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
|
||||||
"select output format used with HW accelerated decoding", "format" },
|
"select output format used with HW accelerated decoding", "format" },
|
||||||
|
#if CONFIG_VIDEOTOOLBOX
|
||||||
|
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
|
||||||
|
#endif
|
||||||
{ "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
|
{ "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
|
||||||
"show available HW acceleration methods" },
|
"show available HW acceleration methods" },
|
||||||
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
|
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
|
||||||
@ -3838,6 +3758,13 @@ const OptionDef options[] = {
|
|||||||
{ "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
|
{ "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
|
||||||
"set canvas size (WxH or abbreviation)", "size" },
|
"set canvas size (WxH or abbreviation)", "size" },
|
||||||
|
|
||||||
|
/* grab options */
|
||||||
|
{ "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_channel },
|
||||||
|
"deprecated, use -channel", "channel" },
|
||||||
|
{ "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_standard },
|
||||||
|
"deprecated, use -standard", "standard" },
|
||||||
|
{ "isync", OPT_BOOL | OPT_EXPERT, { &input_sync }, "this option is deprecated and does nothing", "" },
|
||||||
|
|
||||||
/* muxer options */
|
/* muxer options */
|
||||||
{ "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
|
{ "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
|
||||||
"set the maximum demux-decode delay", "seconds" },
|
"set the maximum demux-decode delay", "seconds" },
|
||||||
@ -3887,7 +3814,7 @@ const OptionDef options[] = {
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if CONFIG_QSV
|
#if CONFIG_QSV
|
||||||
{ "qsv_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_qsv_device },
|
{ "qsv_device", HAS_ARG | OPT_STRING | OPT_EXPERT, { &qsv_device },
|
||||||
"set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
|
"set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
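Note: the option entries above mix two dispatch styles, and the rollback switches "filter_threads" and "qsv_device" from a .func_arg callback back to direct storage through a typed pointer (OPT_INT / OPT_STRING). The following is only a minimal, self-contained sketch of that table pattern with hypothetical names; it is not the actual cmdutils.h OptionDef definition.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for an option table entry: either a pointer to
     * an int to fill in directly, or a callback that parses the argument. */
    typedef struct DemoOption {
        const char *name;
        int        *int_dst;                     /* direct-storage style */
        int        (*func_arg)(const char *arg); /* callback style */
    } DemoOption;

    static int demo_nbthreads;

    static int demo_parse_threads(const char *arg)
    {
        demo_nbthreads = atoi(arg);
        return 0;
    }

    static const DemoOption demo_options[] = {
        { "filter_threads_direct",   &demo_nbthreads, NULL },
        { "filter_threads_callback", NULL,            demo_parse_threads },
        { NULL, NULL, NULL }
    };

    static int demo_apply(const char *name, const char *arg)
    {
        for (const DemoOption *o = demo_options; o->name; o++) {
            if (strcmp(o->name, name))
                continue;
            if (o->int_dst) { *o->int_dst = atoi(arg); return 0; }
            if (o->func_arg) return o->func_arg(arg);
        }
        return -1;
    }

    int main(void)
    {
        demo_apply("filter_threads_direct", "4");
        printf("threads = %d\n", demo_nbthreads);
        return 0;
    }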
@@ -31,7 +31,6 @@
 #include <stdint.h>

 #include "libavutil/avstring.h"
-#include "libavutil/channel_layout.h"
 #include "libavutil/eval.h"
 #include "libavutil/mathematics.h"
 #include "libavutil/pixdesc.h"
@@ -40,6 +39,7 @@
 #include "libavutil/fifo.h"
 #include "libavutil/parseutils.h"
 #include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
 #include "libavutil/time.h"
 #include "libavutil/bprint.h"
 #include "libavformat/avformat.h"
@@ -60,6 +60,8 @@

 #include "cmdutils.h"

+#include <assert.h>
+
 const char program_name[] = "ffplay";
 const int program_birth_year = 2003;

@@ -201,7 +203,7 @@ typedef struct Decoder {

 typedef struct VideoState {
 SDL_Thread *read_tid;
-const AVInputFormat *iformat;
+AVInputFormat *iformat;
 int abort_request;
 int force_refresh;
 int paused;
@@ -306,7 +308,7 @@ typedef struct VideoState {
 } VideoState;

 /* options specified by the user */
-static const AVInputFormat *file_iformat;
+static AVInputFormat *file_iformat;
 static const char *input_filename;
 static const char *window_title;
 static int default_width = 640;
@@ -963,10 +965,10 @@ static void set_sdl_yuv_conversion_mode(AVFrame *frame)
 mode = SDL_YUV_CONVERSION_JPEG;
 else if (frame->colorspace == AVCOL_SPC_BT709)
 mode = SDL_YUV_CONVERSION_BT709;
-else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
+else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
 mode = SDL_YUV_CONVERSION_BT601;
 }
-SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
+SDL_SetYUVConversionMode(mode);
 #endif
 }

@@ -1856,7 +1858,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
 AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
 AVCodecParameters *codecpar = is->video_st->codecpar;
 AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
-const AVDictionaryEntry *e = NULL;
+AVDictionaryEntry *e = NULL;
 int nb_pix_fmts = 0;
 int i, j;

@@ -1925,8 +1927,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
 } while (0)

 if (autorotate) {
-int32_t *displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
-double theta = get_rotation(displaymatrix);
+double theta = get_rotation(is->video_st);

 if (fabs(theta - 90) < 1.0) {
 INSERT_FILT("transpose", "clock");
@@ -1960,7 +1961,7 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
 int channels[2] = { 0, -1 };
 AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
 char aresample_swr_opts[512] = "";
-const AVDictionaryEntry *e = NULL;
+AVDictionaryEntry *e = NULL;
 char asrc_args[256];
 int ret;

@@ -2575,7 +2576,7 @@ static int stream_component_open(VideoState *is, int stream_index)
 const AVCodec *codec;
 const char *forced_codec_name = NULL;
 AVDictionary *opts = NULL;
-const AVDictionaryEntry *t = NULL;
+AVDictionaryEntry *t = NULL;
 int sample_rate, nb_channels;
 int64_t channel_layout;
 int ret = 0;
@@ -2760,7 +2761,7 @@ static int read_thread(void *arg)
 AVPacket *pkt = NULL;
 int64_t stream_start_time;
 int pkt_in_play_range = 0;
-const AVDictionaryEntry *t;
+AVDictionaryEntry *t;
 SDL_mutex *wait_mutex = SDL_CreateMutex();
 int scan_all_pmts_set = 0;
 int64_t pkt_ts;
@@ -3074,8 +3075,7 @@ static int read_thread(void *arg)
 return 0;
 }

-static VideoState *stream_open(const char *filename,
-const AVInputFormat *iformat)
+static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
 {
 VideoState *is;

@@ -3696,6 +3696,8 @@ int main(int argc, char **argv)
 #endif
 avformat_network_init();

+init_opts();
+
 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */

@@ -3746,10 +3748,6 @@ int main(int argc, char **argv)
 flags |= SDL_WINDOW_BORDERLESS;
 else
 flags |= SDL_WINDOW_RESIZABLE;
-
-#ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
-SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
-#endif
 window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
 if (window) {
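Note: several of the ffplay hunks above only drop a const qualifier on AVDictionaryEntry pointers obtained from av_dict_get(). A small self-contained sketch of that lookup pattern (illustrative only, not code from ffplay) is:

    #include <stdio.h>
    #include <libavutil/dict.h>

    int main(void)
    {
        AVDictionary *d = NULL;
        AVDictionaryEntry *e = NULL;   /* the rolled-back code keeps this non-const */

        av_dict_set(&d, "title", "example", 0);
        av_dict_set(&d, "artist", "nobody", 0);

        /* Walk every entry: an empty key plus AV_DICT_IGNORE_SUFFIX matches all keys. */
        while ((e = av_dict_get(d, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("%s=%s\n", e->key, e->value);

        av_dict_free(&d);
        return 0;
    }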
@@ -33,7 +33,6 @@
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/bprint.h"
-#include "libavutil/channel_layout.h"
 #include "libavutil/display.h"
 #include "libavutil/hash.h"
 #include "libavutil/hdr_dynamic_metadata.h"
@@ -118,11 +117,6 @@ static int use_byte_value_binary_prefix = 0;
 static int use_value_sexagesimal_format = 0;
 static int show_private_data = 1;

-#define SHOW_OPTIONAL_FIELDS_AUTO -1
-#define SHOW_OPTIONAL_FIELDS_NEVER 0
-#define SHOW_OPTIONAL_FIELDS_ALWAYS 1
-static int show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
-
 static char *print_format;
 static char *stream_specifier;
 static char *show_data_hash;
@@ -175,10 +169,6 @@ typedef enum {
 SECTION_ID_FRAME_SIDE_DATA,
 SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST,
 SECTION_ID_FRAME_SIDE_DATA_TIMECODE,
-SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST,
-SECTION_ID_FRAME_SIDE_DATA_COMPONENT,
-SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST,
-SECTION_ID_FRAME_SIDE_DATA_PIECE,
 SECTION_ID_FRAME_LOG,
 SECTION_ID_FRAME_LOGS,
 SECTION_ID_LIBRARY_VERSION,
@@ -223,13 +213,9 @@ static struct section sections[] = {
 [SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } },
 [SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
 [SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" },
-[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST, -1 } },
+[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, -1 } },
 [SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } },
 [SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } },
-[SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST] = { SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST, "components", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_COMPONENT, -1 } },
-[SECTION_ID_FRAME_SIDE_DATA_COMPONENT] = { SECTION_ID_FRAME_SIDE_DATA_COMPONENT, "component", 0, { SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST, -1 } },
-[SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST, "pieces", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_PIECE, -1 } },
-[SECTION_ID_FRAME_SIDE_DATA_PIECE] = { SECTION_ID_FRAME_SIDE_DATA_PIECE, "section", 0, { -1 } },
 [SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } },
 [SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, },
 [SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
@@ -271,7 +257,7 @@ static const OptionDef *options;
 /* FFprobe context */
 static const char *input_filename;
 static const char *print_input_filename;
-static const AVInputFormat *iformat = NULL;
+static AVInputFormat *iformat = NULL;

 static struct AVHashContext *hash;

@@ -588,7 +574,7 @@ static int writer_open(WriterContext **wctx, const Writer *writer, const char *a
 /* convert options to dictionary */
 if (args) {
 AVDictionary *opts = NULL;
-const AVDictionaryEntry *opt = NULL;
+AVDictionaryEntry *opt = NULL;

 if ((ret = av_dict_parse_string(&opts, args, "=", ":", 0)) < 0) {
 av_log(*wctx, AV_LOG_ERROR, "Failed to parse option string '%s' provided to writer context\n", args);
@@ -759,10 +745,8 @@ static inline int writer_print_string(WriterContext *wctx,
 const struct section *section = wctx->section[wctx->level];
 int ret = 0;

-if (show_optional_fields == SHOW_OPTIONAL_FIELDS_NEVER ||
-(show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO
-&& (flags & PRINT_STRING_OPT)
-&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS)))
+if ((flags & PRINT_STRING_OPT)
+&& !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
 return 0;

 if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
@@ -1159,10 +1143,8 @@ static void compact_print_section_header(WriterContext *wctx)
 if (parent_section && compact->has_nested_elems[wctx->level-1] &&
 (section->flags & SECTION_FLAG_IS_ARRAY)) {
 compact->terminate_line[wctx->level-1] = 0;
+printf("\n");
 }
-if (parent_section && !(parent_section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)) &&
-wctx->level && wctx->nb_item[wctx->level-1])
-printf("%c", compact->item_sep);
 if (compact->print_section &&
 !(section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
 printf("%s%c", section->name, compact->item_sep);
@@ -1678,6 +1660,13 @@ static av_cold int xml_init(WriterContext *wctx)
 CHECK_COMPLIANCE(show_private_data, "private");
 CHECK_COMPLIANCE(show_value_unit, "unit");
 CHECK_COMPLIANCE(use_value_prefix, "prefix");
+
+if (do_show_frames && do_show_packets) {
+av_log(wctx, AV_LOG_ERROR,
+"Interleaved frames and packets are not allowed in XSD. "
+"Select only one between the -show_frames and the -show_packets options.\n");
+return AVERROR(EINVAL);
+}
 }

 return 0;
@@ -1693,9 +1682,9 @@ static void xml_print_section_header(WriterContext *wctx)
 wctx->section[wctx->level-1] : NULL;

 if (wctx->level == 0) {
-const char *qual = " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
-"xmlns:ffprobe=\"http://www.ffmpeg.org/schema/ffprobe\" "
-"xsi:schemaLocation=\"http://www.ffmpeg.org/schema/ffprobe ffprobe.xsd\"";
+const char *qual = " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' "
+"xmlns:ffprobe='http://www.ffmpeg.org/schema/ffprobe' "
+"xsi:schemaLocation='http://www.ffmpeg.org/schema/ffprobe ffprobe.xsd'";

 printf("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
 printf("<%sffprobe%s>\n",
@@ -1817,16 +1806,6 @@ static void writer_register_all(void)
 writer_print_string(w, k, pbuf.str, 0); \
 } while (0)

-#define print_list_fmt(k, f, n, ...) do { \
-av_bprint_clear(&pbuf); \
-for (int idx = 0; idx < n; idx++) { \
-if (idx > 0) \
-av_bprint_chars(&pbuf, ' ', 1); \
-av_bprintf(&pbuf, f, __VA_ARGS__); \
-} \
-writer_print_string(w, k, pbuf.str, 0); \
-} while (0)
-
 #define print_int(k, v) writer_print_integer(w, k, v)
 #define print_q(k, v, s) writer_print_rational(w, k, v, s)
 #define print_str(k, v) writer_print_string(w, k, v, 0)
@@ -1856,7 +1835,7 @@ static void writer_register_all(void)

 static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id)
 {
-const AVDictionaryEntry *tag = NULL;
+AVDictionaryEntry *tag = NULL;
 int ret = 0;

 if (!tags)
@@ -1872,153 +1851,6 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id
 return ret;
 }

-static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
-{
-if (!dovi)
-return;
-
-{
-const AVDOVIRpuDataHeader *hdr = av_dovi_get_header(dovi);
-const AVDOVIDataMapping *mapping = av_dovi_get_mapping(dovi);
-const AVDOVIColorMetadata *color = av_dovi_get_color(dovi);
-AVBPrint pbuf;
-
-av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
-
-// header
-print_int("rpu_type", hdr->rpu_type);
-print_int("rpu_format", hdr->rpu_format);
-print_int("vdr_rpu_profile", hdr->vdr_rpu_profile);
-print_int("vdr_rpu_level", hdr->vdr_rpu_level);
-print_int("chroma_resampling_explicit_filter_flag",
-hdr->chroma_resampling_explicit_filter_flag);
-print_int("coef_data_type", hdr->coef_data_type);
-print_int("coef_log2_denom", hdr->coef_log2_denom);
-print_int("vdr_rpu_normalized_idc", hdr->vdr_rpu_normalized_idc);
-print_int("bl_video_full_range_flag", hdr->bl_video_full_range_flag);
-print_int("bl_bit_depth", hdr->bl_bit_depth);
-print_int("el_bit_depth", hdr->el_bit_depth);
-print_int("vdr_bit_depth", hdr->vdr_bit_depth);
-print_int("spatial_resampling_filter_flag",
-hdr->spatial_resampling_filter_flag);
-print_int("el_spatial_resampling_filter_flag",
-hdr->el_spatial_resampling_filter_flag);
-print_int("disable_residual_flag", hdr->disable_residual_flag);
-
-// data mapping values
-print_int("vdr_rpu_id", mapping->vdr_rpu_id);
-print_int("mapping_color_space", mapping->mapping_color_space);
-print_int("mapping_chroma_format_idc",
-mapping->mapping_chroma_format_idc);
-
-print_int("nlq_method_idc", mapping->nlq_method_idc);
-switch (mapping->nlq_method_idc) {
-case AV_DOVI_NLQ_NONE:
-print_str("nlq_method_idc_name", "none");
-break;
-case AV_DOVI_NLQ_LINEAR_DZ:
-print_str("nlq_method_idc_name", "linear_dz");
-break;
-default:
-print_str("nlq_method_idc_name", "unknown");
-break;
-}
-
-print_int("num_x_partitions", mapping->num_x_partitions);
-print_int("num_y_partitions", mapping->num_y_partitions);
-
-writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST);
-
-for (int c = 0; c < 3; c++) {
-const AVDOVIReshapingCurve *curve = &mapping->curves[c];
-writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_COMPONENT);
-
-print_list_fmt("pivots", "%"PRIu16, curve->num_pivots, curve->pivots[idx]);
-
-writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST);
-for (int i = 0; i < curve->num_pivots - 1; i++) {
-
-writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_PIECE);
-print_int("mapping_idc", curve->mapping_idc[i]);
-switch (curve->mapping_idc[i]) {
-case AV_DOVI_MAPPING_POLYNOMIAL:
-print_str("mapping_idc_name", "polynomial");
-print_int("poly_order", curve->poly_order[i]);
-print_list_fmt("poly_coef", "%"PRIi64,
-curve->poly_order[i] + 1,
-curve->poly_coef[i][idx]);
-break;
-case AV_DOVI_MAPPING_MMR:
-print_str("mapping_idc_name", "mmr");
-print_int("mmr_order", curve->mmr_order[i]);
-print_int("mmr_constant", curve->mmr_constant[i]);
-print_list_fmt("mmr_coef", "%"PRIi64,
-curve->mmr_order[i] * 7,
-curve->mmr_coef[i][0][idx]);
-break;
-default:
-print_str("mapping_idc_name", "unknown");
-break;
-}
-
-// SECTION_ID_FRAME_SIDE_DATA_PIECE
-writer_print_section_footer(w);
-}
-
-// SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST
-writer_print_section_footer(w);
-
-if (mapping->nlq_method_idc != AV_DOVI_NLQ_NONE) {
-const AVDOVINLQParams *nlq = &mapping->nlq[c];
-print_int("nlq_offset", nlq->nlq_offset);
-print_int("vdr_in_max", nlq->vdr_in_max);
-
-switch (mapping->nlq_method_idc) {
-case AV_DOVI_NLQ_LINEAR_DZ:
-print_int("linear_deadzone_slope", nlq->linear_deadzone_slope);
-print_int("linear_deadzone_threshold", nlq->linear_deadzone_threshold);
-break;
-}
-}
-
-// SECTION_ID_FRAME_SIDE_DATA_COMPONENT
-writer_print_section_footer(w);
-}
-
-// SECTION_ID_FRAME_SIDE_DATA_COMPONENT_LIST
-writer_print_section_footer(w);
-
-// color metadata
-print_int("dm_metadata_id", color->dm_metadata_id);
-print_int("scene_refresh_flag", color->scene_refresh_flag);
-print_list_fmt("ycc_to_rgb_matrix", "%d/%d",
-FF_ARRAY_ELEMS(color->ycc_to_rgb_matrix),
-color->ycc_to_rgb_matrix[idx].num,
-color->ycc_to_rgb_matrix[idx].den);
-print_list_fmt("ycc_to_rgb_offset", "%d/%d",
-FF_ARRAY_ELEMS(color->ycc_to_rgb_offset),
-color->ycc_to_rgb_offset[idx].num,
-color->ycc_to_rgb_offset[idx].den);
-print_list_fmt("rgb_to_lms_matrix", "%d/%d",
-FF_ARRAY_ELEMS(color->rgb_to_lms_matrix),
-color->rgb_to_lms_matrix[idx].num,
-color->rgb_to_lms_matrix[idx].den);
-print_int("signal_eotf", color->signal_eotf);
-print_int("signal_eotf_param0", color->signal_eotf_param0);
-print_int("signal_eotf_param1", color->signal_eotf_param1);
-print_int("signal_eotf_param2", color->signal_eotf_param2);
-print_int("signal_bit_depth", color->signal_bit_depth);
-print_int("signal_color_space", color->signal_color_space);
-print_int("signal_chroma_format", color->signal_chroma_format);
-print_int("signal_full_range_flag", color->signal_full_range_flag);
-print_int("source_min_pq", color->source_min_pq);
-print_int("source_max_pq", color->source_max_pq);
-print_int("source_diagonal", color->source_diagonal);
-
-av_bprint_finalize(&pbuf, NULL);
-}
-}
-
 static void print_dynamic_hdr10_plus(WriterContext *w, const AVDynamicHDRPlus *metadata)
 {
 if (!metadata)
@@ -2197,23 +2029,6 @@ static void print_pkt_side_data(WriterContext *w,
 print_int("el_present_flag", dovi->el_present_flag);
 print_int("bl_present_flag", dovi->bl_present_flag);
 print_int("dv_bl_signal_compatibility_id", dovi->dv_bl_signal_compatibility_id);
-} else if (sd->type == AV_PKT_DATA_AUDIO_SERVICE_TYPE) {
-enum AVAudioServiceType *t = (enum AVAudioServiceType *)sd->data;
-print_int("service_type", *t);
-} else if (sd->type == AV_PKT_DATA_MPEGTS_STREAM_ID) {
-print_int("id", *sd->data);
-} else if (sd->type == AV_PKT_DATA_CPB_PROPERTIES) {
-const AVCPBProperties *prop = (AVCPBProperties *)sd->data;
-print_int("max_bitrate", prop->max_bitrate);
-print_int("min_bitrate", prop->min_bitrate);
-print_int("avg_bitrate", prop->avg_bitrate);
-print_int("buffer_size", prop->buffer_size);
-print_int("vbv_delay", prop->vbv_delay);
-} else if (sd->type == AV_PKT_DATA_WEBVTT_IDENTIFIER ||
-sd->type == AV_PKT_DATA_WEBVTT_SETTINGS) {
-if (do_show_data)
-writer_print_data(w, "data", sd->data, sd->size);
-writer_print_data_hash(w, "data_hash", sd->data, sd->size);
 }
 writer_print_section_footer(w);
 }
@@ -2350,7 +2165,7 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
 pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');

 if (pkt->side_data_elems) {
-size_t size;
+int size;
 const uint8_t *side_metadata;

 side_metadata = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
@@ -2415,8 +2230,8 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
 else print_str_opt("media_type", "unknown");
 print_int("stream_index", stream->index);
 print_int("key_frame", frame->key_frame);
-print_ts ("pts", frame->pts);
-print_time("pts_time", frame->pts, &stream->time_base);
+print_ts ("pkt_pts", frame->pts);
+print_time("pkt_pts_time", frame->pts, &stream->time_base);
 print_ts ("pkt_dts", frame->pkt_dts);
 print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
 print_ts ("best_effort_timestamp", frame->best_effort_timestamp);
@@ -2531,12 +2346,10 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
 print_int("max_content", metadata->MaxCLL);
 print_int("max_average", metadata->MaxFALL);
 } else if (sd->type == AV_FRAME_DATA_ICC_PROFILE) {
-const AVDictionaryEntry *tag = av_dict_get(sd->metadata, "name", NULL, AV_DICT_MATCH_CASE);
+AVDictionaryEntry *tag = av_dict_get(sd->metadata, "name", NULL, AV_DICT_MATCH_CASE);
 if (tag)
 print_str(tag->key, tag->value);
 print_int("size", sd->size);
-} else if (sd->type == AV_FRAME_DATA_DOVI_METADATA) {
-print_dovi_metadata(w, (const AVDOVIMetadata *)sd->data);
 }
 writer_print_section_footer(w);
 }
@@ -2825,7 +2638,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
 print_int("coded_width", dec_ctx->coded_width);
 print_int("coded_height", dec_ctx->coded_height);
 print_int("closed_captions", !!(dec_ctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS));
-print_int("film_grain", !!(dec_ctx->properties & FF_CODEC_PROPERTY_FILM_GRAIN));
 }
 print_int("has_b_frames", par->video_delay);
 sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
@@ -2902,7 +2714,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
 const AVOption *opt = NULL;
 while (opt = av_opt_next(dec_ctx->priv_data,opt)) {
 uint8_t *str;
-if (!(opt->flags & AV_OPT_FLAG_EXPORT)) continue;
+if (opt->flags) continue;
 if (av_opt_get(dec_ctx->priv_data, opt->name, 0, &str) >= 0) {
 print_str(opt->name, str);
 av_free(str);
@@ -2936,12 +2748,8 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
 if (do_show_data)
 writer_print_data(w, "extradata", par->extradata,
 par->extradata_size);
-
-if (par->extradata_size > 0) {
-print_int("extradata_size", par->extradata_size);
-writer_print_data_hash(w, "extradata_hash", par->extradata,
-par->extradata_size);
-}
+writer_print_data_hash(w, "extradata_hash", par->extradata,
+par->extradata_size);

 /* Print disposition information */
 #define PRINT_DISPOSITION(flagname, name) do { \
@@ -2962,11 +2770,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
 PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
 PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
 PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
-PRINT_DISPOSITION(CAPTIONS, "captions");
-PRINT_DISPOSITION(DESCRIPTIONS, "descriptions");
-PRINT_DISPOSITION(METADATA, "metadata");
-PRINT_DISPOSITION(DEPENDENT, "dependent");
-PRINT_DISPOSITION(STILL_IMAGE, "still_image");
 writer_print_section_footer(w);
 }

@@ -3014,6 +2817,10 @@ static int show_program(WriterContext *w, InputFile *ifile, AVProgram *program)
 print_int("nb_streams", program->nb_stream_indexes);
 print_int("pmt_pid", program->pmt_pid);
 print_int("pcr_pid", program->pcr_pid);
+print_ts("start_pts", program->start_time);
+print_time("start_time", program->start_time, &AV_TIME_BASE_Q);
+print_ts("end_pts", program->end_time);
+print_time("end_time", program->end_time, &AV_TIME_BASE_Q);
 if (do_show_program_tags)
 ret = show_tags(w, program->metadata, SECTION_ID_PROGRAM_TAGS);
 if (ret < 0)
@@ -3127,7 +2934,7 @@ static int open_input_file(InputFile *ifile, const char *filename,
 {
 int err, i;
 AVFormatContext *fmt_ctx = NULL;
-const AVDictionaryEntry *t = NULL;
+AVDictionaryEntry *t = NULL;
 int scan_all_pmts_set = 0;

 fmt_ctx = avformat_alloc_context();
@@ -3173,7 +2980,8 @@ static int open_input_file(InputFile *ifile, const char *filename,

 av_dump_format(fmt_ctx, 0, filename, 0);

-ifile->streams = av_calloc(fmt_ctx->nb_streams, sizeof(*ifile->streams));
+ifile->streams = av_mallocz_array(fmt_ctx->nb_streams,
+sizeof(*ifile->streams));
 if (!ifile->streams)
 exit(1);
 ifile->nb_streams = fmt_ctx->nb_streams;
@@ -3245,7 +3053,8 @@ static void close_input_file(InputFile *ifile)

 /* close decoder for each stream */
 for (i = 0; i < ifile->nb_streams; i++)
-avcodec_free_context(&ifile->streams[i].dec_ctx);
+if (ifile->streams[i].st->codecpar->codec_id != AV_CODEC_ID_NONE)
+avcodec_free_context(&ifile->streams[i].dec_ctx);

 av_freep(&ifile->streams);
 ifile->nb_streams = 0;
@@ -3437,17 +3246,6 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
 writer_print_section_footer(w);
 }

-static int opt_show_optional_fields(void *optctx, const char *opt, const char *arg)
-{
-if (!av_strcasecmp(arg, "always")) show_optional_fields = SHOW_OPTIONAL_FIELDS_ALWAYS;
-else if (!av_strcasecmp(arg, "never")) show_optional_fields = SHOW_OPTIONAL_FIELDS_NEVER;
-else if (!av_strcasecmp(arg, "auto")) show_optional_fields = SHOW_OPTIONAL_FIELDS_AUTO;
-
-if (show_optional_fields == SHOW_OPTIONAL_FIELDS_AUTO && av_strcasecmp(arg, "auto"))
-show_optional_fields = parse_number_or_die("show_optional_fields", arg, OPT_INT, SHOW_OPTIONAL_FIELDS_AUTO, SHOW_OPTIONAL_FIELDS_ALWAYS);
-return 0;
-}
-
 static int opt_format(void *optctx, const char *opt, const char *arg)
 {
 iformat = av_find_input_format(arg);
@@ -3835,7 +3633,6 @@ static const OptionDef real_options[] = {
 { "show_library_versions", 0, { .func_arg = &opt_show_library_versions }, "show library versions" },
 { "show_versions", 0, { .func_arg = &opt_show_versions }, "show program and library versions" },
 { "show_pixel_formats", 0, { .func_arg = &opt_show_pixel_formats }, "show pixel format descriptions" },
-{ "show_optional_fields", HAS_ARG, { .func_arg = &opt_show_optional_fields }, "show optional fields" },
 { "show_private_data", OPT_BOOL, { &show_private_data }, "show private data" },
 { "private", OPT_BOOL, { &show_private_data }, "same as show_private_data" },
 { "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },
@@ -3887,6 +3684,7 @@ int main(int argc, char **argv)
 options = real_options;
 parse_loglevel(argc, argv, options);
 avformat_network_init();
+init_opts();
 #if CONFIG_AVDEVICE
 avdevice_register_all();
 #endif
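Note: the show_packet hunk above changes the side-data size variable from size_t back to int, matching the older av_packet_get_side_data() signature this revert targets. A hedged, stand-alone sketch of reading packet side data against that older (FFmpeg 4.3-era) signature, assuming a packet obtained from av_read_frame(), is:

    #include <libavcodec/avcodec.h>
    #include <libavutil/log.h>

    /* Assumes the pre-5.0 prototype restored by this rollback, where the
     * size out-parameter is an int (or the intermediate buffer_size_t typedef)
     * rather than a size_t. */
    static void dump_palette_side_data(const AVPacket *pkt)
    {
        int size = 0;
        uint8_t *pal = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &size);

        if (pal && size == AVPALETTE_SIZE)
            av_log(NULL, AV_LOG_INFO, "packet carries a %d-byte palette\n", size);
        else if (pal)
            av_log(NULL, AV_LOG_WARNING, "unexpected palette size %d\n", size);
    }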
@@ -144,7 +144,7 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
 return avpkt->size;
 }

-const AVCodec ff_zero12v_decoder = {
+AVCodec ff_zero12v_decoder = {
 .name = "012v",
 .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
 .type = AVMEDIA_TYPE_VIDEO,
@@ -152,5 +152,4 @@ const AVCodec ff_zero12v_decoder = {
 .init = zero12v_decode_init,
 .decode = zero12v_decode_frame,
 .capabilities = AV_CODEC_CAP_DR1,
-.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
@@ -31,7 +31,6 @@
 #include "libavutil/imgutils.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/mem_internal.h"
-#include "libavutil/thread.h"
 #include "avcodec.h"
 #include "blockdsp.h"
 #include "bswapdsp.h"
@@ -247,7 +246,7 @@ static void idct(int16_t block[64])
 }
 }

-static av_cold void init_vlcs(void)
+static av_cold void init_vlcs(FourXContext *f)
 {
 static VLC_TYPE table[2][4][32][2];
 int i, j;
@@ -989,7 +988,6 @@ static av_cold int decode_end(AVCodecContext *avctx)

 static av_cold int decode_init(AVCodecContext *avctx)
 {
-static AVOnce init_static_once = AV_ONCE_INIT;
 FourXContext * const f = avctx->priv_data;
 int ret;

@@ -1017,18 +1015,17 @@ static av_cold int decode_init(AVCodecContext *avctx)
 ff_blockdsp_init(&f->bdsp, avctx);
 ff_bswapdsp_init(&f->bbdsp);
 f->avctx = avctx;
+init_vlcs(f);

 if (f->version > 2)
 avctx->pix_fmt = AV_PIX_FMT_RGB565;
 else
 avctx->pix_fmt = AV_PIX_FMT_BGR555;

-ff_thread_once(&init_static_once, init_vlcs);
-
 return 0;
 }

-const AVCodec ff_fourxm_decoder = {
+AVCodec ff_fourxm_decoder = {
 .name = "4xm",
 .long_name = NULL_IF_CONFIG_SMALL("4X Movie"),
 .type = AVMEDIA_TYPE_VIDEO,
@@ -1038,5 +1035,4 @@ const AVCodec ff_fourxm_decoder = {
 .close = decode_end,
 .decode = decode_frame,
 .capabilities = AV_CODEC_CAP_DR1,
-.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
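Note: the 4xm hunks drop the AVOnce/ff_thread_once guard and go back to calling init_vlcs() per decoder instance. The once-guard idiom being removed is essentially the standard pthread_once pattern; an illustrative stand-alone version (not FFmpeg code) looks like this:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t init_once = PTHREAD_ONCE_INIT;

    /* Runs exactly once, no matter how many decoder instances are opened. */
    static void init_static_tables(void)
    {
        puts("building shared VLC tables");
    }

    static int demo_decoder_init(void)
    {
        pthread_once(&init_once, init_static_tables);
        return 0;
    }

    int main(void)
    {
        demo_decoder_init();
        demo_decoder_init();   /* second call does not re-run the table setup */
        return 0;
    }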
@@ -37,7 +37,6 @@
 #include "libavutil/internal.h"
 #include "libavutil/intreadwrite.h"
 #include "avcodec.h"
-#include "decode.h"
 #include "internal.h"


@@ -71,9 +70,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 unsigned char *planemap = c->planemap;
 int ret;

-if (buf_size < planes * height *2)
-return AVERROR_INVALIDDATA;
-
 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
 return ret;

@@ -126,7 +122,16 @@ static int decode_frame(AVCodecContext *avctx, void *data,
 }

 if (avctx->bits_per_coded_sample <= 8) {
-frame->palette_has_changed = ff_copy_palette(c->pal, avpkt, avctx);
+buffer_size_t size;
+const uint8_t *pal = av_packet_get_side_data(avpkt,
+AV_PKT_DATA_PALETTE,
+&size);
+if (pal && size == AVPALETTE_SIZE) {
+frame->palette_has_changed = 1;
+memcpy(c->pal, pal, AVPALETTE_SIZE);
+} else if (pal) {
+av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size);
+}

 memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
 }
@@ -176,7 +181,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
 return 0;
 }

-const AVCodec ff_eightbps_decoder = {
+AVCodec ff_eightbps_decoder = {
 .name = "8bps",
 .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),
 .type = AVMEDIA_TYPE_VIDEO,
@@ -184,7 +184,7 @@ static av_cold int eightsvx_decode_close(AVCodecContext *avctx)
 }

 #if CONFIG_EIGHTSVX_FIB_DECODER
-const AVCodec ff_eightsvx_fib_decoder = {
+AVCodec ff_eightsvx_fib_decoder = {
 .name = "8svx_fib",
 .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
 .type = AVMEDIA_TYPE_AUDIO,
@@ -196,11 +196,10 @@ const AVCodec ff_eightsvx_fib_decoder = {
 .capabilities = AV_CODEC_CAP_DR1,
 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
 AV_SAMPLE_FMT_NONE },
-.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif
 #if CONFIG_EIGHTSVX_EXP_DECODER
-const AVCodec ff_eightsvx_exp_decoder = {
+AVCodec ff_eightsvx_exp_decoder = {
 .name = "8svx_exp",
 .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
 .type = AVMEDIA_TYPE_AUDIO,
@@ -212,6 +211,5 @@ const AVCodec ff_eightsvx_exp_decoder = {
 .capabilities = AV_CODEC_CAP_DR1,
 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
 AV_SAMPLE_FMT_NONE },
-.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
 };
 #endif
@@ -12,7 +12,6 @@ HEADERS = ac3_parser.h \
 codec_id.h \
 codec_par.h \
 d3d11va.h \
-defs.h \
 dirac.h \
 dv_profile.h \
 dxva2.h \
@@ -20,6 +19,7 @@ HEADERS = ac3_parser.h \
 mediacodec.h \
 packet.h \
 qsv.h \
+vaapi.h \
 vdpau.h \
 version.h \
 videotoolbox.h \
@@ -32,7 +32,9 @@ OBJS = ac3_parser.o \
 avcodec.o \
 avdct.o \
 avpacket.o \
+avpicture.o \
 bitstream.o \
+bitstream_filter.o \
 bitstream_filters.o \
 bsf.o \
 codec_desc.o \
@@ -60,7 +62,7 @@ OBJS = ac3_parser.o \
 # subsystems
 OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
 OBJS-$(CONFIG_AC3DSP) += ac3dsp.o ac3.o ac3tab.o
-OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio_sample_rates.o
+OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio.o
 OBJS-$(CONFIG_AMF) += amfenc.o
 OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
 OBJS-$(CONFIG_ATSC_A53) += atsc_a53.o
@@ -77,7 +79,6 @@ OBJS-$(CONFIG_CBS_MPEG2) += cbs_mpeg2.o
 OBJS-$(CONFIG_CBS_VP9) += cbs_vp9.o
 OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
 OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
-OBJS-$(CONFIG_DOVI_RPU) += dovi_rpu.o
 OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
 OBJS-$(CONFIG_EXIF) += exif.o tiff_common.o
 OBJS-$(CONFIG_FAANDCT) += faandct.o
@@ -118,22 +119,20 @@ OBJS-$(CONFIG_MDCT) += mdct_float.o mdct_fixed_32.o
 OBJS-$(CONFIG_ME_CMP) += me_cmp.o
 OBJS-$(CONFIG_MEDIACODEC) += mediacodecdec_common.o mediacodec_surface.o mediacodec_wrapper.o mediacodec_sw_buffer.o
 OBJS-$(CONFIG_MPEG_ER) += mpeg_er.o
-OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodec_common.o \
-mpegaudiodata.o
+OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodec_common.o
 OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
 mpegaudiodsp_data.o \
 mpegaudiodsp_fixed.o \
 mpegaudiodsp_float.o
-OBJS-$(CONFIG_MPEGAUDIOHEADER) += mpegaudiodecheader.o mpegaudiotabs.o
-OBJS-$(CONFIG_MPEG4AUDIO) += mpeg4audio.o mpeg4audio_sample_rates.o
+OBJS-$(CONFIG_MPEGAUDIOHEADER) += mpegaudiodecheader.o mpegaudiodata.o
 OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideodsp.o rl.o \
 mpegvideo_motion.o mpegutils.o \
-mpegvideodata.o mpegpicture.o \
-to_upper4.o
+mpegvideodata.o mpegpicture.o
 OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
 motion_est.o ratecontrol.o \
 mpegvideoencdsp.o
 OBJS-$(CONFIG_MSS34DSP) += mss34dsp.o
+OBJS-$(CONFIG_NVENC) += nvenc.o
 OBJS-$(CONFIG_PIXBLOCKDSP) += pixblockdsp.o
 OBJS-$(CONFIG_QPELDSP) += qpeldsp.o
 OBJS-$(CONFIG_QSV) += qsv.o
@@ -142,6 +141,7 @@ OBJS-$(CONFIG_QSVENC) += qsvenc.o
 OBJS-$(CONFIG_RANGECODER) += rangecoder.o
 OBJS-$(CONFIG_RDFT) += rdft.o
 OBJS-$(CONFIG_RV34DSP) += rv34dsp.o
+OBJS-$(CONFIG_SHARED) += log2_tab.o reverse.o
 OBJS-$(CONFIG_SINEWIN) += sinewin.o
 OBJS-$(CONFIG_SNAPPY) += snappy.o
 OBJS-$(CONFIG_STARTCODE) += startcode.o
@@ -163,10 +163,10 @@ OBJS-$(CONFIG_ZERO12V_DECODER) += 012v.o
 OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
 OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
 OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps_common.o aacps_float.o \
-kbdwin.o \
+mpeg4audio.o kbdwin.o \
 sbrdsp.o aacpsdsp_float.o cbrt_data.o
 OBJS-$(CONFIG_AAC_FIXED_DECODER) += aacdec_fixed.o aactab.o aacsbr_fixed.o aacps_common.o aacps_fixed.o \
-kbdwin.o \
+mpeg4audio.o kbdwin.o \
 sbrdsp_fixed.o aacpsdsp_fixed.o cbrt_data_fixed.o
 OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o aacenctab.o \
 aacpsy.o aactab.o \
@@ -174,14 +174,11 @@ OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o aacenctab.o \
 aacenc_tns.o \
 aacenc_ltp.o \
 aacenc_pred.o \
-psymodel.o kbdwin.o \
-mpeg4audio_sample_rates.o
+psymodel.o mpeg4audio.o kbdwin.o
 OBJS-$(CONFIG_AAC_MF_ENCODER) += mfenc.o mf_utils.o
 OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
-OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o \
-kbdwin.o ac3tab.o ac3_channel_layout_tab.o
-OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o \
-kbdwin.o ac3tab.o ac3_channel_layout_tab.o
+OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
+OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o ac3tab.o
 OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
 ac3.o kbdwin.o
 OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o kbdwin.o
@@ -193,7 +190,7 @@ OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o alacdsp.o
 OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
 OBJS-$(CONFIG_ALIAS_PIX_DECODER) += aliaspixdec.o
 OBJS-$(CONFIG_ALIAS_PIX_ENCODER) += aliaspixenc.o
-OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mlz.o
+OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mlz.o mpeg4audio.o
 OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
 celp_math.o acelp_filters.o \
 acelp_vectors.o \
@@ -202,7 +199,8 @@ OBJS-$(CONFIG_AMRWB_DECODER) += amrwbdec.o celp_filters.o \
 celp_math.o acelp_filters.o \
 acelp_vectors.o \
 acelp_pitch_delay.o
-OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpegenc_common.o
+OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpegenc_common.o \
+mjpegenc_huffman.o
 OBJS-$(CONFIG_ANM_DECODER) += anm.o
 OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
 OBJS-$(CONFIG_APE_DECODER) += apedec.o
@@ -248,8 +246,7 @@ OBJS-$(CONFIG_BINK_DECODER) += bink.o binkdsp.o
 OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o
 OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o
 OBJS-$(CONFIG_BINTEXT_DECODER) += bintext.o cga_data.o
-OBJS-$(CONFIG_BITPACKED_DECODER) += bitpacked_dec.o
-OBJS-$(CONFIG_BITPACKED_ENCODER) += bitpacked_enc.o
+OBJS-$(CONFIG_BITPACKED_DECODER) += bitpacked.o
 OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
 OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
 OBJS-$(CONFIG_BMV_AUDIO_DECODER) += bmvaudio.o
@@ -279,8 +276,7 @@ OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
 OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
 OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadata.o dcahuff.o \
 dca_core.o dca_exss.o dca_xll.o dca_lbr.o \
-dcadsp.o dcadct.o dca_sample_rate_tab.o \
-synth_filter.o
+dcadsp.o dcadct.o synth_filter.o
|
|
||||||
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dcadata.o dcahuff.o \
|
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dcadata.o dcahuff.o \
|
||||||
dcaadpcm.o
|
dcaadpcm.o
|
||||||
OBJS-$(CONFIG_DDS_DECODER) += dds.o
|
OBJS-$(CONFIG_DDS_DECODER) += dds.o
|
||||||
@ -318,8 +314,7 @@ OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
|
|||||||
mpeg12data.o
|
mpeg12data.o
|
||||||
OBJS-$(CONFIG_EATGQ_DECODER) += eatgq.o eaidct.o
|
OBJS-$(CONFIG_EATGQ_DECODER) += eatgq.o eaidct.o
|
||||||
OBJS-$(CONFIG_EATGV_DECODER) += eatgv.o
|
OBJS-$(CONFIG_EATGV_DECODER) += eatgv.o
|
||||||
OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o mpeg12.o \
|
OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o mpeg12.o mpeg12data.o mpegvideodata.o rl.o
|
||||||
mpeg12data.o mpegvideodata.o
|
|
||||||
OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
|
OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
|
||||||
OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER) += 8svx.o
|
OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER) += 8svx.o
|
||||||
OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
|
OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
|
||||||
@ -355,7 +350,6 @@ OBJS-$(CONFIG_G723_1_ENCODER) += g723_1enc.o g723_1.o \
|
|||||||
acelp_vectors.o celp_filters.o celp_math.o
|
acelp_vectors.o celp_filters.o celp_math.o
|
||||||
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o celp_filters.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
|
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o celp_filters.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
|
||||||
OBJS-$(CONFIG_GDV_DECODER) += gdv.o
|
OBJS-$(CONFIG_GDV_DECODER) += gdv.o
|
||||||
OBJS-$(CONFIG_GEM_DECODER) += gemdec.o
|
|
||||||
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
|
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
|
||||||
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
|
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
|
||||||
OBJS-$(CONFIG_GREMLIN_DPCM_DECODER) += dpcm.o
|
OBJS-$(CONFIG_GREMLIN_DPCM_DECODER) += dpcm.o
|
||||||
@ -375,13 +369,15 @@ OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \
|
|||||||
h264_direct.o h264_loopfilter.o \
|
h264_direct.o h264_loopfilter.o \
|
||||||
h264_mb.o h264_picture.o \
|
h264_mb.o h264_picture.o \
|
||||||
h264_refs.o h264_sei.o \
|
h264_refs.o h264_sei.o \
|
||||||
h264_slice.o h264data.o h274.o
|
h264_slice.o h264data.o
|
||||||
OBJS-$(CONFIG_H264_AMF_ENCODER) += amfenc_h264.o
|
OBJS-$(CONFIG_H264_AMF_ENCODER) += amfenc_h264.o
|
||||||
OBJS-$(CONFIG_H264_CUVID_DECODER) += cuviddec.o
|
OBJS-$(CONFIG_H264_CUVID_DECODER) += cuviddec.o
|
||||||
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
|
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
|
||||||
OBJS-$(CONFIG_H264_MF_ENCODER) += mfenc.o mf_utils.o
|
OBJS-$(CONFIG_H264_MF_ENCODER) += mfenc.o mf_utils.o
|
||||||
OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
|
OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
|
||||||
OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc_h264.o nvenc.o
|
OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc_h264.o
|
||||||
|
OBJS-$(CONFIG_NVENC_ENCODER) += nvenc_h264.o
|
||||||
|
OBJS-$(CONFIG_NVENC_H264_ENCODER) += nvenc_h264.o
|
||||||
OBJS-$(CONFIG_H264_OMX_ENCODER) += omx.o
|
OBJS-$(CONFIG_H264_OMX_ENCODER) += omx.o
|
||||||
OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec.o
|
OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec.o
|
||||||
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
|
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
|
||||||
@ -396,13 +392,13 @@ OBJS-$(CONFIG_HCA_DECODER) += hcadec.o
|
|||||||
OBJS-$(CONFIG_HCOM_DECODER) += hcom.o
|
OBJS-$(CONFIG_HCOM_DECODER) += hcom.o
|
||||||
OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
|
OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
|
||||||
hevc_cabac.o hevc_refs.o hevcpred.o \
|
hevc_cabac.o hevc_refs.o hevcpred.o \
|
||||||
hevcdsp.o hevc_filter.o hevc_data.o \
|
hevcdsp.o hevc_filter.o hevc_data.o
|
||||||
h274.o
|
|
||||||
OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o
|
OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o
|
||||||
OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o
|
OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o
|
||||||
OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
|
OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
|
||||||
OBJS-$(CONFIG_HEVC_MF_ENCODER) += mfenc.o mf_utils.o
|
OBJS-$(CONFIG_HEVC_MF_ENCODER) += mfenc.o mf_utils.o
|
||||||
OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc_hevc.o nvenc.o
|
OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc_hevc.o
|
||||||
|
OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc_hevc.o
|
||||||
OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec.o
|
OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec.o
|
||||||
OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \
|
OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \
|
||||||
hevc_data.o
|
hevc_data.o
|
||||||
@ -410,7 +406,6 @@ OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o
|
|||||||
OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o
|
OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o
|
||||||
OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||||
OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
|
OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
|
||||||
OBJS-$(CONFIG_HEVC_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o
|
|
||||||
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
|
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
|
||||||
OBJS-$(CONFIG_HQ_HQA_DECODER) += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
|
OBJS-$(CONFIG_HQ_HQA_DECODER) += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
|
||||||
canopus.o
|
canopus.o
|
||||||
@ -476,19 +471,17 @@ OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec_fixed.o
|
|||||||
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o
|
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o
|
||||||
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec_fixed.o
|
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec_fixed.o
|
||||||
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc_float.o mpegaudio.o \
|
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc_float.o mpegaudio.o \
|
||||||
mpegaudiodata.o mpegaudiodsp_data.o \
|
mpegaudiodata.o mpegaudiodsp_data.o
|
||||||
mpegaudiotabs.o
|
|
||||||
OBJS-$(CONFIG_MP2FIXED_ENCODER) += mpegaudioenc_fixed.o mpegaudio.o \
|
OBJS-$(CONFIG_MP2FIXED_ENCODER) += mpegaudioenc_fixed.o mpegaudio.o \
|
||||||
mpegaudiodata.o mpegaudiodsp_data.o \
|
mpegaudiodata.o mpegaudiodsp_data.o
|
||||||
mpegaudiotabs.o
|
|
||||||
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
|
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
|
||||||
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec_fixed.o
|
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec_fixed.o
|
||||||
OBJS-$(CONFIG_MP3_MF_ENCODER) += mfenc.o mf_utils.o
|
OBJS-$(CONFIG_MP3_MF_ENCODER) += mfenc.o mf_utils.o
|
||||||
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec_fixed.o
|
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec_fixed.o
|
||||||
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
|
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
|
||||||
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
|
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
|
||||||
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o
|
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o mpeg4audio.o
|
||||||
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o
|
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
|
||||||
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
|
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
|
||||||
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
|
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
|
||||||
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
|
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
|
||||||
@ -520,7 +513,6 @@ OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
|||||||
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
|
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
|
||||||
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||||
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
|
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4enc.o msmpeg4.o msmpeg4data.o
|
||||||
OBJS-$(CONFIG_MSNSIREN_DECODER) += siren.o
|
|
||||||
OBJS-$(CONFIG_MSP2_DECODER) += msp2dec.o
|
OBJS-$(CONFIG_MSP2_DECODER) += msp2dec.o
|
||||||
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
|
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
|
||||||
OBJS-$(CONFIG_MSS1_DECODER) += mss1.o mss12.o
|
OBJS-$(CONFIG_MSS1_DECODER) += mss1.o mss12.o
|
||||||
@ -574,7 +566,6 @@ OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o proresdata.o
|
|||||||
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o proresdata.o
|
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o proresdata.o
|
||||||
OBJS-$(CONFIG_PRORES_AW_ENCODER) += proresenc_anatoliy.o proresdata.o
|
OBJS-$(CONFIG_PRORES_AW_ENCODER) += proresenc_anatoliy.o proresdata.o
|
||||||
OBJS-$(CONFIG_PRORES_KS_ENCODER) += proresenc_kostya.o proresdata.o
|
OBJS-$(CONFIG_PRORES_KS_ENCODER) += proresenc_kostya.o proresdata.o
|
||||||
OBJS-$(CONFIG_PRORES_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o
|
|
||||||
OBJS-$(CONFIG_PROSUMER_DECODER) += prosumer.o
|
OBJS-$(CONFIG_PROSUMER_DECODER) += prosumer.o
|
||||||
OBJS-$(CONFIG_PSD_DECODER) += psd.o
|
OBJS-$(CONFIG_PSD_DECODER) += psd.o
|
||||||
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
|
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
|
||||||
@ -635,7 +626,6 @@ OBJS-$(CONFIG_SIMBIOSIS_IMX_DECODER) += imx.o
|
|||||||
OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
|
OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
|
||||||
OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
|
OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
|
||||||
OBJS-$(CONFIG_SMC_DECODER) += smc.o
|
OBJS-$(CONFIG_SMC_DECODER) += smc.o
|
||||||
OBJS-$(CONFIG_SMC_ENCODER) += smcenc.o
|
|
||||||
OBJS-$(CONFIG_SNOW_DECODER) += snowdec.o snow.o snow_dwt.o
|
OBJS-$(CONFIG_SNOW_DECODER) += snowdec.o snow.o snow_dwt.o
|
||||||
OBJS-$(CONFIG_SNOW_ENCODER) += snowenc.o snow.o snow_dwt.o \
|
OBJS-$(CONFIG_SNOW_ENCODER) += snowenc.o snow.o snow_dwt.o \
|
||||||
h263.o h263data.o ituh263enc.o
|
h263.o h263data.o ituh263enc.o
|
||||||
@ -645,7 +635,6 @@ OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
|
|||||||
OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
|
OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
|
||||||
OBJS-$(CONFIG_SPEEDHQ_DECODER) += speedhq.o mpeg12.o mpeg12data.o simple_idct.o
|
OBJS-$(CONFIG_SPEEDHQ_DECODER) += speedhq.o mpeg12.o mpeg12data.o simple_idct.o
|
||||||
OBJS-$(CONFIG_SPEEDHQ_ENCODER) += speedhq.o mpeg12data.o mpeg12enc.o speedhqenc.o
|
OBJS-$(CONFIG_SPEEDHQ_ENCODER) += speedhq.o mpeg12data.o mpeg12enc.o speedhqenc.o
|
||||||
OBJS-$(CONFIG_SPEEX_DECODER) += speexdec.o
|
|
||||||
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
|
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
|
||||||
OBJS-$(CONFIG_SRGC_DECODER) += mscc.o
|
OBJS-$(CONFIG_SRGC_DECODER) += mscc.o
|
||||||
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o htmlsubtitles.o
|
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o htmlsubtitles.o
|
||||||
@ -870,7 +859,7 @@ OBJS-$(CONFIG_ADPCM_AFC_DECODER) += adpcm.o adpcm_data.o
|
|||||||
OBJS-$(CONFIG_ADPCM_AGM_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_AGM_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_AICA_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_AICA_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_ARGO_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_ARGO_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_ARGO_ENCODER) += adpcm.o adpcm_data.o adpcmenc.o
|
OBJS-$(CONFIG_ADPCM_ARGO_ENCODER) += adpcm.o adpcmenc.o
|
||||||
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_DTK_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_DTK_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
|
||||||
@ -885,9 +874,7 @@ OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
|
|||||||
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
|
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
|
||||||
OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
|
OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
|
||||||
OBJS-$(CONFIG_ADPCM_G726LE_ENCODER) += g726.o
|
OBJS-$(CONFIG_ADPCM_G726LE_ENCODER) += g726.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_ACORN_DECODER) += adpcm.o adpcm_data.o
|
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_AMV_ENCODER) += adpcmenc.o adpcm_data.o
|
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_ALP_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_ALP_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_ALP_ENCODER) += adpcmenc.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_ALP_ENCODER) += adpcmenc.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
|
||||||
@ -912,7 +899,6 @@ OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o adpcm_data.o
|
|||||||
OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_IMA_WS_ENCODER) += adpcmenc.o adpcm_data.o
|
|
||||||
OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o
|
||||||
OBJS-$(CONFIG_ADPCM_MTAF_DECODER) += adpcm.o adpcm_data.o
|
OBJS-$(CONFIG_ADPCM_MTAF_DECODER) += adpcm.o adpcm_data.o
|
||||||
@ -988,30 +974,28 @@ OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o
|
|||||||
OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o
|
OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o
|
||||||
OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o
|
OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o
|
||||||
OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
|
OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
|
||||||
OBJS-$(CONFIG_VP9_VIDEOTOOLBOX_HWACCEL) += videotoolbox_vp9.o
|
|
||||||
OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec.o
|
OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec.o
|
||||||
|
|
||||||
# Objects duplicated from other libraries for shared builds
|
# libavformat dependencies
|
||||||
SHLIBOBJS += log2_tab.o reverse.o
|
OBJS-$(CONFIG_ISO_MEDIA) += mpeg4audio.o mpegaudiodata.o
|
||||||
|
|
||||||
# General libavformat dependencies
|
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_CODEC2_DEMUXER) += codec2utils.o
|
||||||
|
OBJS-$(CONFIG_CODEC2_MUXER) += codec2utils.o
|
||||||
|
OBJS-$(CONFIG_CODEC2RAW_DEMUXER) += codec2utils.o
|
||||||
|
OBJS-$(CONFIG_DNXHD_DEMUXER) += dnxhddata.o
|
||||||
OBJS-$(CONFIG_FITS_DEMUXER) += fits.o
|
OBJS-$(CONFIG_FITS_DEMUXER) += fits.o
|
||||||
|
OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_MOV_DEMUXER) += ac3tab.o
|
||||||
|
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
|
||||||
|
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
|
||||||
|
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o
|
||||||
|
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
|
||||||
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
|
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
|
||||||
|
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o
|
||||||
# libavformat dependencies for static builds
|
|
||||||
STLIBOBJS-$(CONFIG_AVFORMAT) += to_upper4.o
|
|
||||||
STLIBOBJS-$(CONFIG_ISO_MEDIA) += mpegaudiotabs.o
|
|
||||||
STLIBOBJS-$(CONFIG_FLV_MUXER) += mpeg4audio_sample_rates.o
|
|
||||||
STLIBOBJS-$(CONFIG_HLS_DEMUXER) += ac3_channel_layout_tab.o
|
|
||||||
STLIBOBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio_sample_rates.o
|
|
||||||
STLIBOBJS-$(CONFIG_MOV_DEMUXER) += ac3_channel_layout_tab.o
|
|
||||||
STLIBOBJS-$(CONFIG_MXF_MUXER) += golomb.o
|
|
||||||
STLIBOBJS-$(CONFIG_MP3_MUXER) += mpegaudiotabs.o
|
|
||||||
STLIBOBJS-$(CONFIG_NUT_MUXER) += mpegaudiotabs.o
|
|
||||||
STLIBOBJS-$(CONFIG_RTPDEC) += jpegtables.o
|
|
||||||
STLIBOBJS-$(CONFIG_RTP_MUXER) += golomb.o jpegtables.o \
|
|
||||||
mpeg4audio_sample_rates.o
|
|
||||||
STLIBOBJS-$(CONFIG_SPDIF_MUXER) += dca_sample_rate_tab.o
|
|
||||||
|
|
||||||
# libavfilter dependencies
|
# libavfilter dependencies
|
||||||
OBJS-$(CONFIG_ELBG_FILTER) += elbg.o
|
OBJS-$(CONFIG_ELBG_FILTER) += elbg.o
|
||||||
@ -1041,8 +1025,8 @@ OBJS-$(CONFIG_LIBAOM_AV1_DECODER) += libaomdec.o
|
|||||||
OBJS-$(CONFIG_LIBAOM_AV1_ENCODER) += libaomenc.o
|
OBJS-$(CONFIG_LIBAOM_AV1_ENCODER) += libaomenc.o
|
||||||
OBJS-$(CONFIG_LIBARIBB24_DECODER) += libaribb24.o ass.o
|
OBJS-$(CONFIG_LIBARIBB24_DECODER) += libaribb24.o ass.o
|
||||||
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||||
OBJS-$(CONFIG_LIBCODEC2_DECODER) += libcodec2.o
|
OBJS-$(CONFIG_LIBCODEC2_DECODER) += libcodec2.o codec2utils.o
|
||||||
OBJS-$(CONFIG_LIBCODEC2_ENCODER) += libcodec2.o
|
OBJS-$(CONFIG_LIBCODEC2_ENCODER) += libcodec2.o codec2utils.o
|
||||||
OBJS-$(CONFIG_LIBDAV1D_DECODER) += libdav1d.o
|
OBJS-$(CONFIG_LIBDAV1D_DECODER) += libdav1d.o
|
||||||
OBJS-$(CONFIG_LIBDAVS2_DECODER) += libdavs2.o
|
OBJS-$(CONFIG_LIBDAVS2_DECODER) += libdavs2.o
|
||||||
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
|
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
|
||||||
@ -1094,12 +1078,11 @@ OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o ass.o
|
|||||||
|
|
||||||
# parsers
|
# parsers
|
||||||
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
|
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
|
||||||
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o
|
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
|
||||||
OBJS-$(CONFIG_AC3_PARSER) += aac_ac3_parser.o ac3tab.o \
|
mpeg4audio.o
|
||||||
ac3_channel_layout_tab.o
|
OBJS-$(CONFIG_AC3_PARSER) += ac3tab.o aac_ac3_parser.o
|
||||||
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
|
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
|
||||||
OBJS-$(CONFIG_AMR_PARSER) += amr_parser.o
|
OBJS-$(CONFIG_AV1_PARSER) += av1_parser.o av1_parse.o
|
||||||
OBJS-$(CONFIG_AV1_PARSER) += av1_parser.o
|
|
||||||
OBJS-$(CONFIG_AVS2_PARSER) += avs2_parser.o
|
OBJS-$(CONFIG_AVS2_PARSER) += avs2_parser.o
|
||||||
OBJS-$(CONFIG_AVS3_PARSER) += avs3_parser.o
|
OBJS-$(CONFIG_AVS3_PARSER) += avs3_parser.o
|
||||||
OBJS-$(CONFIG_AV3A_PARSER) += av3a_parser.o
|
OBJS-$(CONFIG_AV3A_PARSER) += av3a_parser.o
|
||||||
@ -1107,8 +1090,7 @@ OBJS-$(CONFIG_BMP_PARSER) += bmp_parser.o
|
|||||||
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
|
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
|
||||||
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
|
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
|
||||||
OBJS-$(CONFIG_CRI_PARSER) += cri_parser.o
|
OBJS-$(CONFIG_CRI_PARSER) += cri_parser.o
|
||||||
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca_exss.o dca.o \
|
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca_exss.o dca.o
|
||||||
dca_sample_rate_tab.o
|
|
||||||
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
|
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
|
||||||
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o dnxhddata.o
|
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o dnxhddata.o
|
||||||
OBJS-$(CONFIG_DOLBY_E_PARSER) += dolby_e_parser.o dolby_e_parse.o
|
OBJS-$(CONFIG_DOLBY_E_PARSER) += dolby_e_parser.o dolby_e_parse.o
|
||||||
@ -1155,7 +1137,7 @@ OBJS-$(CONFIG_XBM_PARSER) += xbm_parser.o
|
|||||||
OBJS-$(CONFIG_XMA_PARSER) += xma_parser.o
|
OBJS-$(CONFIG_XMA_PARSER) += xma_parser.o
|
||||||
|
|
||||||
# bitstream filters
|
# bitstream filters
|
||||||
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o
|
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o mpeg4audio.o
|
||||||
OBJS-$(CONFIG_AV1_METADATA_BSF) += av1_metadata_bsf.o
|
OBJS-$(CONFIG_AV1_METADATA_BSF) += av1_metadata_bsf.o
|
||||||
OBJS-$(CONFIG_AV1_FRAME_MERGE_BSF) += av1_frame_merge_bsf.o
|
OBJS-$(CONFIG_AV1_FRAME_MERGE_BSF) += av1_frame_merge_bsf.o
|
||||||
OBJS-$(CONFIG_AV1_FRAME_SPLIT_BSF) += av1_frame_split_bsf.o
|
OBJS-$(CONFIG_AV1_FRAME_SPLIT_BSF) += av1_frame_split_bsf.o
|
||||||
@ -1178,14 +1160,14 @@ OBJS-$(CONFIG_MJPEGA_DUMP_HEADER_BSF) += mjpega_dump_header_bsf.o
|
|||||||
OBJS-$(CONFIG_MPEG4_UNPACK_BFRAMES_BSF) += mpeg4_unpack_bframes_bsf.o
|
OBJS-$(CONFIG_MPEG4_UNPACK_BFRAMES_BSF) += mpeg4_unpack_bframes_bsf.o
|
||||||
OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
|
OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
|
||||||
OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
|
OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
|
||||||
mpegaudiotabs.o
|
mpegaudiodata.o
|
||||||
OBJS-$(CONFIG_MPEG2_METADATA_BSF) += mpeg2_metadata_bsf.o
|
OBJS-$(CONFIG_MPEG2_METADATA_BSF) += mpeg2_metadata_bsf.o
|
||||||
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
|
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
|
||||||
OBJS-$(CONFIG_NULL_BSF) += null_bsf.o
|
OBJS-$(CONFIG_NULL_BSF) += null_bsf.o
|
||||||
OBJS-$(CONFIG_OPUS_METADATA_BSF) += opus_metadata_bsf.o
|
OBJS-$(CONFIG_OPUS_METADATA_BSF) += opus_metadata_bsf.o
|
||||||
OBJS-$(CONFIG_PCM_RECHUNK_BSF) += pcm_rechunk_bsf.o
|
OBJS-$(CONFIG_PCM_RECHUNK_BSF) += pcm_rechunk_bsf.o
|
||||||
OBJS-$(CONFIG_PRORES_METADATA_BSF) += prores_metadata_bsf.o
|
OBJS-$(CONFIG_PRORES_METADATA_BSF) += prores_metadata_bsf.o
|
||||||
OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o av1_parse.o
|
OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
|
||||||
OBJS-$(CONFIG_SETTS_BSF) += setts_bsf.o
|
OBJS-$(CONFIG_SETTS_BSF) += setts_bsf.o
|
||||||
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
|
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
|
||||||
OBJS-$(CONFIG_TRACE_HEADERS_BSF) += trace_headers_bsf.o
|
OBJS-$(CONFIG_TRACE_HEADERS_BSF) += trace_headers_bsf.o
|
||||||
@ -1206,6 +1188,7 @@ SLIBOBJS-$(HAVE_GNU_WINDRES) += avcodecres.o
|
|||||||
|
|
||||||
SKIPHEADERS += %_tablegen.h \
|
SKIPHEADERS += %_tablegen.h \
|
||||||
%_tables.h \
|
%_tables.h \
|
||||||
|
fft-internal.h \
|
||||||
tableprint.h \
|
tableprint.h \
|
||||||
tableprint_vlc.h \
|
tableprint_vlc.h \
|
||||||
aaccoder_twoloop.h \
|
aaccoder_twoloop.h \
|
||||||
@ -1232,13 +1215,14 @@ SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
|
|||||||
SKIPHEADERS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.h vt_internal.h
|
SKIPHEADERS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.h vt_internal.h
|
||||||
SKIPHEADERS-$(CONFIG_V4L2_M2M) += v4l2_buffers.h v4l2_context.h v4l2_m2m.h
|
SKIPHEADERS-$(CONFIG_V4L2_M2M) += v4l2_buffers.h v4l2_context.h v4l2_m2m.h
|
||||||
|
|
||||||
TESTPROGS = avcodec \
|
TESTPROGS = avpacket \
|
||||||
avpacket \
|
|
||||||
celp_math \
|
celp_math \
|
||||||
codec_desc \
|
codec_desc \
|
||||||
htmlsubtitles \
|
htmlsubtitles \
|
||||||
|
imgconvert \
|
||||||
jpeg2000dwt \
|
jpeg2000dwt \
|
||||||
mathops \
|
mathops \
|
||||||
|
utils \
|
||||||
|
|
||||||
TESTPROGS-$(CONFIG_CABAC) += cabac
|
TESTPROGS-$(CONFIG_CABAC) += cabac
|
||||||
TESTPROGS-$(CONFIG_DCT) += avfft
|
TESTPROGS-$(CONFIG_DCT) += avfft
|
||||||
|
@ -27,7 +27,6 @@
|
|||||||
#include "a64colors.h"
|
#include "a64colors.h"
|
||||||
#include "a64tables.h"
|
#include "a64tables.h"
|
||||||
#include "elbg.h"
|
#include "elbg.h"
|
||||||
#include "encode.h"
|
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
#include "libavutil/avassert.h"
|
#include "libavutil/avassert.h"
|
||||||
#include "libavutil/common.h"
|
#include "libavutil/common.h"
|
||||||
@ -43,7 +42,6 @@
|
|||||||
|
|
||||||
typedef struct A64Context {
|
typedef struct A64Context {
|
||||||
/* variables for multicolor modes */
|
/* variables for multicolor modes */
|
||||||
struct ELBGContext *elbg;
|
|
||||||
AVLFG randctx;
|
AVLFG randctx;
|
||||||
int mc_lifetime;
|
int mc_lifetime;
|
||||||
int mc_use_5col;
|
int mc_use_5col;
|
||||||
@ -52,6 +50,7 @@ typedef struct A64Context {
|
|||||||
int *mc_charmap;
|
int *mc_charmap;
|
||||||
int *mc_best_cb;
|
int *mc_best_cb;
|
||||||
int mc_luma_vals[5];
|
int mc_luma_vals[5];
|
||||||
|
uint8_t *mc_charset;
|
||||||
uint8_t *mc_colram;
|
uint8_t *mc_colram;
|
||||||
uint8_t *mc_palette;
|
uint8_t *mc_palette;
|
||||||
int mc_pal_size;
|
int mc_pal_size;
|
||||||
@ -196,11 +195,9 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
|
|||||||
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
|
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
|
||||||
{
|
{
|
||||||
A64Context *c = avctx->priv_data;
|
A64Context *c = avctx->priv_data;
|
||||||
|
|
||||||
avpriv_elbg_free(&c->elbg);
|
|
||||||
|
|
||||||
av_freep(&c->mc_meta_charset);
|
av_freep(&c->mc_meta_charset);
|
||||||
av_freep(&c->mc_best_cb);
|
av_freep(&c->mc_best_cb);
|
||||||
|
av_freep(&c->mc_charset);
|
||||||
av_freep(&c->mc_charmap);
|
av_freep(&c->mc_charmap);
|
||||||
av_freep(&c->mc_colram);
|
av_freep(&c->mc_colram);
|
||||||
return 0;
|
return 0;
|
||||||
@ -215,7 +212,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
|
|||||||
if (avctx->global_quality < 1) {
|
if (avctx->global_quality < 1) {
|
||||||
c->mc_lifetime = 4;
|
c->mc_lifetime = 4;
|
||||||
} else {
|
} else {
|
||||||
c->mc_lifetime = avctx->global_quality / FF_QP2LAMBDA;
|
c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA;
|
||||||
}
|
}
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);
|
av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);
|
||||||
@ -231,10 +228,11 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
|
|||||||
a64_palette[mc_colors[a]][2] * 0.11;
|
a64_palette[mc_colors[a]][2] * 0.11;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!(c->mc_meta_charset = av_calloc(c->mc_lifetime, 32000 * sizeof(int))) ||
|
if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
|
||||||
!(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
|
!(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
|
||||||
!(c->mc_charmap = av_calloc(c->mc_lifetime, 1000 * sizeof(int))) ||
|
!(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
|
||||||
!(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))) {
|
!(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
|
||||||
|
!(c->mc_charset = av_malloc(0x800 * (INTERLACED+1) * sizeof(uint8_t)))) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
|
av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
}
|
}
|
||||||
@ -286,6 +284,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
|
|
||||||
int *charmap = c->mc_charmap;
|
int *charmap = c->mc_charmap;
|
||||||
uint8_t *colram = c->mc_colram;
|
uint8_t *colram = c->mc_colram;
|
||||||
|
uint8_t *charset = c->mc_charset;
|
||||||
int *meta = c->mc_meta_charset;
|
int *meta = c->mc_meta_charset;
|
||||||
int *best_cb = c->mc_best_cb;
|
int *best_cb = c->mc_best_cb;
|
||||||
|
|
||||||
@ -332,18 +331,25 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
/* any frames to encode? */
|
/* any frames to encode? */
|
||||||
if (c->mc_lifetime) {
|
if (c->mc_lifetime) {
|
||||||
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
||||||
if ((ret = ff_get_encode_buffer(avctx, pkt, alloc_size, 0)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
buf = pkt->data;
|
buf = pkt->data;
|
||||||
|
|
||||||
/* calc optimal new charset + charmaps */
|
/* calc optimal new charset + charmaps */
|
||||||
ret = avpriv_elbg_do(&c->elbg, meta, 32, 1000 * c->mc_lifetime,
|
ret = avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
|
||||||
best_cb, CHARSET_CHARS, 50, charmap, &c->randctx, 0);
|
CHARSET_CHARS, 50, charmap, &c->randctx);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
ret = avpriv_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
|
||||||
|
CHARSET_CHARS, 50, charmap, &c->randctx);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* create colorram map and a c64 readable charset */
|
/* create colorram map and a c64 readable charset */
|
||||||
render_charset(avctx, buf, colram);
|
render_charset(avctx, charset, colram);
|
||||||
|
|
||||||
|
/* copy charset to buf */
|
||||||
|
memcpy(buf, charset, charset_size);
|
||||||
|
|
||||||
/* advance pointers */
|
/* advance pointers */
|
||||||
buf += charset_size;
|
buf += charset_size;
|
||||||
@ -384,39 +390,41 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
pkt->pts = pkt->dts = c->next_pts;
|
pkt->pts = pkt->dts = c->next_pts;
|
||||||
c->next_pts = AV_NOPTS_VALUE;
|
c->next_pts = AV_NOPTS_VALUE;
|
||||||
|
|
||||||
av_assert0(pkt->size == req_size);
|
av_assert0(pkt->size >= req_size);
|
||||||
|
pkt->size = req_size;
|
||||||
|
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||||
*got_packet = !!req_size;
|
*got_packet = !!req_size;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if CONFIG_A64MULTI_ENCODER
|
#if CONFIG_A64MULTI_ENCODER
|
||||||
const AVCodec ff_a64multi_encoder = {
|
AVCodec ff_a64multi_encoder = {
|
||||||
.name = "a64multi",
|
.name = "a64multi",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
|
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
|
||||||
.type = AVMEDIA_TYPE_VIDEO,
|
.type = AVMEDIA_TYPE_VIDEO,
|
||||||
.id = AV_CODEC_ID_A64_MULTI,
|
.id = AV_CODEC_ID_A64_MULTI,
|
||||||
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
|
|
||||||
.priv_data_size = sizeof(A64Context),
|
.priv_data_size = sizeof(A64Context),
|
||||||
.init = a64multi_encode_init,
|
.init = a64multi_encode_init,
|
||||||
.encode2 = a64multi_encode_frame,
|
.encode2 = a64multi_encode_frame,
|
||||||
.close = a64multi_close_encoder,
|
.close = a64multi_close_encoder,
|
||||||
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
|
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
|
||||||
|
.capabilities = AV_CODEC_CAP_DELAY,
|
||||||
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
|
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
#if CONFIG_A64MULTI5_ENCODER
|
#if CONFIG_A64MULTI5_ENCODER
|
||||||
const AVCodec ff_a64multi5_encoder = {
|
AVCodec ff_a64multi5_encoder = {
|
||||||
.name = "a64multi5",
|
.name = "a64multi5",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
|
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
|
||||||
.type = AVMEDIA_TYPE_VIDEO,
|
.type = AVMEDIA_TYPE_VIDEO,
|
||||||
.id = AV_CODEC_ID_A64_MULTI5,
|
.id = AV_CODEC_ID_A64_MULTI5,
|
||||||
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
|
|
||||||
.priv_data_size = sizeof(A64Context),
|
.priv_data_size = sizeof(A64Context),
|
||||||
.init = a64multi_encode_init,
|
.init = a64multi_encode_init,
|
||||||
.encode2 = a64multi_encode_frame,
|
.encode2 = a64multi_encode_frame,
|
||||||
.close = a64multi_close_encoder,
|
.close = a64multi_close_encoder,
|
||||||
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
|
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
|
||||||
|
.capabilities = AV_CODEC_CAP_DELAY,
|
||||||
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
|
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
@ -30,6 +30,7 @@
|
|||||||
#include "libavutil/softfloat.h"
|
#include "libavutil/softfloat.h"
|
||||||
|
|
||||||
#define FFT_FLOAT 0
|
#define FFT_FLOAT 0
|
||||||
|
#define FFT_FIXED_32 1
|
||||||
|
|
||||||
#define AAC_RENAME(x) x ## _fixed
|
#define AAC_RENAME(x) x ## _fixed
|
||||||
#define AAC_RENAME_32(x) x ## _fixed_32
|
#define AAC_RENAME_32(x) x ## _fixed_32
|
||||||
@ -79,6 +80,7 @@ typedef int AAC_SIGNE;
|
|||||||
#else
|
#else
|
||||||
|
|
||||||
#define FFT_FLOAT 1
|
#define FFT_FLOAT 1
|
||||||
|
#define FFT_FIXED_32 0
|
||||||
|
|
||||||
#define AAC_RENAME(x) x
|
#define AAC_RENAME(x) x
|
||||||
#define AAC_RENAME_32(x) x
|
#define AAC_RENAME_32(x) x
|
||||||
|
@ -62,7 +62,7 @@ static av_cold int aac_parse_init(AVCodecParserContext *s1)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const AVCodecParser ff_aac_parser = {
|
AVCodecParser ff_aac_parser = {
|
||||||
.codec_ids = { AV_CODEC_ID_AAC },
|
.codec_ids = { AV_CODEC_ID_AAC },
|
||||||
.priv_data_size = sizeof(AACAC3ParseContext),
|
.priv_data_size = sizeof(AACAC3ParseContext),
|
||||||
.parser_init = aac_parse_init,
|
.parser_init = aac_parse_init,
|
||||||
|
@ -414,10 +414,11 @@ static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s,
|
|||||||
start = 0;
|
start = 0;
|
||||||
for (g = 0; g < sce->ics.num_swb; g++) {
|
for (g = 0; g < sce->ics.num_swb; g++) {
|
||||||
int nz = 0;
|
int nz = 0;
|
||||||
float uplim = 0.0f;
|
float uplim = 0.0f, energy = 0.0f;
|
||||||
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
|
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
|
||||||
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
|
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
|
||||||
uplim += band->threshold;
|
uplim += band->threshold;
|
||||||
|
energy += band->energy;
|
||||||
if (band->energy <= band->threshold || band->threshold == 0.0f) {
|
if (band->energy <= band->threshold || band->threshold == 0.0f) {
|
||||||
sce->zeroes[(w+w2)*16+g] = 1;
|
sce->zeroes[(w+w2)*16+g] = 1;
|
||||||
continue;
|
continue;
|
||||||
|
@ -33,6 +33,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#define FFT_FLOAT 1
|
#define FFT_FLOAT 1
|
||||||
|
#define FFT_FIXED_32 0
|
||||||
#define USE_FIXED 0
|
#define USE_FIXED 0
|
||||||
|
|
||||||
#include "libavutil/float_dsp.h"
|
#include "libavutil/float_dsp.h"
|
||||||
@ -552,7 +553,7 @@ static av_cold int latm_decode_init(AVCodecContext *avctx)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
const AVCodec ff_aac_decoder = {
|
AVCodec ff_aac_decoder = {
|
||||||
.name = "aac",
|
.name = "aac",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
||||||
.type = AVMEDIA_TYPE_AUDIO,
|
.type = AVMEDIA_TYPE_AUDIO,
|
||||||
@ -577,7 +578,7 @@ const AVCodec ff_aac_decoder = {
|
|||||||
in MPEG transport streams which only contain one program.
|
in MPEG transport streams which only contain one program.
|
||||||
To do a more complex LATM demuxing a separate LATM demuxer should be used.
|
To do a more complex LATM demuxing a separate LATM demuxer should be used.
|
||||||
*/
|
*/
|
||||||
const AVCodec ff_aac_latm_decoder = {
|
AVCodec ff_aac_latm_decoder = {
|
||||||
.name = "aac_latm",
|
.name = "aac_latm",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
|
.long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
|
||||||
.type = AVMEDIA_TYPE_AUDIO,
|
.type = AVMEDIA_TYPE_AUDIO,
|
||||||
|
@ -59,6 +59,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#define FFT_FLOAT 0
|
#define FFT_FLOAT 0
|
||||||
|
#define FFT_FIXED_32 1
|
||||||
#define USE_FIXED 1
|
#define USE_FIXED 1
|
||||||
|
|
||||||
#include "libavutil/fixed_dsp.h"
|
#include "libavutil/fixed_dsp.h"
|
||||||
@ -450,7 +451,7 @@ static void apply_independent_coupling_fixed(AACContext *ac,
|
|||||||
|
|
||||||
#include "aacdec_template.c"
|
#include "aacdec_template.c"
|
||||||
|
|
||||||
const AVCodec ff_aac_fixed_decoder = {
|
AVCodec ff_aac_fixed_decoder = {
|
||||||
.name = "aac_fixed",
|
.name = "aac_fixed",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
||||||
.type = AVMEDIA_TYPE_AUDIO,
|
.type = AVMEDIA_TYPE_AUDIO,
|
||||||
|
@ -89,7 +89,6 @@
|
|||||||
Parametric Stereo.
|
Parametric Stereo.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "libavutil/channel_layout.h"
|
|
||||||
#include "libavutil/thread.h"
|
#include "libavutil/thread.h"
|
||||||
|
|
||||||
static VLC vlc_scalefactors;
|
static VLC vlc_scalefactors;
|
||||||
@ -716,7 +715,9 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
|
|||||||
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
|
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
|
||||||
}
|
}
|
||||||
case 11:
|
case 11:
|
||||||
if (ac->tags_mapped == 3 && type == TYPE_SCE) {
|
if (ac->tags_mapped == 2 &&
|
||||||
|
ac->oc[1].m4ac.chan_config == 11 &&
|
||||||
|
type == TYPE_SCE) {
|
||||||
ac->tags_mapped++;
|
ac->tags_mapped++;
|
||||||
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
|
return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
|
||||||
}
|
}
|
||||||
@ -3441,11 +3442,11 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
int buf_consumed;
|
int buf_consumed;
|
||||||
int buf_offset;
|
int buf_offset;
|
||||||
int err;
|
int err;
|
||||||
size_t new_extradata_size;
|
buffer_size_t new_extradata_size;
|
||||||
const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
|
const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
|
||||||
AV_PKT_DATA_NEW_EXTRADATA,
|
AV_PKT_DATA_NEW_EXTRADATA,
|
||||||
&new_extradata_size);
|
&new_extradata_size);
|
||||||
size_t jp_dualmono_size;
|
buffer_size_t jp_dualmono_size;
|
||||||
const uint8_t *jp_dualmono = av_packet_get_side_data(avpkt,
|
const uint8_t *jp_dualmono = av_packet_get_side_data(avpkt,
|
||||||
AV_PKT_DATA_JP_DUALMONO,
|
AV_PKT_DATA_JP_DUALMONO,
|
||||||
&jp_dualmono_size);
|
&jp_dualmono_size);
|
||||||
|
@ -35,7 +35,7 @@
|
|||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 5, 5, 16, 5, 0 };
|
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 4, 5, 16, 5, 0 };
|
||||||
|
|
||||||
static const uint8_t aac_channel_layout_map[16][16][3] = {
|
static const uint8_t aac_channel_layout_map[16][16][3] = {
|
||||||
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, },
|
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, },
|
||||||
@ -83,7 +83,7 @@ static const uint64_t aac_channel_layout[16] = {
|
|||||||
0,
|
0,
|
||||||
0,
|
0,
|
||||||
0,
|
0,
|
||||||
AV_CH_LAYOUT_6POINT1_BACK,
|
AV_CH_LAYOUT_6POINT1,
|
||||||
AV_CH_LAYOUT_7POINT1,
|
AV_CH_LAYOUT_7POINT1,
|
||||||
AV_CH_LAYOUT_22POINT2,
|
AV_CH_LAYOUT_22POINT2,
|
||||||
0,
|
0,
|
||||||
|
@ -30,12 +30,10 @@
|
|||||||
***********************************/
|
***********************************/
|
||||||
#include <float.h>
|
#include <float.h>
|
||||||
|
|
||||||
#include "libavutil/channel_layout.h"
|
|
||||||
#include "libavutil/libm.h"
|
#include "libavutil/libm.h"
|
||||||
#include "libavutil/float_dsp.h"
|
#include "libavutil/float_dsp.h"
|
||||||
#include "libavutil/opt.h"
|
#include "libavutil/opt.h"
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "encode.h"
|
|
||||||
#include "put_bits.h"
|
#include "put_bits.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
#include "mpeg4audio.h"
|
#include "mpeg4audio.h"
|
||||||
@ -49,7 +47,6 @@
|
|||||||
#include "aacenc_utils.h"
|
#include "aacenc_utils.h"
|
||||||
|
|
||||||
#include "psymodel.h"
|
#include "psymodel.h"
|
||||||
#include "mpeg4audio_sample_rates.h"
|
|
||||||
|
|
||||||
static void put_pce(PutBitContext *pb, AVCodecContext *avctx)
|
static void put_pce(PutBitContext *pb, AVCodecContext *avctx)
|
||||||
{
|
{
|
||||||
@ -119,7 +116,7 @@ static int put_audio_specific_config(AVCodecContext *avctx)
|
|||||||
put_bits(&pb, 5, AOT_SBR);
|
put_bits(&pb, 5, AOT_SBR);
|
||||||
put_bits(&pb, 1, 0);
|
put_bits(&pb, 1, 0);
|
||||||
flush_put_bits(&pb);
|
flush_put_bits(&pb);
|
||||||
avctx->extradata_size = put_bytes_output(&pb);
|
avctx->extradata_size = put_bits_count(&pb) >> 3;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -679,7 +676,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
}
|
}
|
||||||
start_ch += chans;
|
start_ch += chans;
|
||||||
}
|
}
|
||||||
if ((ret = ff_alloc_packet(avctx, avpkt, 8192 * s->channels)) < 0)
|
if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
frame_bits = its = 0;
|
frame_bits = its = 0;
|
||||||
do {
|
do {
|
||||||
@ -885,7 +882,6 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
flush_put_bits(&s->pb);
|
flush_put_bits(&s->pb);
|
||||||
|
|
||||||
s->last_frame_pb_count = put_bits_count(&s->pb);
|
s->last_frame_pb_count = put_bits_count(&s->pb);
|
||||||
avpkt->size = put_bytes_output(&s->pb);
|
|
||||||
|
|
||||||
s->lambda_sum += s->lambda;
|
s->lambda_sum += s->lambda;
|
||||||
s->lambda_count++;
|
s->lambda_count++;
|
||||||
@ -893,6 +889,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
|||||||
ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
|
ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
|
||||||
&avpkt->duration);
|
&avpkt->duration);
|
||||||
|
|
||||||
|
avpkt->size = put_bits_count(&s->pb) >> 3;
|
||||||
*got_packet_ptr = 1;
|
*got_packet_ptr = 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -999,7 +996,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
/* Samplerate */
|
/* Samplerate */
|
||||||
for (i = 0; i < 16; i++)
|
for (i = 0; i < 16; i++)
|
||||||
if (avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
|
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
|
||||||
break;
|
break;
|
||||||
s->samplerate_index = i;
|
s->samplerate_index = i;
|
||||||
ERROR_IF(s->samplerate_index == 16 ||
|
ERROR_IF(s->samplerate_index == 16 ||
|
||||||
@ -1107,7 +1104,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
|
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
|
||||||
static const AVOption aacenc_options[] = {
|
static const AVOption aacenc_options[] = {
|
||||||
{"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_TWOLOOP}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"},
|
{"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_FAST}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"},
|
||||||
{"anmr", "ANMR method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_ANMR}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
{"anmr", "ANMR method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_ANMR}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
||||||
{"twoloop", "Two loop searching method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_TWOLOOP}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
{"twoloop", "Two loop searching method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_TWOLOOP}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
||||||
{"fast", "Default fast search", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
{"fast", "Default fast search", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"},
|
||||||
@ -1134,7 +1131,7 @@ static const AVCodecDefault aac_encode_defaults[] = {
|
|||||||
{ NULL }
|
{ NULL }
|
||||||
};
|
};
|
||||||
|
|
||||||
const AVCodec ff_aac_encoder = {
|
AVCodec ff_aac_encoder = {
|
||||||
.name = "aac",
|
.name = "aac",
|
||||||
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
|
||||||
.type = AVMEDIA_TYPE_AUDIO,
|
.type = AVMEDIA_TYPE_AUDIO,
|
||||||
@ -1144,7 +1141,7 @@ const AVCodec ff_aac_encoder = {
|
|||||||
.encode2 = aac_encode_frame,
|
.encode2 = aac_encode_frame,
|
||||||
.close = aac_encode_end,
|
.close = aac_encode_end,
|
||||||
.defaults = aac_encode_defaults,
|
.defaults = aac_encode_defaults,
|
||||||
.supported_samplerates = ff_mpeg4audio_sample_rates,
|
.supported_samplerates = mpeg4audio_sample_rates,
|
||||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
|
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
|
||||||
.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
|
.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
|
||||||
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
|
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
|
||||||
|
@ -22,7 +22,6 @@
|
|||||||
#ifndef AVCODEC_AACENC_H
|
#ifndef AVCODEC_AACENC_H
|
||||||
#define AVCODEC_AACENC_H
|
#define AVCODEC_AACENC_H
|
||||||
|
|
||||||
#include "libavutil/channel_layout.h"
|
|
||||||
#include "libavutil/float_dsp.h"
|
#include "libavutil/float_dsp.h"
|
||||||
#include "libavutil/mem_internal.h"
|
#include "libavutil/mem_internal.h"
|
||||||
|
|
||||||
|
@ -28,7 +28,6 @@
|
|||||||
#ifndef AVCODEC_AACENCTAB_H
|
#ifndef AVCODEC_AACENCTAB_H
|
||||||
#define AVCODEC_AACENCTAB_H
|
#define AVCODEC_AACENCTAB_H
|
||||||
|
|
||||||
#include "libavutil/channel_layout.h"
|
|
||||||
#include "aac.h"
|
#include "aac.h"
|
||||||
|
|
||||||
/** Total number of usable codebooks **/
|
/** Total number of usable codebooks **/
|
||||||
@ -81,6 +80,13 @@ static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
|
|||||||
{ 2, 0, 1, 6, 7, 4, 5, 3 },
|
{ 2, 0, 1, 6, 7, 4, 5, 3 },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* duplicated from avpriv_mpeg4audio_sample_rates to avoid shared build
|
||||||
|
* failures */
|
||||||
|
static const int mpeg4audio_sample_rates[16] = {
|
||||||
|
96000, 88200, 64000, 48000, 44100, 32000,
|
||||||
|
24000, 22050, 16000, 12000, 11025, 8000, 7350
|
||||||
|
};
|
||||||
|
|
||||||
/** bits needed to code codebook run value for long windows */
|
/** bits needed to code codebook run value for long windows */
|
||||||
static const uint8_t run_value_bits_long[64] = {
|
static const uint8_t run_value_bits_long[64] = {
|
||||||
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
|
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
|
||||||
|
@ -51,8 +51,7 @@ static void ipdopd_reset(int8_t *ipd_hist, int8_t *opd_hist)
|
|||||||
|
|
||||||
/** Split one subband into 2 subsubbands with a symmetric real filter.
|
/** Split one subband into 2 subsubbands with a symmetric real filter.
|
||||||
* The filter must have its non-center even coefficients equal to zero. */
|
* The filter must have its non-center even coefficients equal to zero. */
|
||||||
static void hybrid2_re(INTFLOAT (*in)[2], INTFLOAT (*out)[32][2],
|
static void hybrid2_re(INTFLOAT (*in)[2], INTFLOAT (*out)[32][2], const INTFLOAT filter[8], int len, int reverse)
|
||||||
const INTFLOAT filter[7], int len, int reverse)
|
|
||||||
{
|
{
|
||||||
int i, j;
|
int i, j;
|
||||||
for (i = 0; i < len; i++, in++) {
|
for (i = 0; i < len; i++, in++) {
|
||||||
|
@ -370,7 +370,7 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pctx->ch = av_calloc(ctx->avctx->channels, sizeof(*pctx->ch));
|
pctx->ch = av_mallocz_array(ctx->avctx->channels, sizeof(AacPsyChannel));
|
||||||
if (!pctx->ch) {
|
if (!pctx->ch) {
|
||||||
av_freep(&ctx->model_priv_data);
|
av_freep(&ctx->model_priv_data);
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
@ -858,8 +858,7 @@ static void psy_3gpp_analyze(FFPsyContext *ctx, int channel,
|
|||||||
static av_cold void psy_3gpp_end(FFPsyContext *apc)
|
static av_cold void psy_3gpp_end(FFPsyContext *apc)
|
||||||
{
|
{
|
||||||
AacPsyContext *pctx = (AacPsyContext*) apc->model_priv_data;
|
AacPsyContext *pctx = (AacPsyContext*) apc->model_priv_data;
|
||||||
if (pctx)
|
av_freep(&pctx->ch);
|
||||||
av_freep(&pctx->ch);
|
|
||||||
av_freep(&apc->model_priv_data);
|
av_freep(&apc->model_priv_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "config.h"
|
#include "config.h"
|
||||||
|
#include "libavutil/mem.h"
|
||||||
#include "libavutil/mem_internal.h"
|
#include "libavutil/mem_internal.h"
|
||||||
#include "libavutil/thread.h"
|
#include "libavutil/thread.h"
|
||||||
#include "aac.h"
|
#include "aac.h"
|
||||||
|
@ -18,7 +18,6 @@
|
|||||||
|
|
||||||
#include "config.h"
|
#include "config.h"
|
||||||
|
|
||||||
#include "libavutil/attributes.h"
|
|
||||||
#include "libavutil/aarch64/cpu.h"
|
#include "libavutil/aarch64/cpu.h"
|
||||||
#include "libavcodec/aacpsdsp.h"
|
#include "libavcodec/aacpsdsp.h"
|
||||||
|
|
||||||
|
@ -36,7 +36,6 @@
|
|||||||
|
|
||||||
|
|
||||||
function fft4_neon
|
function fft4_neon
|
||||||
AARCH64_VALID_JUMP_TARGET
|
|
||||||
ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
|
ld1 {v0.2s,v1.2s,v2.2s,v3.2s}, [x0]
|
||||||
|
|
||||||
fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1
|
fadd v4.2s, v0.2s, v1.2s // r0+r1,i0+i1
|
||||||
@ -59,7 +58,6 @@ function fft4_neon
|
|||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function fft8_neon
|
function fft8_neon
|
||||||
AARCH64_VALID_JUMP_TARGET
|
|
||||||
mov x1, x0
|
mov x1, x0
|
||||||
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
||||||
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
|
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0]
|
||||||
@ -110,7 +108,6 @@ function fft8_neon
|
|||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function fft16_neon
|
function fft16_neon
|
||||||
AARCH64_VALID_JUMP_TARGET
|
|
||||||
mov x1, x0
|
mov x1, x0
|
||||||
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
ld1 {v0.2s, v1.2s, v2.2s, v3.2s}, [x0], #32
|
||||||
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32
|
ld1 {v16.2s,v17.2s,v18.2s,v19.2s}, [x0], #32
|
||||||
@ -340,7 +337,6 @@ endfunc
|
|||||||
|
|
||||||
.macro def_fft n, n2, n4
|
.macro def_fft n, n2, n4
|
||||||
function fft\n\()_neon, align=6
|
function fft\n\()_neon, align=6
|
||||||
AARCH64_VALID_JUMP_TARGET
|
|
||||||
sub sp, sp, #16
|
sub sp, sp, #16
|
||||||
stp x28, x30, [sp]
|
stp x28, x30, [sp]
|
||||||
add x28, x0, #\n4*2*8
|
add x28, x0, #\n4*2*8
|
||||||
|
@@ -69,42 +69,19 @@ void ff_h264_idct_add_neon(uint8_t *dst, int16_t *block, int stride);
 void ff_h264_idct_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
 void ff_h264_idct_add16_neon(uint8_t *dst, const int *block_offset,
                              int16_t *block, int stride,
-                             const uint8_t nnzc[5 * 8]);
+                             const uint8_t nnzc[6*8]);
 void ff_h264_idct_add16intra_neon(uint8_t *dst, const int *block_offset,
                                   int16_t *block, int stride,
-                                  const uint8_t nnzc[5 * 8]);
+                                  const uint8_t nnzc[6*8]);
 void ff_h264_idct_add8_neon(uint8_t **dest, const int *block_offset,
                             int16_t *block, int stride,
-                            const uint8_t nnzc[15 * 8]);
+                            const uint8_t nnzc[6*8]);

 void ff_h264_idct8_add_neon(uint8_t *dst, int16_t *block, int stride);
 void ff_h264_idct8_dc_add_neon(uint8_t *dst, int16_t *block, int stride);
 void ff_h264_idct8_add4_neon(uint8_t *dst, const int *block_offset,
                              int16_t *block, int stride,
-                             const uint8_t nnzc[5 * 8]);
+                             const uint8_t nnzc[6*8]);

-void ff_h264_v_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                        int beta, int8_t *tc0);
-void ff_h264_h_loop_filter_luma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                        int beta, int8_t *tc0);
-void ff_h264_v_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                              int beta);
-void ff_h264_h_loop_filter_luma_intra_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                              int beta);
-void ff_h264_v_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                          int beta, int8_t *tc0);
-void ff_h264_h_loop_filter_chroma_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                          int beta, int8_t *tc0);
-void ff_h264_h_loop_filter_chroma422_neon_10(uint8_t *pix, ptrdiff_t stride, int alpha,
-                                             int beta, int8_t *tc0);
-void ff_h264_v_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
-                                                int alpha, int beta);
-void ff_h264_h_loop_filter_chroma_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
-                                                int alpha, int beta);
-void ff_h264_h_loop_filter_chroma422_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
-                                                   int alpha, int beta);
-void ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10(uint8_t *pix, ptrdiff_t stride,
-                                                      int alpha, int beta);
-
 av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
                                      const int chroma_format_idc)
@@ -148,19 +125,5 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
         c->h264_idct8_add = ff_h264_idct8_add_neon;
         c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
         c->h264_idct8_add4 = ff_h264_idct8_add4_neon;
-    } else if (have_neon(cpu_flags) && bit_depth == 10) {
-        c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon_10;
-        c->h264_v_loop_filter_chroma_intra = ff_h264_v_loop_filter_chroma_intra_neon_10;
-
-        if (chroma_format_idc <= 1) {
-            c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon_10;
-            c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
-            c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10;
-        } else {
-            c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma422_neon_10;
-            c->h264_h_loop_filter_chroma_mbaff = ff_h264_h_loop_filter_chroma_neon_10;
-            c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma422_intra_neon_10;
-            c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_intra_neon_10;
-        }
     }
 }
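The hunk above only changes which routines are registered; the mechanism itself is a table of function pointers filled once at init time from the detected CPU features and the stream's bit depth. A minimal C sketch of that dispatch pattern follows for orientation; the struct, flag and helper names are simplified assumptions for illustration, not the actual FFmpeg API.

/* Illustrative sketch of runtime SIMD dispatch, loosely modelled on the
 * ff_h264dsp_init_aarch64() pattern in the hunk above. Type, flag and
 * function names here are simplified assumptions, not the real FFmpeg API. */
#include <stdint.h>

typedef struct MiniDSPContext {
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
} MiniDSPContext;

static void idct_add_c(uint8_t *dst, int16_t *block, int stride)    { /* portable fallback */ }
static void idct_add_neon(uint8_t *dst, int16_t *block, int stride) { /* NEON-accelerated path */ }

#define MINI_CPU_FLAG_NEON 0x1
static int mini_get_cpu_flags(void) { return MINI_CPU_FLAG_NEON; } /* stand-in for av_get_cpu_flags() */

static void mini_dsp_init(MiniDSPContext *c, int bit_depth)
{
    int cpu_flags = mini_get_cpu_flags();

    c->idct_add = idct_add_c;                 /* always start from the C path       */
    if ((cpu_flags & MINI_CPU_FLAG_NEON) && bit_depth == 8)
        c->idct_add = idct_add_neon;          /* override only when NEON and 8-bit  */
}
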
@@ -110,6 +110,7 @@

 function ff_h264_v_loop_filter_luma_neon, export=1
         h264_loop_filter_start
+        sxtw x1, w1

         ld1 {v0.16B}, [x0], x1
         ld1 {v2.16B}, [x0], x1
@@ -133,6 +134,7 @@ endfunc

 function ff_h264_h_loop_filter_luma_neon, export=1
         h264_loop_filter_start
+        sxtw x1, w1

         sub x0, x0, #4
         ld1 {v6.8B}, [x0], x1
@@ -182,198 +184,199 @@ endfunc


 .macro h264_loop_filter_start_intra
         orr w4, w2, w3
         cbnz w4, 1f
         ret
 1:
+        sxtw x1, w1
         dup v30.16b, w2 // alpha
         dup v31.16b, w3 // beta
 .endm
|
|
||||||
.macro h264_loop_filter_luma_intra
|
.macro h264_loop_filter_luma_intra
|
||||||
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
|
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
|
||||||
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
|
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
|
||||||
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
|
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
|
||||||
cmhi v19.16b, v30.16b, v16.16b // < alpha
|
cmhi v19.16b, v30.16b, v16.16b // < alpha
|
||||||
cmhi v17.16b, v31.16b, v17.16b // < beta
|
cmhi v17.16b, v31.16b, v17.16b // < beta
|
||||||
cmhi v18.16b, v31.16b, v18.16b // < beta
|
cmhi v18.16b, v31.16b, v18.16b // < beta
|
||||||
|
|
||||||
movi v29.16b, #2
|
movi v29.16b, #2
|
||||||
ushr v30.16b, v30.16b, #2 // alpha >> 2
|
ushr v30.16b, v30.16b, #2 // alpha >> 2
|
||||||
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
|
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
|
||||||
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
|
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
|
||||||
|
|
||||||
and v19.16b, v19.16b, v17.16b
|
and v19.16b, v19.16b, v17.16b
|
||||||
and v19.16b, v19.16b, v18.16b
|
and v19.16b, v19.16b, v18.16b
|
||||||
shrn v20.8b, v19.8h, #4
|
shrn v20.8b, v19.8h, #4
|
||||||
mov x4, v20.d[0]
|
mov x4, v20.d[0]
|
||||||
cbz x4, 9f
|
cbz x4, 9f
|
||||||
|
|
||||||
ushll v20.8h, v6.8b, #1
|
ushll v20.8h, v6.8b, #1
|
||||||
ushll v22.8h, v1.8b, #1
|
ushll v22.8h, v1.8b, #1
|
||||||
ushll2 v21.8h, v6.16b, #1
|
ushll2 v21.8h, v6.16b, #1
|
||||||
ushll2 v23.8h, v1.16b, #1
|
ushll2 v23.8h, v1.16b, #1
|
||||||
uaddw v20.8h, v20.8h, v7.8b
|
uaddw v20.8h, v20.8h, v7.8b
|
||||||
uaddw v22.8h, v22.8h, v0.8b
|
uaddw v22.8h, v22.8h, v0.8b
|
||||||
uaddw2 v21.8h, v21.8h, v7.16b
|
uaddw2 v21.8h, v21.8h, v7.16b
|
||||||
uaddw2 v23.8h, v23.8h, v0.16b
|
uaddw2 v23.8h, v23.8h, v0.16b
|
||||||
uaddw v20.8h, v20.8h, v1.8b
|
uaddw v20.8h, v20.8h, v1.8b
|
||||||
uaddw v22.8h, v22.8h, v6.8b
|
uaddw v22.8h, v22.8h, v6.8b
|
||||||
uaddw2 v21.8h, v21.8h, v1.16b
|
uaddw2 v21.8h, v21.8h, v1.16b
|
||||||
uaddw2 v23.8h, v23.8h, v6.16b
|
uaddw2 v23.8h, v23.8h, v6.16b
|
||||||
|
|
||||||
rshrn v24.8b, v20.8h, #2 // p0'_1
|
rshrn v24.8b, v20.8h, #2 // p0'_1
|
||||||
rshrn v25.8b, v22.8h, #2 // q0'_1
|
rshrn v25.8b, v22.8h, #2 // q0'_1
|
||||||
rshrn2 v24.16b, v21.8h, #2 // p0'_1
|
rshrn2 v24.16b, v21.8h, #2 // p0'_1
|
||||||
rshrn2 v25.16b, v23.8h, #2 // q0'_1
|
rshrn2 v25.16b, v23.8h, #2 // q0'_1
|
||||||
|
|
||||||
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
|
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
|
||||||
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
|
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
|
||||||
cmhi v17.16b, v31.16b, v17.16b // < beta
|
cmhi v17.16b, v31.16b, v17.16b // < beta
|
||||||
cmhi v18.16b, v31.16b, v18.16b // < beta
|
cmhi v18.16b, v31.16b, v18.16b // < beta
|
||||||
|
|
||||||
and v17.16b, v16.16b, v17.16b // if_2 && if_3
|
and v17.16b, v16.16b, v17.16b // if_2 && if_3
|
||||||
and v18.16b, v16.16b, v18.16b // if_2 && if_4
|
and v18.16b, v16.16b, v18.16b // if_2 && if_4
|
||||||
|
|
||||||
not v30.16b, v17.16b
|
not v30.16b, v17.16b
|
||||||
not v31.16b, v18.16b
|
not v31.16b, v18.16b
|
||||||
|
|
||||||
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
|
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
|
||||||
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
|
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
|
||||||
|
|
||||||
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
|
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
|
||||||
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
|
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
|
||||||
|
|
||||||
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
|
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
|
||||||
uaddl v26.8h, v5.8b, v7.8b
|
uaddl v26.8h, v5.8b, v7.8b
|
||||||
uaddl2 v27.8h, v5.16b, v7.16b
|
uaddl2 v27.8h, v5.16b, v7.16b
|
||||||
uaddw v26.8h, v26.8h, v0.8b
|
uaddw v26.8h, v26.8h, v0.8b
|
||||||
uaddw2 v27.8h, v27.8h, v0.16b
|
uaddw2 v27.8h, v27.8h, v0.16b
|
||||||
add v20.8h, v20.8h, v26.8h
|
add v20.8h, v20.8h, v26.8h
|
||||||
add v21.8h, v21.8h, v27.8h
|
add v21.8h, v21.8h, v27.8h
|
||||||
uaddw v20.8h, v20.8h, v0.8b
|
uaddw v20.8h, v20.8h, v0.8b
|
||||||
uaddw2 v21.8h, v21.8h, v0.16b
|
uaddw2 v21.8h, v21.8h, v0.16b
|
||||||
rshrn v20.8b, v20.8h, #3 // p0'_2
|
rshrn v20.8b, v20.8h, #3 // p0'_2
|
||||||
rshrn2 v20.16b, v21.8h, #3 // p0'_2
|
rshrn2 v20.16b, v21.8h, #3 // p0'_2
|
||||||
uaddw v26.8h, v26.8h, v6.8b
|
uaddw v26.8h, v26.8h, v6.8b
|
||||||
uaddw2 v27.8h, v27.8h, v6.16b
|
uaddw2 v27.8h, v27.8h, v6.16b
|
||||||
rshrn v21.8b, v26.8h, #2 // p1'_2
|
rshrn v21.8b, v26.8h, #2 // p1'_2
|
||||||
rshrn2 v21.16b, v27.8h, #2 // p1'_2
|
rshrn2 v21.16b, v27.8h, #2 // p1'_2
|
||||||
uaddl v28.8h, v4.8b, v5.8b
|
uaddl v28.8h, v4.8b, v5.8b
|
||||||
uaddl2 v29.8h, v4.16b, v5.16b
|
uaddl2 v29.8h, v4.16b, v5.16b
|
||||||
shl v28.8h, v28.8h, #1
|
shl v28.8h, v28.8h, #1
|
||||||
shl v29.8h, v29.8h, #1
|
shl v29.8h, v29.8h, #1
|
||||||
add v28.8h, v28.8h, v26.8h
|
add v28.8h, v28.8h, v26.8h
|
||||||
add v29.8h, v29.8h, v27.8h
|
add v29.8h, v29.8h, v27.8h
|
||||||
rshrn v19.8b, v28.8h, #3 // p2'_2
|
rshrn v19.8b, v28.8h, #3 // p2'_2
|
||||||
rshrn2 v19.16b, v29.8h, #3 // p2'_2
|
rshrn2 v19.16b, v29.8h, #3 // p2'_2
|
||||||
|
|
||||||
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
|
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
|
||||||
uaddl v26.8h, v2.8b, v0.8b
|
uaddl v26.8h, v2.8b, v0.8b
|
||||||
uaddl2 v27.8h, v2.16b, v0.16b
|
uaddl2 v27.8h, v2.16b, v0.16b
|
||||||
uaddw v26.8h, v26.8h, v7.8b
|
uaddw v26.8h, v26.8h, v7.8b
|
||||||
uaddw2 v27.8h, v27.8h, v7.16b
|
uaddw2 v27.8h, v27.8h, v7.16b
|
||||||
add v22.8h, v22.8h, v26.8h
|
add v22.8h, v22.8h, v26.8h
|
||||||
add v23.8h, v23.8h, v27.8h
|
add v23.8h, v23.8h, v27.8h
|
||||||
uaddw v22.8h, v22.8h, v7.8b
|
uaddw v22.8h, v22.8h, v7.8b
|
||||||
uaddw2 v23.8h, v23.8h, v7.16b
|
uaddw2 v23.8h, v23.8h, v7.16b
|
||||||
rshrn v22.8b, v22.8h, #3 // q0'_2
|
rshrn v22.8b, v22.8h, #3 // q0'_2
|
||||||
rshrn2 v22.16b, v23.8h, #3 // q0'_2
|
rshrn2 v22.16b, v23.8h, #3 // q0'_2
|
||||||
uaddw v26.8h, v26.8h, v1.8b
|
uaddw v26.8h, v26.8h, v1.8b
|
||||||
uaddw2 v27.8h, v27.8h, v1.16b
|
uaddw2 v27.8h, v27.8h, v1.16b
|
||||||
rshrn v23.8b, v26.8h, #2 // q1'_2
|
rshrn v23.8b, v26.8h, #2 // q1'_2
|
||||||
rshrn2 v23.16b, v27.8h, #2 // q1'_2
|
rshrn2 v23.16b, v27.8h, #2 // q1'_2
|
||||||
uaddl v28.8h, v2.8b, v3.8b
|
uaddl v28.8h, v2.8b, v3.8b
|
||||||
uaddl2 v29.8h, v2.16b, v3.16b
|
uaddl2 v29.8h, v2.16b, v3.16b
|
||||||
shl v28.8h, v28.8h, #1
|
shl v28.8h, v28.8h, #1
|
||||||
shl v29.8h, v29.8h, #1
|
shl v29.8h, v29.8h, #1
|
||||||
add v28.8h, v28.8h, v26.8h
|
add v28.8h, v28.8h, v26.8h
|
||||||
add v29.8h, v29.8h, v27.8h
|
add v29.8h, v29.8h, v27.8h
|
||||||
rshrn v26.8b, v28.8h, #3 // q2'_2
|
rshrn v26.8b, v28.8h, #3 // q2'_2
|
||||||
rshrn2 v26.16b, v29.8h, #3 // q2'_2
|
rshrn2 v26.16b, v29.8h, #3 // q2'_2
|
||||||
|
|
||||||
bit v7.16b, v24.16b, v30.16b // p0'_1
|
bit v7.16b, v24.16b, v30.16b // p0'_1
|
||||||
bit v0.16b, v25.16b, v31.16b // q0'_1
|
bit v0.16b, v25.16b, v31.16b // q0'_1
|
||||||
bit v7.16b, v20.16b, v17.16b // p0'_2
|
bit v7.16b, v20.16b, v17.16b // p0'_2
|
||||||
bit v6.16b, v21.16b, v17.16b // p1'_2
|
bit v6.16b, v21.16b, v17.16b // p1'_2
|
||||||
bit v5.16b, v19.16b, v17.16b // p2'_2
|
bit v5.16b, v19.16b, v17.16b // p2'_2
|
||||||
bit v0.16b, v22.16b, v18.16b // q0'_2
|
bit v0.16b, v22.16b, v18.16b // q0'_2
|
||||||
bit v1.16b, v23.16b, v18.16b // q1'_2
|
bit v1.16b, v23.16b, v18.16b // q1'_2
|
||||||
bit v2.16b, v26.16b, v18.16b // q2'_2
|
bit v2.16b, v26.16b, v18.16b // q2'_2
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
function ff_h264_v_loop_filter_luma_intra_neon, export=1
|
function ff_h264_v_loop_filter_luma_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
|
|
||||||
ld1 {v0.16b}, [x0], x1 // q0
|
ld1 {v0.16b}, [x0], x1 // q0
|
||||||
ld1 {v1.16b}, [x0], x1 // q1
|
ld1 {v1.16b}, [x0], x1 // q1
|
||||||
ld1 {v2.16b}, [x0], x1 // q2
|
ld1 {v2.16b}, [x0], x1 // q2
|
||||||
ld1 {v3.16b}, [x0], x1 // q3
|
ld1 {v3.16b}, [x0], x1 // q3
|
||||||
sub x0, x0, x1, lsl #3
|
sub x0, x0, x1, lsl #3
|
||||||
ld1 {v4.16b}, [x0], x1 // p3
|
ld1 {v4.16b}, [x0], x1 // p3
|
||||||
ld1 {v5.16b}, [x0], x1 // p2
|
ld1 {v5.16b}, [x0], x1 // p2
|
||||||
ld1 {v6.16b}, [x0], x1 // p1
|
ld1 {v6.16b}, [x0], x1 // p1
|
||||||
ld1 {v7.16b}, [x0] // p0
|
ld1 {v7.16b}, [x0] // p0
|
||||||
|
|
||||||
h264_loop_filter_luma_intra
|
h264_loop_filter_luma_intra
|
||||||
|
|
||||||
sub x0, x0, x1, lsl #1
|
sub x0, x0, x1, lsl #1
|
||||||
st1 {v5.16b}, [x0], x1 // p2
|
st1 {v5.16b}, [x0], x1 // p2
|
||||||
st1 {v6.16b}, [x0], x1 // p1
|
st1 {v6.16b}, [x0], x1 // p1
|
||||||
st1 {v7.16b}, [x0], x1 // p0
|
st1 {v7.16b}, [x0], x1 // p0
|
||||||
st1 {v0.16b}, [x0], x1 // q0
|
st1 {v0.16b}, [x0], x1 // q0
|
||||||
st1 {v1.16b}, [x0], x1 // q1
|
st1 {v1.16b}, [x0], x1 // q1
|
||||||
st1 {v2.16b}, [x0] // q2
|
st1 {v2.16b}, [x0] // q2
|
||||||
9:
|
9:
|
||||||
ret
|
ret
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_luma_intra_neon, export=1
|
function ff_h264_h_loop_filter_luma_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
|
|
||||||
sub x0, x0, #4
|
sub x0, x0, #4
|
||||||
ld1 {v4.8b}, [x0], x1
|
ld1 {v4.8b}, [x0], x1
|
||||||
ld1 {v5.8b}, [x0], x1
|
ld1 {v5.8b}, [x0], x1
|
||||||
ld1 {v6.8b}, [x0], x1
|
ld1 {v6.8b}, [x0], x1
|
||||||
ld1 {v7.8b}, [x0], x1
|
ld1 {v7.8b}, [x0], x1
|
||||||
ld1 {v0.8b}, [x0], x1
|
ld1 {v0.8b}, [x0], x1
|
||||||
ld1 {v1.8b}, [x0], x1
|
ld1 {v1.8b}, [x0], x1
|
||||||
ld1 {v2.8b}, [x0], x1
|
ld1 {v2.8b}, [x0], x1
|
||||||
ld1 {v3.8b}, [x0], x1
|
ld1 {v3.8b}, [x0], x1
|
||||||
ld1 {v4.d}[1], [x0], x1
|
ld1 {v4.d}[1], [x0], x1
|
||||||
ld1 {v5.d}[1], [x0], x1
|
ld1 {v5.d}[1], [x0], x1
|
||||||
ld1 {v6.d}[1], [x0], x1
|
ld1 {v6.d}[1], [x0], x1
|
||||||
ld1 {v7.d}[1], [x0], x1
|
ld1 {v7.d}[1], [x0], x1
|
||||||
ld1 {v0.d}[1], [x0], x1
|
ld1 {v0.d}[1], [x0], x1
|
||||||
ld1 {v1.d}[1], [x0], x1
|
ld1 {v1.d}[1], [x0], x1
|
||||||
ld1 {v2.d}[1], [x0], x1
|
ld1 {v2.d}[1], [x0], x1
|
||||||
ld1 {v3.d}[1], [x0], x1
|
ld1 {v3.d}[1], [x0], x1
|
||||||
|
|
||||||
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
||||||
|
|
||||||
h264_loop_filter_luma_intra
|
h264_loop_filter_luma_intra
|
||||||
|
|
||||||
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
||||||
|
|
||||||
sub x0, x0, x1, lsl #4
|
sub x0, x0, x1, lsl #4
|
||||||
st1 {v4.8b}, [x0], x1
|
st1 {v4.8b}, [x0], x1
|
||||||
st1 {v5.8b}, [x0], x1
|
st1 {v5.8b}, [x0], x1
|
||||||
st1 {v6.8b}, [x0], x1
|
st1 {v6.8b}, [x0], x1
|
||||||
st1 {v7.8b}, [x0], x1
|
st1 {v7.8b}, [x0], x1
|
||||||
st1 {v0.8b}, [x0], x1
|
st1 {v0.8b}, [x0], x1
|
||||||
st1 {v1.8b}, [x0], x1
|
st1 {v1.8b}, [x0], x1
|
||||||
st1 {v2.8b}, [x0], x1
|
st1 {v2.8b}, [x0], x1
|
||||||
st1 {v3.8b}, [x0], x1
|
st1 {v3.8b}, [x0], x1
|
||||||
st1 {v4.d}[1], [x0], x1
|
st1 {v4.d}[1], [x0], x1
|
||||||
st1 {v5.d}[1], [x0], x1
|
st1 {v5.d}[1], [x0], x1
|
||||||
st1 {v6.d}[1], [x0], x1
|
st1 {v6.d}[1], [x0], x1
|
||||||
st1 {v7.d}[1], [x0], x1
|
st1 {v7.d}[1], [x0], x1
|
||||||
st1 {v0.d}[1], [x0], x1
|
st1 {v0.d}[1], [x0], x1
|
||||||
st1 {v1.d}[1], [x0], x1
|
st1 {v1.d}[1], [x0], x1
|
||||||
st1 {v2.d}[1], [x0], x1
|
st1 {v2.d}[1], [x0], x1
|
||||||
st1 {v3.d}[1], [x0], x1
|
st1 {v3.d}[1], [x0], x1
|
||||||
9:
|
9:
|
||||||
ret
|
ret
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
.macro h264_loop_filter_chroma
|
.macro h264_loop_filter_chroma
|
||||||
@@ -411,6 +414,7 @@ endfunc

 function ff_h264_v_loop_filter_chroma_neon, export=1
         h264_loop_filter_start
+        sxtw x1, w1

         sub x0, x0, x1, lsl #1
         ld1 {v18.8B}, [x0], x1
@@ -429,6 +433,7 @@ endfunc

 function ff_h264_h_loop_filter_chroma_neon, export=1
         h264_loop_filter_start
+        sxtw x1, w1

         sub x0, x0, #2
 h_loop_filter_chroma420:
@@ -461,6 +466,7 @@ h_loop_filter_chroma420:
 endfunc

 function ff_h264_h_loop_filter_chroma422_neon, export=1
+        sxtw x1, w1
         h264_loop_filter_start
         add x5, x0, x1
         sub x0, x0, #2
@@ -474,113 +480,113 @@ function ff_h264_h_loop_filter_chroma422_neon, export=1
 endfunc

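All of the chroma and luma loop-filter routines touched above implement the standard H.264 normal-edge deblocking rule: an edge is filtered only when |p0 - q0| < alpha, |p1 - p0| < beta and |q1 - q0| < beta, and the applied correction is clamped to +/-tc, which is exactly the set of comparisons and clamps the NEON code performs on 8 or 16 pixels at once. A scalar sketch of that core step follows for orientation (8-bit samples, simplified; not the exact FFmpeg implementation).

/* Scalar sketch of the normal (bS < 4) H.264 deblocking step that the NEON
 * loop filters above vectorize. One pixel position per call, 8-bit samples;
 * simplified for illustration. */
#include <stdlib.h>

static int clip3(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }

static void filter_edge_pixel(unsigned char *p1, unsigned char *p0,
                              unsigned char *q0, unsigned char *q1,
                              int alpha, int beta, int tc)
{
    if (abs(*p0 - *q0) < alpha && abs(*p1 - *p0) < beta && abs(*q1 - *q0) < beta) {
        /* delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc) */
        int delta = clip3((((*q0 - *p0) << 2) + (*p1 - *q1) + 4) >> 3, -tc, tc);
        *p0 = (unsigned char)clip3(*p0 + delta, 0, 255);
        *q0 = (unsigned char)clip3(*q0 - delta, 0, 255);
    }
}
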
.macro h264_loop_filter_chroma_intra
|
.macro h264_loop_filter_chroma_intra
|
||||||
uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
|
uabd v26.8b, v16.8b, v17.8b // abs(p0 - q0)
|
||||||
uabd v27.8b, v18.8b, v16.8b // abs(p1 - p0)
|
uabd v27.8b, v18.8b, v16.8b // abs(p1 - p0)
|
||||||
uabd v28.8b, v19.8b, v17.8b // abs(q1 - q0)
|
uabd v28.8b, v19.8b, v17.8b // abs(q1 - q0)
|
||||||
cmhi v26.8b, v30.8b, v26.8b // < alpha
|
cmhi v26.8b, v30.8b, v26.8b // < alpha
|
||||||
cmhi v27.8b, v31.8b, v27.8b // < beta
|
cmhi v27.8b, v31.8b, v27.8b // < beta
|
||||||
cmhi v28.8b, v31.8b, v28.8b // < beta
|
cmhi v28.8b, v31.8b, v28.8b // < beta
|
||||||
and v26.8b, v26.8b, v27.8b
|
and v26.8b, v26.8b, v27.8b
|
||||||
and v26.8b, v26.8b, v28.8b
|
and v26.8b, v26.8b, v28.8b
|
||||||
mov x2, v26.d[0]
|
mov x2, v26.d[0]
|
||||||
|
|
||||||
ushll v4.8h, v18.8b, #1
|
ushll v4.8h, v18.8b, #1
|
||||||
ushll v6.8h, v19.8b, #1
|
ushll v6.8h, v19.8b, #1
|
||||||
cbz x2, 9f
|
cbz x2, 9f
|
||||||
uaddl v20.8h, v16.8b, v19.8b
|
uaddl v20.8h, v16.8b, v19.8b
|
||||||
uaddl v22.8h, v17.8b, v18.8b
|
uaddl v22.8h, v17.8b, v18.8b
|
||||||
add v20.8h, v20.8h, v4.8h
|
add v20.8h, v20.8h, v4.8h
|
||||||
add v22.8h, v22.8h, v6.8h
|
add v22.8h, v22.8h, v6.8h
|
||||||
uqrshrn v24.8b, v20.8h, #2
|
uqrshrn v24.8b, v20.8h, #2
|
||||||
uqrshrn v25.8b, v22.8h, #2
|
uqrshrn v25.8b, v22.8h, #2
|
||||||
bit v16.8b, v24.8b, v26.8b
|
bit v16.8b, v24.8b, v26.8b
|
||||||
bit v17.8b, v25.8b, v26.8b
|
bit v17.8b, v25.8b, v26.8b
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
function ff_h264_v_loop_filter_chroma_intra_neon, export=1
|
function ff_h264_v_loop_filter_chroma_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
|
|
||||||
sub x0, x0, x1, lsl #1
|
sub x0, x0, x1, lsl #1
|
||||||
ld1 {v18.8b}, [x0], x1
|
ld1 {v18.8b}, [x0], x1
|
||||||
ld1 {v16.8b}, [x0], x1
|
ld1 {v16.8b}, [x0], x1
|
||||||
ld1 {v17.8b}, [x0], x1
|
ld1 {v17.8b}, [x0], x1
|
||||||
ld1 {v19.8b}, [x0]
|
ld1 {v19.8b}, [x0]
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra
|
h264_loop_filter_chroma_intra
|
||||||
|
|
||||||
sub x0, x0, x1, lsl #1
|
sub x0, x0, x1, lsl #1
|
||||||
st1 {v16.8b}, [x0], x1
|
st1 {v16.8b}, [x0], x1
|
||||||
st1 {v17.8b}, [x0], x1
|
st1 {v17.8b}, [x0], x1
|
||||||
|
|
||||||
9:
|
9:
|
||||||
ret
|
ret
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma_mbaff_intra_neon, export=1
|
function ff_h264_h_loop_filter_chroma_mbaff_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
|
|
||||||
sub x4, x0, #2
|
sub x4, x0, #2
|
||||||
sub x0, x0, #1
|
sub x0, x0, #1
|
||||||
ld1 {v18.8b}, [x4], x1
|
ld1 {v18.8b}, [x4], x1
|
||||||
ld1 {v16.8b}, [x4], x1
|
ld1 {v16.8b}, [x4], x1
|
||||||
ld1 {v17.8b}, [x4], x1
|
ld1 {v17.8b}, [x4], x1
|
||||||
ld1 {v19.8b}, [x4], x1
|
ld1 {v19.8b}, [x4], x1
|
||||||
|
|
||||||
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
|
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra
|
h264_loop_filter_chroma_intra
|
||||||
|
|
||||||
st2 {v16.b,v17.b}[0], [x0], x1
|
st2 {v16.b,v17.b}[0], [x0], x1
|
||||||
st2 {v16.b,v17.b}[1], [x0], x1
|
st2 {v16.b,v17.b}[1], [x0], x1
|
||||||
st2 {v16.b,v17.b}[2], [x0], x1
|
st2 {v16.b,v17.b}[2], [x0], x1
|
||||||
st2 {v16.b,v17.b}[3], [x0], x1
|
st2 {v16.b,v17.b}[3], [x0], x1
|
||||||
|
|
||||||
9:
|
9:
|
||||||
ret
|
ret
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma_intra_neon, export=1
|
function ff_h264_h_loop_filter_chroma_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
|
|
||||||
sub x4, x0, #2
|
sub x4, x0, #2
|
||||||
sub x0, x0, #1
|
sub x0, x0, #1
|
||||||
h_loop_filter_chroma420_intra:
|
h_loop_filter_chroma420_intra:
|
||||||
ld1 {v18.8b}, [x4], x1
|
ld1 {v18.8b}, [x4], x1
|
||||||
ld1 {v16.8b}, [x4], x1
|
ld1 {v16.8b}, [x4], x1
|
||||||
ld1 {v17.8b}, [x4], x1
|
ld1 {v17.8b}, [x4], x1
|
||||||
ld1 {v19.8b}, [x4], x1
|
ld1 {v19.8b}, [x4], x1
|
||||||
ld1 {v18.s}[1], [x4], x1
|
ld1 {v18.s}[1], [x4], x1
|
||||||
ld1 {v16.s}[1], [x4], x1
|
ld1 {v16.s}[1], [x4], x1
|
||||||
ld1 {v17.s}[1], [x4], x1
|
ld1 {v17.s}[1], [x4], x1
|
||||||
ld1 {v19.s}[1], [x4], x1
|
ld1 {v19.s}[1], [x4], x1
|
||||||
|
|
||||||
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
|
transpose_4x8B v18, v16, v17, v19, v26, v27, v28, v29
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra
|
h264_loop_filter_chroma_intra
|
||||||
|
|
||||||
st2 {v16.b,v17.b}[0], [x0], x1
|
st2 {v16.b,v17.b}[0], [x0], x1
|
||||||
st2 {v16.b,v17.b}[1], [x0], x1
|
st2 {v16.b,v17.b}[1], [x0], x1
|
||||||
st2 {v16.b,v17.b}[2], [x0], x1
|
st2 {v16.b,v17.b}[2], [x0], x1
|
||||||
st2 {v16.b,v17.b}[3], [x0], x1
|
st2 {v16.b,v17.b}[3], [x0], x1
|
||||||
st2 {v16.b,v17.b}[4], [x0], x1
|
st2 {v16.b,v17.b}[4], [x0], x1
|
||||||
st2 {v16.b,v17.b}[5], [x0], x1
|
st2 {v16.b,v17.b}[5], [x0], x1
|
||||||
st2 {v16.b,v17.b}[6], [x0], x1
|
st2 {v16.b,v17.b}[6], [x0], x1
|
||||||
st2 {v16.b,v17.b}[7], [x0], x1
|
st2 {v16.b,v17.b}[7], [x0], x1
|
||||||
|
|
||||||
9:
|
9:
|
||||||
ret
|
ret
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma422_intra_neon, export=1
|
function ff_h264_h_loop_filter_chroma422_intra_neon, export=1
|
||||||
h264_loop_filter_start_intra
|
h264_loop_filter_start_intra
|
||||||
sub x4, x0, #2
|
sub x4, x0, #2
|
||||||
add x5, x0, x1, lsl #3
|
add x5, x0, x1, lsl #3
|
||||||
sub x0, x0, #1
|
sub x0, x0, #1
|
||||||
mov x7, x30
|
mov x7, x30
|
||||||
bl h_loop_filter_chroma420_intra
|
bl h_loop_filter_chroma420_intra
|
||||||
sub x0, x5, #1
|
sub x0, x5, #1
|
||||||
mov x30, x7
|
mov x30, x7
|
||||||
b h_loop_filter_chroma420_intra
|
b h_loop_filter_chroma420_intra
|
||||||
endfunc
|
endfunc
|
||||||
|
|
||||||
 .macro biweight_16 macs, macd
@@ -685,6 +691,7 @@ endfunc

 .macro biweight_func w
 function ff_biweight_h264_pixels_\w\()_neon, export=1
+        sxtw x2, w2
         lsr w8, w5, #31
         add w7, w7, #1
         eor w8, w8, w6, lsr #30
@@ -793,6 +800,7 @@ endfunc

 .macro weight_func w
 function ff_weight_h264_pixels_\w\()_neon, export=1
+        sxtw x1, w1
         cmp w3, #1
         mov w6, #1
         lsl w5, w5, w3
@@ -819,258 +827,3 @@ endfunc
 weight_func 16
 weight_func 8
 weight_func 4
-
.macro h264_loop_filter_start_10
|
|
||||||
cmp w2, #0
|
|
||||||
ldr w6, [x4]
|
|
||||||
ccmp w3, #0, #0, ne
|
|
||||||
lsl w2, w2, #2
|
|
||||||
mov v24.S[0], w6
|
|
||||||
lsl w3, w3, #2
|
|
||||||
and w8, w6, w6, lsl #16
|
|
||||||
b.eq 1f
|
|
||||||
ands w8, w8, w8, lsl #8
|
|
||||||
b.ge 2f
|
|
||||||
1:
|
|
||||||
ret
|
|
||||||
2:
|
|
||||||
.endm
|
|
||||||
|
|
||||||
.macro h264_loop_filter_start_intra_10
|
|
||||||
orr w4, w2, w3
|
|
||||||
cbnz w4, 1f
|
|
||||||
ret
|
|
||||||
1:
|
|
||||||
lsl w2, w2, #2
|
|
||||||
lsl w3, w3, #2
|
|
||||||
dup v30.8h, w2 // alpha
|
|
||||||
dup v31.8h, w3 // beta
|
|
||||||
.endm
|
|
||||||
|
|
||||||
.macro h264_loop_filter_chroma_10
|
|
||||||
dup v22.8h, w2 // alpha
|
|
||||||
dup v23.8h, w3 // beta
|
|
||||||
uxtl v24.8h, v24.8b // tc0
|
|
||||||
|
|
||||||
uabd v26.8h, v16.8h, v0.8h // abs(p0 - q0)
|
|
||||||
uabd v28.8h, v18.8h, v16.8h // abs(p1 - p0)
|
|
||||||
uabd v30.8h, v2.8h, v0.8h // abs(q1 - q0)
|
|
||||||
cmhi v26.8h, v22.8h, v26.8h // < alpha
|
|
||||||
cmhi v28.8h, v23.8h, v28.8h // < beta
|
|
||||||
cmhi v30.8h, v23.8h, v30.8h // < beta
|
|
||||||
|
|
||||||
and v26.16b, v26.16b, v28.16b
|
|
||||||
mov v4.16b, v0.16b
|
|
||||||
sub v4.8h, v4.8h, v16.8h
|
|
||||||
and v26.16b, v26.16b, v30.16b
|
|
||||||
shl v4.8h, v4.8h, #2
|
|
||||||
mov x8, v26.d[0]
|
|
||||||
mov x9, v26.d[1]
|
|
||||||
sli v24.8h, v24.8h, #8
|
|
||||||
uxtl v24.8h, v24.8b
|
|
||||||
add v4.8h, v4.8h, v18.8h
|
|
||||||
adds x8, x8, x9
|
|
||||||
shl v24.8h, v24.8h, #2
|
|
||||||
|
|
||||||
b.eq 9f
|
|
||||||
|
|
||||||
movi v31.8h, #3 // (tc0 - 1) << (BIT_DEPTH - 8)) + 1
|
|
||||||
uqsub v24.8h, v24.8h, v31.8h
|
|
||||||
sub v4.8h, v4.8h, v2.8h
|
|
||||||
srshr v4.8h, v4.8h, #3
|
|
||||||
smin v4.8h, v4.8h, v24.8h
|
|
||||||
neg v25.8h, v24.8h
|
|
||||||
smax v4.8h, v4.8h, v25.8h
|
|
||||||
and v4.16b, v4.16b, v26.16b
|
|
||||||
add v16.8h, v16.8h, v4.8h
|
|
||||||
sub v0.8h, v0.8h, v4.8h
|
|
||||||
|
|
||||||
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
|
|
||||||
movi v5.8h, #0
|
|
||||||
smin v0.8h, v0.8h, v4.8h
|
|
||||||
smin v16.8h, v16.8h, v4.8h
|
|
||||||
smax v0.8h, v0.8h, v5.8h
|
|
||||||
smax v16.8h, v16.8h, v5.8h
|
|
||||||
.endm
|
|
||||||
|
|
||||||
function ff_h264_v_loop_filter_chroma_neon_10, export=1
|
|
||||||
h264_loop_filter_start_10
|
|
||||||
|
|
||||||
mov x10, x0
|
|
||||||
sub x0, x0, x1, lsl #1
|
|
||||||
ld1 {v18.8h}, [x0 ], x1
|
|
||||||
ld1 {v0.8h}, [x10], x1
|
|
||||||
ld1 {v16.8h}, [x0 ], x1
|
|
||||||
ld1 {v2.8h}, [x10]
|
|
||||||
|
|
||||||
h264_loop_filter_chroma_10
|
|
||||||
|
|
||||||
sub x0, x10, x1, lsl #1
|
|
||||||
st1 {v16.8h}, [x0], x1
|
|
||||||
st1 {v0.8h}, [x0], x1
|
|
||||||
9:
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma_neon_10, export=1
|
|
||||||
h264_loop_filter_start_10
|
|
||||||
|
|
||||||
sub x0, x0, #4 // access the 2nd left pixel
|
|
||||||
h_loop_filter_chroma420_10:
|
|
||||||
add x10, x0, x1, lsl #2
|
|
||||||
ld1 {v18.d}[0], [x0 ], x1
|
|
||||||
ld1 {v18.d}[1], [x10], x1
|
|
||||||
ld1 {v16.d}[0], [x0 ], x1
|
|
||||||
ld1 {v16.d}[1], [x10], x1
|
|
||||||
ld1 {v0.d}[0], [x0 ], x1
|
|
||||||
ld1 {v0.d}[1], [x10], x1
|
|
||||||
ld1 {v2.d}[0], [x0 ], x1
|
|
||||||
ld1 {v2.d}[1], [x10], x1
|
|
||||||
|
|
||||||
transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
|
|
||||||
|
|
||||||
h264_loop_filter_chroma_10
|
|
||||||
|
|
||||||
transpose_4x8H v18, v16, v0, v2, v28, v29, v30, v31
|
|
||||||
|
|
||||||
sub x0, x10, x1, lsl #3
|
|
||||||
st1 {v18.d}[0], [x0], x1
|
|
||||||
st1 {v16.d}[0], [x0], x1
|
|
||||||
st1 {v0.d}[0], [x0], x1
|
|
||||||
st1 {v2.d}[0], [x0], x1
|
|
||||||
st1 {v18.d}[1], [x0], x1
|
|
||||||
st1 {v16.d}[1], [x0], x1
|
|
||||||
st1 {v0.d}[1], [x0], x1
|
|
||||||
st1 {v2.d}[1], [x0], x1
|
|
||||||
9:
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma422_neon_10, export=1
|
|
||||||
h264_loop_filter_start_10
|
|
||||||
add x5, x0, x1
|
|
||||||
sub x0, x0, #4
|
|
||||||
add x1, x1, x1
|
|
||||||
mov x7, x30
|
|
||||||
bl h_loop_filter_chroma420_10
|
|
||||||
mov x30, x7
|
|
||||||
sub x0, x5, #4
|
|
||||||
mov v24.s[0], w6
|
|
||||||
b h_loop_filter_chroma420_10
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
.macro h264_loop_filter_chroma_intra_10
|
|
||||||
uabd v26.8h, v16.8h, v17.8h // abs(p0 - q0)
|
|
||||||
uabd v27.8h, v18.8h, v16.8h // abs(p1 - p0)
|
|
||||||
uabd v28.8h, v19.8h, v17.8h // abs(q1 - q0)
|
|
||||||
cmhi v26.8h, v30.8h, v26.8h // < alpha
|
|
||||||
cmhi v27.8h, v31.8h, v27.8h // < beta
|
|
||||||
cmhi v28.8h, v31.8h, v28.8h // < beta
|
|
||||||
and v26.16b, v26.16b, v27.16b
|
|
||||||
and v26.16b, v26.16b, v28.16b
|
|
||||||
mov x2, v26.d[0]
|
|
||||||
mov x3, v26.d[1]
|
|
||||||
|
|
||||||
shl v4.8h, v18.8h, #1
|
|
||||||
shl v6.8h, v19.8h, #1
|
|
||||||
|
|
||||||
adds x2, x2, x3
|
|
||||||
b.eq 9f
|
|
||||||
|
|
||||||
add v20.8h, v16.8h, v19.8h
|
|
||||||
add v22.8h, v17.8h, v18.8h
|
|
||||||
add v20.8h, v20.8h, v4.8h
|
|
||||||
add v22.8h, v22.8h, v6.8h
|
|
||||||
urshr v24.8h, v20.8h, #2
|
|
||||||
urshr v25.8h, v22.8h, #2
|
|
||||||
bit v16.16b, v24.16b, v26.16b
|
|
||||||
bit v17.16b, v25.16b, v26.16b
|
|
||||||
.endm
|
|
||||||
|
|
||||||
function ff_h264_v_loop_filter_chroma_intra_neon_10, export=1
|
|
||||||
h264_loop_filter_start_intra_10
|
|
||||||
mov x9, x0
|
|
||||||
sub x0, x0, x1, lsl #1
|
|
||||||
ld1 {v18.8h}, [x0], x1
|
|
||||||
ld1 {v17.8h}, [x9], x1
|
|
||||||
ld1 {v16.8h}, [x0], x1
|
|
||||||
ld1 {v19.8h}, [x9]
|
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra_10
|
|
||||||
|
|
||||||
sub x0, x9, x1, lsl #1
|
|
||||||
st1 {v16.8h}, [x0], x1
|
|
||||||
st1 {v17.8h}, [x0], x1
|
|
||||||
|
|
||||||
9:
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma_mbaff_intra_neon_10, export=1
|
|
||||||
h264_loop_filter_start_intra_10
|
|
||||||
|
|
||||||
sub x4, x0, #4
|
|
||||||
sub x0, x0, #2
|
|
||||||
add x9, x4, x1, lsl #1
|
|
||||||
ld1 {v18.8h}, [x4], x1
|
|
||||||
ld1 {v17.8h}, [x9], x1
|
|
||||||
ld1 {v16.8h}, [x4], x1
|
|
||||||
ld1 {v19.8h}, [x9], x1
|
|
||||||
|
|
||||||
transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
|
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra_10
|
|
||||||
|
|
||||||
st2 {v16.h,v17.h}[0], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[1], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[2], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[3], [x0], x1
|
|
||||||
|
|
||||||
9:
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma_intra_neon_10, export=1
|
|
||||||
h264_loop_filter_start_intra_10
|
|
||||||
sub x4, x0, #4
|
|
||||||
sub x0, x0, #2
|
|
||||||
h_loop_filter_chroma420_intra_10:
|
|
||||||
add x9, x4, x1, lsl #2
|
|
||||||
ld1 {v18.4h}, [x4], x1
|
|
||||||
ld1 {v18.d}[1], [x9], x1
|
|
||||||
ld1 {v16.4h}, [x4], x1
|
|
||||||
ld1 {v16.d}[1], [x9], x1
|
|
||||||
ld1 {v17.4h}, [x4], x1
|
|
||||||
ld1 {v17.d}[1], [x9], x1
|
|
||||||
ld1 {v19.4h}, [x4], x1
|
|
||||||
ld1 {v19.d}[1], [x9], x1
|
|
||||||
|
|
||||||
transpose_4x8H v18, v16, v17, v19, v26, v27, v28, v29
|
|
||||||
|
|
||||||
h264_loop_filter_chroma_intra_10
|
|
||||||
|
|
||||||
st2 {v16.h,v17.h}[0], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[1], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[2], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[3], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[4], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[5], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[6], [x0], x1
|
|
||||||
st2 {v16.h,v17.h}[7], [x0], x1
|
|
||||||
|
|
||||||
9:
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_h264_h_loop_filter_chroma422_intra_neon_10, export=1
|
|
||||||
h264_loop_filter_start_intra_10
|
|
||||||
sub x4, x0, #4
|
|
||||||
add x5, x0, x1, lsl #3
|
|
||||||
sub x0, x0, #2
|
|
||||||
mov x7, x30
|
|
||||||
bl h_loop_filter_chroma420_intra_10
|
|
||||||
mov x4, x9
|
|
||||||
sub x0, x5, #2
|
|
||||||
mov x30, x7
|
|
||||||
b h_loop_filter_chroma420_intra_10
|
|
||||||
endfunc
|
|
||||||
|
@@ -24,7 +24,6 @@

 function ff_h264_idct_add_neon, export=1
 .L_ff_h264_idct_add_neon:
-        AARCH64_VALID_CALL_TARGET
         ld1 {v0.4H, v1.4H, v2.4H, v3.4H}, [x1]
         sxtw x2, w2
         movi v30.8H, #0
@@ -80,7 +79,6 @@ endfunc

 function ff_h264_idct_dc_add_neon, export=1
 .L_ff_h264_idct_dc_add_neon:
-        AARCH64_VALID_CALL_TARGET
         sxtw x2, w2
         mov w3, #0
         ld1r {v2.8H}, [x1]
@@ -268,7 +266,6 @@ endfunc

 function ff_h264_idct8_add_neon, export=1
 .L_ff_h264_idct8_add_neon:
-        AARCH64_VALID_CALL_TARGET
         movi v19.8H, #0
         sxtw x2, w2
         ld1 {v24.8H, v25.8H}, [x1]
@@ -333,7 +330,6 @@ endfunc

 function ff_h264_idct8_dc_add_neon, export=1
 .L_ff_h264_idct8_dc_add_neon:
-        AARCH64_VALID_CALL_TARGET
         mov w3, #0
         sxtw x2, w2
         ld1r {v31.8H}, [x1]
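ff_h264_idct_add_neon and the 8x8/DC variants above are vectorized forms of the H.264 integer inverse transform followed by an add-and-clip into the destination block. For orientation, a scalar sketch of the 4x4 case is given below; it follows the standard transform definition rather than FFmpeg's exact C reference.

/* Scalar sketch of the 4x4 H.264 inverse transform + add (8-bit output),
 * the operation ff_h264_idct_add_neon accelerates. Illustrative only. */
#include <stdint.h>

static uint8_t clip_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

static void idct4x4_add(uint8_t *dst, const int16_t block[16], int stride)
{
    int tmp[16];

    for (int i = 0; i < 4; i++) {              /* horizontal pass over rows */
        int s0 = block[i*4+0], s1 = block[i*4+1], s2 = block[i*4+2], s3 = block[i*4+3];
        int e0 = s0 + s2,        e1 = s0 - s2;
        int e2 = (s1 >> 1) - s3, e3 = s1 + (s3 >> 1);
        tmp[i*4+0] = e0 + e3;  tmp[i*4+1] = e1 + e2;
        tmp[i*4+2] = e1 - e2;  tmp[i*4+3] = e0 - e3;
    }
    for (int i = 0; i < 4; i++) {              /* vertical pass + rounded add into dst */
        int s0 = tmp[0*4+i], s1 = tmp[1*4+i], s2 = tmp[2*4+i], s3 = tmp[3*4+i];
        int e0 = s0 + s2,        e1 = s0 - s2;
        int e2 = (s1 >> 1) - s3, e3 = s1 + (s3 >> 1);
        dst[0*stride+i] = clip_u8(dst[0*stride+i] + ((e0 + e3 + 32) >> 6));
        dst[1*stride+i] = clip_u8(dst[1*stride+i] + ((e1 + e2 + 32) >> 6));
        dst[2*stride+i] = clip_u8(dst[2*stride+i] + ((e1 - e2 + 32) >> 6));
        dst[3*stride+i] = clip_u8(dst[3*stride+i] + ((e0 - e3 + 32) >> 6));
    }
}
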
|
@@ -45,84 +45,42 @@ void ff_pred8x8_0lt_dc_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred8x8_l00_dc_neon(uint8_t *src, ptrdiff_t stride);
 void ff_pred8x8_0l0_dc_neon(uint8_t *src, ptrdiff_t stride);

-void ff_pred16x16_vert_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred16x16_hor_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred16x16_plane_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred16x16_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred16x16_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-
-void ff_pred8x8_vert_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_hor_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_plane_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_128_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_left_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_top_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_l0t_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_0lt_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_l00_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_0l0_dc_neon_10(uint8_t *src, ptrdiff_t stride);
-
 static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
                                         const int bit_depth,
                                         const int chroma_format_idc)
 {
-    if (bit_depth == 8) {
-        if (chroma_format_idc <= 1) {
-            h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
-            h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
-            if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
-                h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
-            h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
-            if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
-                codec_id != AV_CODEC_ID_VP8) {
-                h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
-                h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
-                h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
-                h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
-                h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
-                h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
-                h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
-            }
-        }
-
-        h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon;
-        h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon;
-        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon;
-        h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
-        h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
-        h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
-        if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
-            codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
-            h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
-    }
-    if (bit_depth == 10) {
-        if (chroma_format_idc <= 1) {
-            h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon_10;
-            h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon_10;
-            if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
-                h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon_10;
-            h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon_10;
-            if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
-                codec_id != AV_CODEC_ID_VP8) {
-                h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon_10;
-                h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon_10;
-                h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon_10;
-                h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon_10;
-                h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon_10;
-                h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon_10;
-                h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon_10;
-            }
-        }
-
-        h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon_10;
-        h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon_10;
-        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon_10;
-        h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon_10;
-        if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
-            codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
-            h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon_10;
-    }
+    const int high_depth = bit_depth > 8;
+
+    if (high_depth)
+        return;
+
+    if (chroma_format_idc <= 1) {
+        h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon;
+        h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon;
+        if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+            h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon;
+        h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
+        if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
+            codec_id != AV_CODEC_ID_VP8) {
+            h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
+            h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
+            h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
+            h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
+            h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon;
+            h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon;
+            h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon;
+        }
+    }
+
+    h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon;
+    h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon;
+    h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon;
+    h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon;
+    h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon;
+    h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon;
+    if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 &&
+        codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8)
+        h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon;
 }

 av_cold void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,
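The hunks that follow move the subs decrement within the store loops of the 8-bit NEON intra predictors. The underlying operation for the 16x16 DC case is simply the average of the 16 pixels above and the 16 pixels to the left, replicated over the block; a scalar sketch (8-bit, both neighbours assumed available) is shown below for orientation.

/* Scalar sketch of 16x16 DC intra prediction, the operation behind
 * ff_pred16x16_dc_neon. Illustrative only; assumes the row above and the
 * column to the left are both available. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void pred16x16_dc(uint8_t *src, ptrdiff_t stride)
{
    int sum = 0;

    for (int i = 0; i < 16; i++)
        sum += src[i - stride] + src[i * stride - 1];   /* top row + left column */

    memset(src, (sum + 16) >> 5, 16);                   /* fill the first row with the DC value */
    for (int i = 1; i < 16; i++)
        memcpy(src + i * stride, src, 16);              /* replicate it down the block */
}
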
@@ -81,8 +81,8 @@ function ff_pred16x16_dc_neon, export=1
 .L_pred16x16_dc_end:
         mov w3, #8
 6:      st1 {v0.16b}, [x0], x1
-        subs w3, w3, #1
         st1 {v0.16b}, [x0], x1
+        subs w3, w3, #1
         b.ne 6b
         ret
 endfunc
@@ -91,8 +91,8 @@ function ff_pred16x16_hor_neon, export=1
         sub x2, x0, #1
         mov w3, #16
 1:      ld1r {v0.16b}, [x2], x1
-        subs w3, w3, #1
         st1 {v0.16b}, [x0], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -102,9 +102,9 @@ function ff_pred16x16_vert_neon, export=1
         add x1, x1, x1
         ld1 {v0.16b}, [x2], x1
         mov w3, #8
-1:      subs w3, w3, #1
-        st1 {v0.16b}, [x0], x1
+1:      st1 {v0.16b}, [x0], x1
         st1 {v0.16b}, [x2], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -158,8 +158,8 @@ function ff_pred16x16_plane_neon, export=1
         add v1.8h, v1.8h, v2.8h
         sqshrun2 v0.16b, v1.8h, #5
         add v1.8h, v1.8h, v3.8h
-        subs w3, w3, #1
         st1 {v0.16b}, [x0], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -175,8 +175,8 @@ function ff_pred8x8_hor_neon, export=1
         sub x2, x0, #1
         mov w3, #8
 1:      ld1r {v0.8b}, [x2], x1
-        subs w3, w3, #1
         st1 {v0.8b}, [x0], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -186,9 +186,9 @@ function ff_pred8x8_vert_neon, export=1
         lsl x1, x1, #1
         ld1 {v0.8b}, [x2], x1
         mov w3, #4
-1:      subs w3, w3, #1
-        st1 {v0.8b}, [x0], x1
+1:      st1 {v0.8b}, [x0], x1
         st1 {v0.8b}, [x2], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -232,9 +232,9 @@ function ff_pred8x8_plane_neon, export=1
         mov w3, #8
 1:
         sqshrun v0.8b, v1.8h, #5
-        subs w3, w3, #1
         add v1.8h, v1.8h, v2.8h
         st1 {v0.8b}, [x0], x1
+        subs w3, w3, #1
         b.ne 1b
         ret
 endfunc
@@ -290,9 +290,9 @@ function ff_pred8x8_dc_neon, export=1
 .L_pred8x8_dc_end:
         mov w3, #4
         add x2, x0, x1, lsl #2
-6:      subs w3, w3, #1
-        st1 {v0.8b}, [x0], x1
+6:      st1 {v0.8b}, [x0], x1
         st1 {v1.8b}, [x2], x1
+        subs w3, w3, #1
         b.ne 6b
         ret
 endfunc
@@ -359,407 +359,3 @@ function ff_pred8x8_0l0_dc_neon, export=1
         dup v1.8b, v1.b[0]
         b .L_pred8x8_dc_end
 endfunc
|
|
||||||
.macro ldcol.16 rd, rs, rt, n=4, hi=0
|
|
||||||
.if \n >= 4 && \hi == 0
|
|
||||||
ld1 {\rd\().h}[0], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[1], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[2], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[3], [\rs], \rt
|
|
||||||
.endif
|
|
||||||
.if \n == 8 || \hi == 1
|
|
||||||
ld1 {\rd\().h}[4], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[5], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[6], [\rs], \rt
|
|
||||||
ld1 {\rd\().h}[7], [\rs], \rt
|
|
||||||
.endif
|
|
||||||
.endm
|
|
||||||
|
|
||||||
// slower than C
|
|
||||||
/*
|
|
||||||
function ff_pred16x16_128_dc_neon_10, export=1
|
|
||||||
movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
|
|
||||||
|
|
||||||
b .L_pred16x16_dc_10_end
|
|
||||||
endfunc
|
|
||||||
*/
|
|
||||||
|
|
||||||
function ff_pred16x16_top_dc_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
|
|
||||||
ld1 {v0.8h, v1.8h}, [x2]
|
|
||||||
|
|
||||||
add v0.8h, v0.8h, v1.8h
|
|
||||||
addv h0, v0.8h
|
|
||||||
|
|
||||||
urshr v0.4h, v0.4h, #4
|
|
||||||
dup v0.8h, v0.h[0]
|
|
||||||
b .L_pred16x16_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
// slower than C
|
|
||||||
/*
|
|
||||||
function ff_pred16x16_left_dc_neon_10, export=1
|
|
||||||
sub x2, x0, #2 // access to the "left" column
|
|
||||||
ldcol.16 v0, x2, x1, 8
|
|
||||||
ldcol.16 v1, x2, x1, 8 // load "left" column
|
|
||||||
|
|
||||||
add v0.8h, v0.8h, v1.8h
|
|
||||||
addv h0, v0.8h
|
|
||||||
|
|
||||||
urshr v0.4h, v0.4h, #4
|
|
||||||
dup v0.8h, v0.h[0]
|
|
||||||
b .L_pred16x16_dc_10_end
|
|
||||||
endfunc
|
|
||||||
*/
|
|
||||||
|
|
||||||
function ff_pred16x16_dc_neon_10, export=1
|
|
||||||
sub x2, x0, x1 // access to the "top" row
|
|
||||||
sub x3, x0, #2 // access to the "left" column
|
|
||||||
|
|
||||||
ld1 {v0.8h, v1.8h}, [x2]
|
|
||||||
ldcol.16 v2, x3, x1, 8
|
|
||||||
ldcol.16 v3, x3, x1, 8 // load pixels in "top" row and "left" col
|
|
||||||
|
|
||||||
add v0.8h, v0.8h, v1.8h
|
|
||||||
add v2.8h, v2.8h, v3.8h
|
|
||||||
add v0.8h, v0.8h, v2.8h
|
|
||||||
addv h0, v0.8h
|
|
||||||
|
|
||||||
urshr v0.4h, v0.4h, #5
|
|
||||||
dup v0.8h, v0.h[0]
|
|
||||||
.L_pred16x16_dc_10_end:
|
|
||||||
mov v1.16b, v0.16b
|
|
||||||
mov w3, #8
|
|
||||||
6: st1 {v0.8h, v1.8h}, [x0], x1
|
|
||||||
subs w3, w3, #1
|
|
||||||
st1 {v0.8h, v1.8h}, [x0], x1
|
|
||||||
b.ne 6b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred16x16_hor_neon_10, export=1
|
|
||||||
sub x2, x0, #2
|
|
||||||
add x3, x0, #16
|
|
||||||
|
|
||||||
mov w4, #16
|
|
||||||
1: ld1r {v0.8h}, [x2], x1
|
|
||||||
subs w4, w4, #1
|
|
||||||
st1 {v0.8h}, [x0], x1
|
|
||||||
st1 {v0.8h}, [x3], x1
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred16x16_vert_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
add x1, x1, x1
|
|
||||||
|
|
||||||
ld1 {v0.8h, v1.8h}, [x2], x1
|
|
||||||
|
|
||||||
mov w3, #8
|
|
||||||
1: subs w3, w3, #1
|
|
||||||
st1 {v0.8h, v1.8h}, [x0], x1
|
|
||||||
st1 {v0.8h, v1.8h}, [x2], x1
|
|
||||||
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred16x16_plane_neon_10, export=1
|
|
||||||
sub x3, x0, x1
|
|
||||||
movrel x4, p16weight
|
|
||||||
add x2, x3, #16
|
|
||||||
sub x3, x3, #2
|
|
||||||
ld1 {v0.8h}, [x3]
|
|
||||||
ld1 {v2.8h}, [x2], x1
|
|
||||||
ldcol.16 v1, x3, x1, 8
|
|
||||||
add x3, x3, x1
|
|
||||||
ldcol.16 v3, x3, x1, 8
|
|
||||||
|
|
||||||
rev64 v16.8h, v0.8h
|
|
||||||
rev64 v17.8h, v1.8h
|
|
||||||
ext v0.16b, v16.16b, v16.16b, #8
|
|
||||||
ext v1.16b, v17.16b, v17.16b, #8
|
|
||||||
|
|
||||||
add v7.8h, v2.8h, v3.8h
|
|
||||||
sub v2.8h, v2.8h, v0.8h
|
|
||||||
sub v3.8h, v3.8h, v1.8h
|
|
||||||
ld1 {v0.8h}, [x4]
|
|
||||||
mul v2.8h, v2.8h, v0.8h
|
|
||||||
mul v3.8h, v3.8h, v0.8h
|
|
||||||
addp v2.8h, v2.8h, v3.8h
|
|
||||||
addp v2.8h, v2.8h, v2.8h
|
|
||||||
addp v2.4h, v2.4h, v2.4h
|
|
||||||
sshll v3.4s, v2.4h, #2
|
|
||||||
saddw v2.4s, v3.4s, v2.4h
|
|
||||||
rshrn v4.4h, v2.4s, #6
|
|
||||||
trn2 v5.4h, v4.4h, v4.4h
|
|
||||||
add v2.4h, v4.4h, v5.4h
|
|
||||||
shl v3.4h, v2.4h, #3
|
|
||||||
ext v7.16b, v7.16b, v7.16b, #14
|
|
||||||
sub v3.4h, v3.4h, v2.4h // 7 * (b + c)
|
|
||||||
add v7.4h, v7.4h, v0.4h
|
|
||||||
shl v2.4h, v7.4h, #4
|
|
||||||
ssubl v2.4s, v2.4h, v3.4h
|
|
||||||
shl v3.4h, v4.4h, #4
|
|
||||||
ext v0.16b, v0.16b, v0.16b, #14
|
|
||||||
ssubl v6.4s, v5.4h, v3.4h
|
|
||||||
|
|
||||||
mov v0.h[0], wzr
|
|
||||||
mul v0.8h, v0.8h, v4.h[0]
|
|
||||||
dup v16.4s, v2.s[0]
|
|
||||||
dup v17.4s, v2.s[0]
|
|
||||||
dup v2.8h, v4.h[0]
|
|
||||||
dup v3.4s, v6.s[0]
|
|
||||||
shl v2.8h, v2.8h, #3
|
|
||||||
saddw v16.4s, v16.4s, v0.4h
|
|
||||||
saddw2 v17.4s, v17.4s, v0.8h
|
|
||||||
saddw v3.4s, v3.4s, v2.4h
|
|
||||||
|
|
||||||
mov w3, #16
|
|
||||||
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
|
|
||||||
1:
|
|
||||||
sqshrun v0.4h, v16.4s, #5
|
|
||||||
sqshrun2 v0.8h, v17.4s, #5
|
|
||||||
saddw v16.4s, v16.4s, v2.4h
|
|
||||||
saddw v17.4s, v17.4s, v2.4h
|
|
||||||
sqshrun v1.4h, v16.4s, #5
|
|
||||||
sqshrun2 v1.8h, v17.4s, #5
|
|
||||||
add v16.4s, v16.4s, v3.4s
|
|
||||||
add v17.4s, v17.4s, v3.4s
|
|
||||||
|
|
||||||
subs w3, w3, #1
|
|
||||||
|
|
||||||
smin v0.8h, v0.8h, v4.8h
|
|
||||||
smin v1.8h, v1.8h, v4.8h
|
|
||||||
|
|
||||||
st1 {v0.8h, v1.8h}, [x0], x1
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_hor_neon_10, export=1
|
|
||||||
sub x2, x0, #2
|
|
||||||
mov w3, #8
|
|
||||||
|
|
||||||
1: ld1r {v0.8h}, [x2], x1
|
|
||||||
subs w3, w3, #1
|
|
||||||
st1 {v0.8h}, [x0], x1
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_vert_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
lsl x1, x1, #1
|
|
||||||
|
|
||||||
ld1 {v0.8h}, [x2], x1
|
|
||||||
mov w3, #4
|
|
||||||
1: subs w3, w3, #1
|
|
||||||
st1 {v0.8h}, [x0], x1
|
|
||||||
st1 {v0.8h}, [x2], x1
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_plane_neon_10, export=1
|
|
||||||
sub x3, x0, x1
|
|
||||||
movrel x4, p8weight
|
|
||||||
movrel x5, p16weight
|
|
||||||
add x2, x3, #8
|
|
||||||
sub x3, x3, #2
|
|
||||||
ld1 {v0.d}[0], [x3]
|
|
||||||
ld1 {v2.d}[0], [x2], x1
|
|
||||||
ldcol.16 v0, x3, x1, hi=1
|
|
||||||
add x3, x3, x1
|
|
||||||
ldcol.16 v3, x3, x1, 4
|
|
||||||
add v7.8h, v2.8h, v3.8h
|
|
||||||
rev64 v0.8h, v0.8h
|
|
||||||
trn1 v2.2d, v2.2d, v3.2d
|
|
||||||
sub v2.8h, v2.8h, v0.8h
|
|
||||||
ld1 {v6.8h}, [x4]
|
|
||||||
mul v2.8h, v2.8h, v6.8h
|
|
||||||
ld1 {v0.8h}, [x5]
|
|
||||||
saddlp v2.4s, v2.8h
|
|
||||||
addp v2.4s, v2.4s, v2.4s
|
|
||||||
shl v3.4s, v2.4s, #4
|
|
||||||
add v2.4s, v3.4s, v2.4s
|
|
||||||
rshrn v5.4h, v2.4s, #5
|
|
||||||
addp v2.4h, v5.4h, v5.4h
|
|
||||||
shl v3.4h, v2.4h, #1
|
|
||||||
add v3.4h, v3.4h, v2.4h
|
|
||||||
rev64 v7.4h, v7.4h
|
|
||||||
add v7.4h, v7.4h, v0.4h
|
|
||||||
shl v2.4h, v7.4h, #4
|
|
||||||
ssubl v2.4s, v2.4h, v3.4h
|
|
||||||
ext v0.16b, v0.16b, v0.16b, #14
|
|
||||||
mov v0.h[0], wzr
|
|
||||||
mul v0.8h, v0.8h, v5.h[0]
|
|
||||||
dup v1.4s, v2.s[0]
|
|
||||||
dup v2.4s, v2.s[0]
|
|
||||||
dup v3.8h, v5.h[1]
|
|
||||||
saddw v1.4s, v1.4s, v0.4h
|
|
||||||
saddw2 v2.4s, v2.4s, v0.8h
|
|
||||||
mov w3, #8
|
|
||||||
mvni v4.8h, #0xFC, lsl #8 // 1023 for clipping
|
|
||||||
1:
|
|
||||||
sqshrun v0.4h, v1.4s, #5
|
|
||||||
sqshrun2 v0.8h, v2.4s, #5
|
|
||||||
|
|
||||||
saddw v1.4s, v1.4s, v3.4h
|
|
||||||
saddw v2.4s, v2.4s, v3.4h
|
|
||||||
|
|
||||||
subs w3, w3, #1
|
|
||||||
|
|
||||||
smin v0.8h, v0.8h, v4.8h
|
|
||||||
|
|
||||||
st1 {v0.8h}, [x0], x1
|
|
||||||
b.ne 1b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_128_dc_neon_10, export=1
|
|
||||||
movi v0.8h, #2, lsl #8 // 512, 1 << (bit_depth - 1)
|
|
||||||
movi v1.8h, #2, lsl #8
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_top_dc_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
ld1 {v0.8h}, [x2]
|
|
||||||
|
|
||||||
addp v0.8h, v0.8h, v0.8h
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
zip1 v0.4h, v0.4h, v0.4h
|
|
||||||
urshr v2.4h, v0.4h, #2
|
|
||||||
zip1 v0.8h, v2.8h, v2.8h
|
|
||||||
zip1 v1.8h, v2.8h, v2.8h
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_left_dc_neon_10, export=1
|
|
||||||
sub x2, x0, #2
|
|
||||||
ldcol.16 v0, x2, x1, 8
|
|
||||||
|
|
||||||
addp v0.8h, v0.8h, v0.8h
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
urshr v2.4h, v0.4h, #2
|
|
||||||
dup v1.8h, v2.h[1]
|
|
||||||
dup v0.8h, v2.h[0]
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_dc_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
sub x3, x0, #2
|
|
||||||
|
|
||||||
ld1 {v0.8h}, [x2]
|
|
||||||
ldcol.16 v1, x3, x1, 8
|
|
||||||
|
|
||||||
addp v0.8h, v0.8h, v0.8h
|
|
||||||
addp v1.8h, v1.8h, v1.8h
|
|
||||||
trn1 v2.2s, v0.2s, v1.2s
|
|
||||||
trn2 v3.2s, v0.2s, v1.2s
|
|
||||||
addp v4.4h, v2.4h, v3.4h
|
|
||||||
addp v5.4h, v4.4h, v4.4h
|
|
||||||
urshr v6.4h, v5.4h, #3
|
|
||||||
urshr v7.4h, v4.4h, #2
|
|
||||||
dup v0.8h, v6.h[0]
|
|
||||||
dup v2.8h, v7.h[2]
|
|
||||||
dup v1.8h, v7.h[3]
|
|
||||||
dup v3.8h, v6.h[1]
|
|
||||||
zip1 v0.2d, v0.2d, v2.2d
|
|
||||||
zip1 v1.2d, v1.2d, v3.2d
|
|
||||||
.L_pred8x8_dc_10_end:
|
|
||||||
mov w3, #4
|
|
||||||
add x2, x0, x1, lsl #2
|
|
||||||
|
|
||||||
6: st1 {v0.8h}, [x0], x1
|
|
||||||
subs w3, w3, #1
|
|
||||||
st1 {v1.8h}, [x2], x1
|
|
||||||
b.ne 6b
|
|
||||||
ret
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_l0t_dc_neon_10, export=1
|
|
||||||
sub x2, x0, x1
|
|
||||||
sub x3, x0, #2
|
|
||||||
|
|
||||||
ld1 {v0.8h}, [x2]
|
|
||||||
ldcol.16 v1, x3, x1, 4
|
|
||||||
|
|
||||||
addp v0.8h, v0.8h, v0.8h
|
|
||||||
addp v1.4h, v1.4h, v1.4h
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
addp v1.4h, v1.4h, v1.4h
|
|
||||||
add v1.4h, v1.4h, v0.4h
|
|
||||||
|
|
||||||
urshr v2.4h, v0.4h, #2
|
|
||||||
urshr v3.4h, v1.4h, #3 // the pred4x4 part
|
|
||||||
|
|
||||||
dup v4.4h, v3.h[0]
|
|
||||||
dup v5.4h, v2.h[0]
|
|
||||||
dup v6.4h, v2.h[1]
|
|
||||||
|
|
||||||
zip1 v0.2d, v4.2d, v6.2d
|
|
||||||
zip1 v1.2d, v5.2d, v6.2d
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_l00_dc_neon_10, export=1
|
|
||||||
sub x2, x0, #2
|
|
||||||
|
|
||||||
ldcol.16 v0, x2, x1, 4
|
|
||||||
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
urshr v0.4h, v0.4h, #2
|
|
||||||
|
|
||||||
movi v1.8h, #2, lsl #8 // 512
|
|
||||||
dup v0.8h, v0.h[0]
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_0lt_dc_neon_10, export=1
|
|
||||||
add x3, x0, x1, lsl #2
|
|
||||||
sub x2, x0, x1
|
|
||||||
sub x3, x3, #2
|
|
||||||
|
|
||||||
ld1 {v0.8h}, [x2]
|
|
||||||
ldcol.16 v1, x3, x1, hi=1
|
|
||||||
|
|
||||||
addp v0.8h, v0.8h, v0.8h
|
|
||||||
addp v1.8h, v1.8h, v1.8h
|
|
||||||
addp v0.4h, v0.4h, v0.4h
|
|
||||||
addp v1.4h, v1.4h, v1.4h
|
|
||||||
zip1 v0.2s, v0.2s, v1.2s
|
|
||||||
add v1.4h, v0.4h, v1.4h
|
|
||||||
|
|
||||||
urshr v2.4h, v0.4h, #2
|
|
||||||
urshr v3.4h, v1.4h, #3
|
|
||||||
|
|
||||||
dup v4.4h, v2.h[0]
|
|
||||||
dup v5.4h, v2.h[3]
|
|
||||||
dup v6.4h, v2.h[1]
|
|
||||||
dup v7.4h, v3.h[1]
|
|
||||||
|
|
||||||
zip1 v0.2d, v4.2d, v6.2d
|
|
||||||
zip1 v1.2d, v5.2d, v7.2d
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
|
||||||
function ff_pred8x8_0l0_dc_neon_10, export=1
|
|
||||||
add x2, x0, x1, lsl #2
|
|
||||||
sub x2, x2, #2
|
|
||||||
|
|
||||||
ldcol.16 v1, x2, x1, 4
|
|
||||||
|
|
||||||
addp v2.8h, v1.8h, v1.8h
|
|
||||||
addp v2.4h, v2.4h, v2.4h
|
|
||||||
urshr v1.4h, v2.4h, #2
|
|
||||||
|
|
||||||
movi v0.8h, #2, lsl #8 // 512
|
|
||||||
dup v1.8h, v1.h[0]
|
|
||||||
b .L_pred8x8_dc_10_end
|
|
||||||
endfunc
|
|
||||||
|
@@ -58,24 +58,6 @@
 .endif
 .endm
 
-//trashes v0-v4
-.macro  lowpass_8_v     r0, r1, r2, r3, r4, r5, r6, d0, d1, narrow=1
-        uaddl           v2.8H, \r2\().8B, \r3\().8B
-        uaddl           v0.8H, \r3\().8B, \r4\().8B
-        uaddl           v4.8H, \r1\().8B, \r4\().8B
-        uaddl           v1.8H, \r2\().8B, \r5\().8B
-        uaddl           \d0\().8H, \r0\().8B, \r5\().8B
-        uaddl           \d1\().8H, \r1\().8B, \r6\().8B
-        mla             \d0\().8H, v2.8H, v6.H[1]
-        mls             \d0\().8H, v4.8H, v6.H[0]
-        mla             \d1\().8H, v0.8H, v6.H[1]
-        mls             \d1\().8H, v1.8H, v6.H[0]
-.if \narrow
-        sqrshrun        \d0\().8B, \d0\().8H, #5
-        sqrshrun        \d1\().8B, \d1\().8H, #5
-.endif
-.endm
-
 //trashes v0-v5, v7, v30-v31
 .macro lowpass_8H r0, r1
         ext             v0.16B, \r0\().16B, \r0\().16B, #2
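Note: the lowpass_8_v macro deleted above is a vertical form of the H.264 six-tap half-pel filter; the mla/mls pairs apply the two middle weights held in v6, and sqrshrun #5 performs the rounded divide by 32 with unsigned saturation. A scalar sketch of one output sample, assuming the standard (1, -5, 20, 20, -5, 1) weights:

    #include <stdint.h>

    /* One half-pel sample from six neighbours a..f: weighted sum, then
     * round, shift right by 5 and clip to 0..255 (what sqrshrun #5 does). */
    static uint8_t h264_lowpass6(int a, int b, int c, int d, int e, int f)
    {
        int v = (a + f) - 5 * (b + e) + 20 * (c + d);
        v = (v + 16) >> 5;
        if (v < 0)   v = 0;
        if (v > 255) v = 255;
        return (uint8_t)v;
    }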
@@ -118,13 +100,18 @@
 .endm
 
 // trashed v0-v7
-.macro lowpass_8.16 r0, r1, r2, r3, r4, r5
-        saddl           v5.4S, \r2\().4H, \r3\().4H
-        saddl2          v1.4S, \r2\().8H, \r3\().8H
-        saddl           v6.4S, \r1\().4H, \r4\().4H
-        saddl2          v2.4S, \r1\().8H, \r4\().8H
-        saddl           v0.4S, \r0\().4H, \r5\().4H
-        saddl2          v4.4S, \r0\().8H, \r5\().8H
+.macro lowpass_8.16 r0, r1, r2
+        ext             v1.16B, \r0\().16B, \r1\().16B, #4
+        ext             v0.16B, \r0\().16B, \r1\().16B, #6
+        saddl           v5.4S, v1.4H, v0.4H
+        ext             v2.16B, \r0\().16B, \r1\().16B, #2
+        saddl2          v1.4S, v1.8H, v0.8H
+        ext             v3.16B, \r0\().16B, \r1\().16B, #8
+        saddl           v6.4S, v2.4H, v3.4H
+        ext             \r1\().16B, \r0\().16B, \r1\().16B, #10
+        saddl2          v2.4S, v2.8H, v3.8H
+        saddl           v0.4S, \r0\().4H, \r1\().4H
+        saddl2          v4.4S, \r0\().8H, \r1\().8H
 
         shl             v3.4S, v5.4S, #4
         shl             v5.4S, v5.4S, #2
@@ -147,7 +134,7 @@
         rshrn           v5.4H, v5.4S, #10
         rshrn2          v5.8H, v1.4S, #10
 
-        sqxtun          \r0\().8B, v5.8H
+        sqxtun          \r2\().8B, v5.8H
 .endm
 
 function put_h264_qpel16_h_lowpass_neon_packed
@@ -182,8 +169,8 @@ function \type\()_h264_qpel8_h_lowpass_neon
         lowpass_8       v28, v29, v16, v17, v28, v16
 .ifc \type,avg
         ld1             {v2.8B}, [x0], x3
-        ld1             {v3.8B}, [x0]
         urhadd          v28.8B, v28.8B, v2.8B
+        ld1             {v3.8B}, [x0]
         urhadd          v16.8B, v16.8B, v3.8B
         sub             x0, x0, x3
 .endif
@@ -223,8 +210,8 @@ function \type\()_h264_qpel8_h_lowpass_l2_neon
         urhadd          v27.8B, v27.8B, v29.8B
 .ifc \type,avg
         ld1             {v2.8B}, [x0], x2
-        ld1             {v3.8B}, [x0]
         urhadd          v26.8B, v26.8B, v2.8B
+        ld1             {v3.8B}, [x0]
         urhadd          v27.8B, v27.8B, v3.8B
         sub             x0, x0, x2
 .endif
@@ -271,39 +258,43 @@ endfunc
 
 function \type\()_h264_qpel8_v_lowpass_neon
         ld1             {v16.8B}, [x1], x3
-        ld1             {v17.8B}, [x1], x3
         ld1             {v18.8B}, [x1], x3
-        ld1             {v19.8B}, [x1], x3
         ld1             {v20.8B}, [x1], x3
-        ld1             {v21.8B}, [x1], x3
         ld1             {v22.8B}, [x1], x3
-        ld1             {v23.8B}, [x1], x3
         ld1             {v24.8B}, [x1], x3
-        ld1             {v25.8B}, [x1], x3
         ld1             {v26.8B}, [x1], x3
-        ld1             {v27.8B}, [x1], x3
-        ld1             {v28.8B}, [x1]
+        ld1             {v28.8B}, [x1], x3
+        ld1             {v30.8B}, [x1], x3
+        ld1             {v17.8B}, [x1], x3
+        ld1             {v19.8B}, [x1], x3
+        ld1             {v21.8B}, [x1], x3
+        ld1             {v23.8B}, [x1], x3
+        ld1             {v25.8B}, [x1]
 
-        lowpass_8_v     v16, v17, v18, v19, v20, v21, v22, v16, v17
-        lowpass_8_v     v18, v19, v20, v21, v22, v23, v24, v18, v19
-        lowpass_8_v     v20, v21, v22, v23, v24, v25, v26, v20, v21
-        lowpass_8_v     v22, v23, v24, v25, v26, v27, v28, v22, v23
+        transpose_8x8B  v16, v18, v20, v22, v24, v26, v28, v30, v0, v1
+        transpose_8x8B  v17, v19, v21, v23, v25, v27, v29, v31, v0, v1
+        lowpass_8       v16, v17, v18, v19, v16, v17
+        lowpass_8       v20, v21, v22, v23, v18, v19
+        lowpass_8       v24, v25, v26, v27, v20, v21
+        lowpass_8       v28, v29, v30, v31, v22, v23
+        transpose_8x8B  v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+
 .ifc \type,avg
         ld1             {v24.8B}, [x0], x2
-        ld1             {v25.8B}, [x0], x2
-        ld1             {v26.8B}, [x0], x2
         urhadd          v16.8B, v16.8B, v24.8B
-        ld1             {v27.8B}, [x0], x2
+        ld1             {v25.8B}, [x0], x2
         urhadd          v17.8B, v17.8B, v25.8B
-        ld1             {v28.8B}, [x0], x2
+        ld1             {v26.8B}, [x0], x2
         urhadd          v18.8B, v18.8B, v26.8B
-        ld1             {v29.8B}, [x0], x2
+        ld1             {v27.8B}, [x0], x2
         urhadd          v19.8B, v19.8B, v27.8B
-        ld1             {v30.8B}, [x0], x2
+        ld1             {v28.8B}, [x0], x2
         urhadd          v20.8B, v20.8B, v28.8B
-        ld1             {v31.8B}, [x0], x2
+        ld1             {v29.8B}, [x0], x2
         urhadd          v21.8B, v21.8B, v29.8B
+        ld1             {v30.8B}, [x0], x2
         urhadd          v22.8B, v22.8B, v30.8B
+        ld1             {v31.8B}, [x0], x2
         urhadd          v23.8B, v23.8B, v31.8B
         sub             x0, x0, x2, lsl #3
 .endif
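Note: the two variants above differ only in how the vertical filter is applied. The restored code transposes the 8x8 block with transpose_8x8B, reuses the horizontal lowpass_8 on what are now rows, and transposes back; the deleted code filters the columns directly with lowpass_8_v and so avoids the two transposes, which is presumably why it had been introduced. A small C sketch of the transpose-filter-transpose idea, with a caller-supplied row filter assumed:

    #include <stdint.h>

    static void transpose8x8(uint8_t m[8][8])
    {
        for (int i = 0; i < 8; i++)
            for (int j = i + 1; j < 8; j++) {
                uint8_t t = m[i][j];
                m[i][j] = m[j][i];
                m[j][i] = t;
            }
    }

    /* Filter every column by transposing, filtering rows, transposing back. */
    static void filter_columns(uint8_t m[8][8], void (*filter_row)(uint8_t row[8]))
    {
        transpose8x8(m);
        for (int i = 0; i < 8; i++)
            filter_row(m[i]);
        transpose8x8(m);
    }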
@@ -344,23 +335,26 @@ endfunc
 
 function \type\()_h264_qpel8_v_lowpass_l2_neon
         ld1             {v16.8B}, [x1], x3
-        ld1             {v17.8B}, [x1], x3
         ld1             {v18.8B}, [x1], x3
-        ld1             {v19.8B}, [x1], x3
         ld1             {v20.8B}, [x1], x3
-        ld1             {v21.8B}, [x1], x3
         ld1             {v22.8B}, [x1], x3
-        ld1             {v23.8B}, [x1], x3
         ld1             {v24.8B}, [x1], x3
-        ld1             {v25.8B}, [x1], x3
         ld1             {v26.8B}, [x1], x3
-        ld1             {v27.8B}, [x1], x3
-        ld1             {v28.8B}, [x1]
+        ld1             {v28.8B}, [x1], x3
+        ld1             {v30.8B}, [x1], x3
+        ld1             {v17.8B}, [x1], x3
+        ld1             {v19.8B}, [x1], x3
+        ld1             {v21.8B}, [x1], x3
+        ld1             {v23.8B}, [x1], x3
+        ld1             {v25.8B}, [x1]
 
-        lowpass_8_v     v16, v17, v18, v19, v20, v21, v22, v16, v17
-        lowpass_8_v     v18, v19, v20, v21, v22, v23, v24, v18, v19
-        lowpass_8_v     v20, v21, v22, v23, v24, v25, v26, v20, v21
-        lowpass_8_v     v22, v23, v24, v25, v26, v27, v28, v22, v23
+        transpose_8x8B  v16, v18, v20, v22, v24, v26, v28, v30, v0, v1
+        transpose_8x8B  v17, v19, v21, v23, v25, v27, v29, v31, v0, v1
+        lowpass_8       v16, v17, v18, v19, v16, v17
+        lowpass_8       v20, v21, v22, v23, v18, v19
+        lowpass_8       v24, v25, v26, v27, v20, v21
+        lowpass_8       v28, v29, v30, v31, v22, v23
+        transpose_8x8B  v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
 
         ld1             {v24.8B}, [x12], x2
         ld1             {v25.8B}, [x12], x2
|
|||||||
|
|
||||||
.ifc \type,avg
|
.ifc \type,avg
|
||||||
ld1 {v24.8B}, [x0], x3
|
ld1 {v24.8B}, [x0], x3
|
||||||
ld1 {v25.8B}, [x0], x3
|
|
||||||
ld1 {v26.8B}, [x0], x3
|
|
||||||
urhadd v16.8B, v16.8B, v24.8B
|
urhadd v16.8B, v16.8B, v24.8B
|
||||||
ld1 {v27.8B}, [x0], x3
|
ld1 {v25.8B}, [x0], x3
|
||||||
urhadd v17.8B, v17.8B, v25.8B
|
urhadd v17.8B, v17.8B, v25.8B
|
||||||
ld1 {v28.8B}, [x0], x3
|
ld1 {v26.8B}, [x0], x3
|
||||||
urhadd v18.8B, v18.8B, v26.8B
|
urhadd v18.8B, v18.8B, v26.8B
|
||||||
ld1 {v29.8B}, [x0], x3
|
ld1 {v27.8B}, [x0], x3
|
||||||
urhadd v19.8B, v19.8B, v27.8B
|
urhadd v19.8B, v19.8B, v27.8B
|
||||||
ld1 {v30.8B}, [x0], x3
|
ld1 {v28.8B}, [x0], x3
|
||||||
urhadd v20.8B, v20.8B, v28.8B
|
urhadd v20.8B, v20.8B, v28.8B
|
||||||
ld1 {v31.8B}, [x0], x3
|
ld1 {v29.8B}, [x0], x3
|
||||||
urhadd v21.8B, v21.8B, v29.8B
|
urhadd v21.8B, v21.8B, v29.8B
|
||||||
|
ld1 {v30.8B}, [x0], x3
|
||||||
urhadd v22.8B, v22.8B, v30.8B
|
urhadd v22.8B, v22.8B, v30.8B
|
||||||
|
ld1 {v31.8B}, [x0], x3
|
||||||
urhadd v23.8B, v23.8B, v31.8B
|
urhadd v23.8B, v23.8B, v31.8B
|
||||||
sub x0, x0, x3, lsl #3
|
sub x0, x0, x3, lsl #3
|
||||||
.endif
|
.endif
|
||||||
@@ -438,17 +432,22 @@ function put_h264_qpel8_hv_lowpass_neon_top
         lowpass_8H      v26, v27
         lowpass_8H      v28, v29
 
-        lowpass_8.16    v16, v17, v18, v19, v20, v21
-        lowpass_8.16    v17, v18, v19, v20, v21, v22
+        transpose_8x8H  v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+        transpose_8x8H  v24, v25, v26, v27, v28, v29, v30, v31, v0, v1
 
-        lowpass_8.16    v18, v19, v20, v21, v22, v23
-        lowpass_8.16    v19, v20, v21, v22, v23, v24
+        lowpass_8.16    v16, v24, v16
+        lowpass_8.16    v17, v25, v17
 
-        lowpass_8.16    v20, v21, v22, v23, v24, v25
-        lowpass_8.16    v21, v22, v23, v24, v25, v26
+        lowpass_8.16    v18, v26, v18
+        lowpass_8.16    v19, v27, v19
 
-        lowpass_8.16    v22, v23, v24, v25, v26, v27
-        lowpass_8.16    v23, v24, v25, v26, v27, v28
+        lowpass_8.16    v20, v28, v20
+        lowpass_8.16    v21, v29, v21
 
+        lowpass_8.16    v22, v30, v22
+        lowpass_8.16    v23, v31, v23
+
+        transpose_8x8B  v16, v17, v18, v19, v20, v21, v22, v23, v0, v1
+
         ret
 endfunc
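Note: put_h264_qpel8_hv_lowpass_neon_top runs the six-tap filter twice, first horizontally into 16-bit intermediates (lowpass_8H) and then vertically over those intermediates (lowpass_8.16); because each pass carries a gain of 32, the final rounding is a single shift by 10 (rshrn #10 followed by sqxtun). A scalar sketch of that second pass, again assuming the standard weights:

    #include <stdint.h>

    /* Vertical pass over 16-bit intermediates a..f that already carry the
     * x32 gain of the horizontal pass; (v + 512) >> 10 mirrors rshrn #10
     * and the clip mirrors sqxtun. */
    static uint8_t h264_lowpass6_pass2(int a, int b, int c, int d, int e, int f)
    {
        int v = (a + f) - 5 * (b + e) + 20 * (c + d);
        v = (v + 512) >> 10;
        if (v < 0)   v = 0;
        if (v > 255) v = 255;
        return (uint8_t)v;
    }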
@@ -459,20 +458,20 @@ function \type\()_h264_qpel8_hv_lowpass_neon
         bl              put_h264_qpel8_hv_lowpass_neon_top
 .ifc \type,avg
         ld1             {v0.8B}, [x0], x2
-        ld1             {v1.8B}, [x0], x2
-        ld1             {v2.8B}, [x0], x2
         urhadd          v16.8B, v16.8B, v0.8B
-        ld1             {v3.8B}, [x0], x2
+        ld1             {v1.8B}, [x0], x2
         urhadd          v17.8B, v17.8B, v1.8B
-        ld1             {v4.8B}, [x0], x2
+        ld1             {v2.8B}, [x0], x2
         urhadd          v18.8B, v18.8B, v2.8B
-        ld1             {v5.8B}, [x0], x2
+        ld1             {v3.8B}, [x0], x2
         urhadd          v19.8B, v19.8B, v3.8B
-        ld1             {v6.8B}, [x0], x2
+        ld1             {v4.8B}, [x0], x2
         urhadd          v20.8B, v20.8B, v4.8B
-        ld1             {v7.8B}, [x0], x2
+        ld1             {v5.8B}, [x0], x2
         urhadd          v21.8B, v21.8B, v5.8B
+        ld1             {v6.8B}, [x0], x2
         urhadd          v22.8B, v22.8B, v6.8B
+        ld1             {v7.8B}, [x0], x2
         urhadd          v23.8B, v23.8B, v7.8B
         sub             x0, x0, x2, lsl #3
 .endif
@@ -512,20 +511,20 @@ function \type\()_h264_qpel8_hv_lowpass_l2_neon
         urhadd          v7.8B, v7.8B, v23.8B
 .ifc \type,avg
         ld1             {v16.8B}, [x0], x3
-        ld1             {v17.8B}, [x0], x3
-        ld1             {v18.8B}, [x0], x3
         urhadd          v0.8B, v0.8B, v16.8B
-        ld1             {v19.8B}, [x0], x3
+        ld1             {v17.8B}, [x0], x3
         urhadd          v1.8B, v1.8B, v17.8B
-        ld1             {v20.8B}, [x0], x3
+        ld1             {v18.8B}, [x0], x3
         urhadd          v2.8B, v2.8B, v18.8B
-        ld1             {v21.8B}, [x0], x3
+        ld1             {v19.8B}, [x0], x3
         urhadd          v3.8B, v3.8B, v19.8B
-        ld1             {v22.8B}, [x0], x3
+        ld1             {v20.8B}, [x0], x3
         urhadd          v4.8B, v4.8B, v20.8B
-        ld1             {v23.8B}, [x0], x3
+        ld1             {v21.8B}, [x0], x3
         urhadd          v5.8B, v5.8B, v21.8B
+        ld1             {v22.8B}, [x0], x3
         urhadd          v6.8B, v6.8B, v22.8B
+        ld1             {v23.8B}, [x0], x3
         urhadd          v7.8B, v7.8B, v23.8B
         sub             x0, x0, x3, lsl #3
 .endif
@@ -75,11 +75,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
         c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_neon;
         c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_neon;
         c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_neon;
-        // This function is disabled, as it doesn't handle widths that aren't
-        // an even multiple of 8 correctly. fate-hevc doesn't exercise that
-        // for the current size, but if enabled for bigger sizes, the cases
-        // of non-multiple of 8 seem to arise.
-        // c->sao_band_filter[0] = ff_hevc_sao_band_filter_8x8_8_neon;
+        c->sao_band_filter[0] = ff_hevc_sao_band_filter_8x8_8_neon;
     }
     if (bit_depth == 10) {
         c->add_residual[0] = ff_hevc_add_residual_4x4_10_neon;
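Note: the dropped comment explains why ff_hevc_sao_band_filter_8x8_8_neon had been disabled: it only handles widths that are a multiple of 8, and fate-hevc happens not to exercise other widths at this size. This change re-enables the assignment as it was in the older tree. A hypothetical guard illustrating the constraint; the helper and the function-pointer type below are inventions for this sketch, not the real HEVCDSPContext layout:

    #include <stdint.h>

    typedef void (*sao_band_fn)(uint8_t *dst, const uint8_t *src,
                                int stride, int width, int height);

    /* Use the SIMD path only for widths it supports, else the C fallback. */
    static sao_band_fn pick_sao_band(sao_band_fn c_impl, sao_band_fn neon_impl,
                                     int width)
    {
        return (width % 8 == 0) ? neon_impl : c_impl;
    }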
@@ -109,25 +109,12 @@
         trn2            \r5\().4H, \r0\().4H, \r1\().4H
         trn1            \r6\().4H, \r2\().4H, \r3\().4H
         trn2            \r7\().4H, \r2\().4H, \r3\().4H
 
         trn1            \r0\().2S, \r4\().2S, \r6\().2S
         trn2            \r2\().2S, \r4\().2S, \r6\().2S
         trn1            \r1\().2S, \r5\().2S, \r7\().2S
         trn2            \r3\().2S, \r5\().2S, \r7\().2S
 .endm
-
-.macro  transpose_4x8H r0, r1, r2, r3, t4, t5, t6, t7
-        trn1            \t4\().8H, \r0\().8H, \r1\().8H
-        trn2            \t5\().8H, \r0\().8H, \r1\().8H
-        trn1            \t6\().8H, \r2\().8H, \r3\().8H
-        trn2            \t7\().8H, \r2\().8H, \r3\().8H
-
-        trn1            \r0\().4S, \t4\().4S, \t6\().4S
-        trn2            \r2\().4S, \t4\().4S, \t6\().4S
-        trn1            \r1\().4S, \t5\().4S, \t7\().4S
-        trn2            \r3\().4S, \t5\().4S, \t7\().4S
-.endm
-
 .macro  transpose_8x8H r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
         trn1            \r8\().8H, \r0\().8H, \r1\().8H
         trn2            \r9\().8H, \r0\().8H, \r1\().8H
@@ -29,6 +29,41 @@ wrap(avcodec_open2(AVCodecContext *avctx,
     testneonclobbers(avcodec_open2, avctx, codec, options);
 }
 
+#if FF_API_OLD_ENCDEC
+wrap(avcodec_decode_audio4(AVCodecContext *avctx,
+                           AVFrame *frame,
+                           int *got_frame_ptr,
+                           AVPacket *avpkt))
+{
+    testneonclobbers(avcodec_decode_audio4, avctx, frame,
+                     got_frame_ptr, avpkt);
+}
+
+wrap(avcodec_decode_video2(AVCodecContext *avctx,
+                           AVFrame *picture,
+                           int *got_picture_ptr,
+                           AVPacket *avpkt))
+{
+    testneonclobbers(avcodec_decode_video2, avctx, picture,
+                     got_picture_ptr, avpkt);
+}
+
+wrap(avcodec_encode_audio2(AVCodecContext *avctx,
+                           AVPacket *avpkt,
+                           const AVFrame *frame,
+                           int *got_packet_ptr))
+{
+    testneonclobbers(avcodec_encode_audio2, avctx, avpkt, frame,
+                     got_packet_ptr);
+}
+
+wrap(avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+                           const AVFrame *frame, int *got_packet_ptr))
+{
+    testneonclobbers(avcodec_encode_video2, avctx, avpkt, frame, got_packet_ptr);
+}
+#endif
+
 wrap(avcodec_decode_subtitle2(AVCodecContext *avctx,
                               AVSubtitle *sub,
                               int *got_sub_ptr,
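Note: the block restored above re-adds NEON clobber-test wrappers for the decode/encode entry points that are only compiled when FF_API_OLD_ENCDEC is set (avcodec_decode_audio4/video2 and avcodec_encode_audio2/video2). For context, a minimal sketch of the pre-5.0 video decode call these wrappers intercept; allocation, packet reading and flushing are assumed to happen elsewhere:

    #include <libavcodec/avcodec.h>

    /* Old-style one-shot decode: returns non-zero if frame now holds a
     * picture, 0 if more input is needed, negative AVERROR on failure. */
    static int decode_one(AVCodecContext *avctx, AVFrame *frame, AVPacket *pkt)
    {
        int got_picture = 0;
        int ret = avcodec_decode_video2(avctx, frame, &got_picture, pkt);
        if (ret < 0)
            return ret;
        return got_picture;
    }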
@@ -18,7 +18,6 @@
 
 #include "config.h"
 
-#include "libavutil/attributes.h"
 #include "libavutil/aarch64/cpu.h"
 #include "libavcodec/opusdsp.h"
 
@@ -58,7 +58,7 @@ endconst
 .endm
 
 .macro idct_end
-        ret             x10
+        br              x10
 .endm
 
 .macro smull1 a, b, c
Some files were not shown because too many files have changed in this diff.