Mirror of https://github.com/jellyfin/jellyfin-ffmpeg.git, synced 2024-11-23 05:49:42 +00:00

New upstream version 4.2.1

commit 1901e784e5 (parent ce9c7e0073)
@ -19,7 +19,7 @@ cache:
directories:
- ffmpeg-samples
before_install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update --all; fi
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install nasm; fi
script:
Changelog
@ -1,320 +1,154 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 4.0.4:
|
||||
- avcodec/hevcdec: Avoid only partly skiping duplicate first slices
|
||||
- lavc/bmp: Avoid a heap buffer overwrite for 1bpp input.
|
||||
- avcodec/mpegpicture: Check size of edge_emu_buffer
|
||||
- avformat/mov: Fix potential integer overflow in entry check in mov_read_trun()
|
||||
- avcodec/truemotion2: Fix integer overflow in tm2_null_res_block()
|
||||
- avcodec/dfa: Check the chunk header is not truncated
|
||||
- avcodec/clearvideo: Check remaining data in P frames
|
||||
- avcodec/dvbsubdec: Check object position
|
||||
- avcodec/cdgraphics: Use ff_set_dimensions()
|
||||
- avformat/gdv: Check fps
|
||||
- configure: use vpx_codec_vp8_dx/cx for libvpx-vp8 checking
|
||||
- configure: add missing pthreads extralibs dependency for libvpx-vp9
|
||||
- avcodec/mpeg4videodec: Check idx in mpeg4_decode_studio_block()
|
||||
- avcodec/dxv: Correct integer overflow in get_opcodes()
|
||||
- avcodec/scpr: Fix use of uninitialized variable
|
||||
- avcodec/qpeg: Limit copy in qpeg_decode_intra() to the available bytes
|
||||
- avcodec/aic: Check remaining bits in aic_decode_coeffs()
|
||||
- avcodec/gdv: Check for truncated tags in decompress_5()
|
||||
- avcodec/bethsoftvideo: Check block_type
|
||||
- avcodec/jpeg2000dwt: Fix integer overflow in dwt_decode97_int()
|
||||
- avcodec/error_resilience: Use a symmetric check for skipping MV estimation
|
||||
- avcodec/mlpdec: Insuffient typo
|
||||
- avcodec/zmbv: obtain frame later
|
||||
- avcodec/jvdec: Check available input space before decode8x8()
|
||||
- avcodec/h264_direct: Fix overflow in POC comparission
|
||||
- avformat/webmdashenc: Check id in adaption_sets
|
||||
- avformat/http: Fix Out-of-Bounds access in process_line()
|
||||
- avformat/ftp: Fix Out-of-Bounds Access and Information Leak in ftp.c:393
|
||||
- avcodec/htmlsubtitles: Fixes denial of service due to use of sscanf in inner loop for handling braces
|
||||
- avcodec/htmlsubtitles: Fixes denial of service due to use of sscanf in inner loop for tag scaning
|
||||
- avformat/matroskadec: Do not leak queued packets on sync errors
|
||||
- avcodec/mpeg4videodec: Clear interlaced_dct for studio profile
|
||||
- avformat/mov: Do not use reference stream in mov_read_sidx() if there is no reference stream
|
||||
- avcodec/sbrdsp_fixed.c: remove input value limit for sbr_sum_square_c()
|
||||
- avformat/mov: validate chunk_count vs stsc_data
|
||||
- avformat/mov.c: require tfhd to begin parsing trun
|
||||
- avcodec/pgssubdec: Check for duplicate display segments
|
||||
- avformat/rtsp: Check number of streams in sdp_parse_line()
|
||||
- avformat/rtsp: Clear reply in every iteration in ff_rtsp_connect()
|
||||
- avcodec/fic: Check that there is input left in fic_decode_block()
|
||||
- avcodec/tiff: Check for 12bit gray fax
|
||||
- avutil/imgutils: Optimize memset_bytes() by using av_memcpy_backptr()
|
||||
- avutil/mem: Optimize fill32() by unrolling and using 64bit
|
||||
- configure: bump year
|
||||
- avcodec/diracdec: Check component quant
|
||||
- avcodec/tests/rangecoder: initialize array to avoid valgrind warning
|
||||
- avcodec/h264_slice: Fix integer overflow in implicit_weight_table()
|
||||
- avcodec/exr: set layer_match in all branches
|
||||
- avcodec/exr: Check for duplicate channel index
|
||||
- avcodec/4xm: Fix returned error codes
|
||||
- avformat/libopenmpt: Fix successfull typo
|
||||
- avcodec/v4l2_m2m: fix cant typo
|
||||
- avcodec/mjpegbdec: Fix some misplaced {} and spaces
|
||||
- avformat/wvdec: detect and error out on WavPack DSD files
|
||||
- avcodec/mips: Fix failed case: hevc-conformance-AMP_A_Samsung_* when enable msa
|
||||
- avcodec/fic: Fail on invalid slice size/off
|
||||
- postproc/postprocess_template: remove FF_REG_sp from clobber list
|
||||
- postproc/postprocess_template: Avoid using %4 for the threshold compare
|
||||
- avcodec/rpza: Check that there is enough data for all the blocks
|
||||
- avcodec/rpza: Move frame allocation to a later point
|
||||
- avcodec/avcodec: Document the data type for AV_PKT_DATA_MPEGTS_STREAM_ID
|
||||
- avformat/mpegts: Fix side data type for stream id
|
||||
- tests/fate/filter-video: increase fuzz for fate-filter-refcmp-psnr-rgb
|
||||
- avcodec/mjpegdec: Fix indention of ljpeg_decode_yuv_scan()
|
||||
- lavf/id3v2: fail read_apic on EOF reading mimetype
|
||||
- avformat/nutenc: Document trailer index assert better
|
||||
- lavf/mov: ensure only one tkhd per trak
|
||||
- avcodec/clearvideo: Check remaining input bits in P macro block loop
|
||||
- avcodec/dxv: Check that there is enough data to decompress
|
||||
- avcodec/ppc/hevcdsp: Fix build failures with powerpc-linux-gnu-gcc-4.8 with --disable-optimizations
|
||||
- avcodec/msvideo1: Check for too small dimensions
|
||||
- avcodec/wmv2dec: Skip I frame if its smaller than 1/8 of the minimal size
|
||||
- avcodec/msmpeg4dec: Skip frame if its smaller than 1/8 of the minimal size
|
||||
- avcodec/truemotion2rt: Fix rounding in input size check
|
||||
- avcodec/truemotion2: fix integer overflows in tm2_low_chroma()
|
||||
- avcodec/pngdec: Check compression method
|
||||
- fftools/ffmpeg: Repair reinit_filter feature
|
||||
- avcodec/shorten: Fix integer overflow with offset
|
||||
- h264_redundant_pps: Fix logging context
|
||||
- avcodec/cavsdec: Propagate error codes inside decode_mb_i()
|
||||
- avcodec/mpeg4videodec: Clear partitioned frame in decode_studio_vop_header()
|
||||
- avcodec/mpegaudio_parser: Consume more than 0 bytes in case of the unsupported mp3adu case
|
||||
- avcodec/hevcdec: decode at most one slice reporting being the first in the picture
|
||||
- avformat/dsfdec: fix calculation of size of data chunk
|
||||
- avformat/dsfdec: properly handle padded last packet
|
||||
- avcodec/hevcdec: fix non-ref frame judgement
|
||||
- avcodec/libaomenc: remove AVOption related to frame partitions
|
||||
version 4.2.1:
|
||||
- avformat/vividas: check for tiny blocks using alignment
|
||||
- avcodec/vc1_pred: Fix refdist in scaleforopp()
|
||||
- avcodec/vorbisdec: fix FASTDIV usage for vr_type == 2
|
||||
- avcodec/iff: Check for overlap in cmap_read_palette()
|
||||
- avcodec/apedec: Fix 32bit int overflow in do_apply_filter()
|
||||
- lavf/rawenc: Only accept the appropriate stream type for raw muxers.
|
||||
- avformat/matroskadec: use av_fast_realloc to reallocate ebml list arrays
|
||||
- avformat/matroskadec: use proper types for some EbmlSyntax fields
|
||||
- avcodec/ralf: fix undefined shift in extend_code()
|
||||
- avcodec/ralf: fix undefined shift
|
||||
- avcodec/bgmc: Check input space in ff_bgmc_decode_init()
|
||||
- avcodec/vp3: Check for end of input in 2 places of vp4_unpack_macroblocks()
|
||||
- avcodec/truemotion2: Fix multiple integer overflows in tm2_null_res_block()
|
||||
- avcodec/vc1_block: Check the return code from vc1_decode_p_block()
|
||||
- avcodec/vc1dec: Require res_sprite for wmv3images
|
||||
- avcodec/vc1_block: Check for double escapes
|
||||
- avcodec/vorbisdec: Check get_vlc2() failure
|
||||
- avcodec/tta: Fix integer overflow in prediction
|
||||
- avcodec/vb: Check input packet size to be large enough to contain flags
|
||||
- avcodec/cavsdec: Limit the number of access units per packet to 2
|
||||
- avcodec/atrac9dec: Check block_align
|
||||
- avcodec/alac: Check for bps of 0
|
||||
- avcodec/alac: Fix multiple integer overflows in lpc_prediction()
|
||||
- avcodec/rl2: set dimensions
|
||||
- avcodec/aacdec: Add FF_CODEC_CAP_INIT_CLEANUP
|
||||
- avcodec/idcinvideo: Add 320x240 default maximum resolution
|
||||
- avformat/realtextdec: free queue on error
|
||||
- avcodec/vp5/6/8: use vpX_rac_is_end()
|
||||
- avformat/vividas: Check av_xiphlacing() return value before use
|
||||
- avcodec/alsdec: Fix integer overflow in decode_var_block_data()
|
||||
- avcodec/alsdec: Limit maximum channels to 512
|
||||
- avcodec/anm: Check input size for a frame with just a stop code
|
||||
- avcodec/flicvideo: Optimize and Simplify FLI_COPY in flic_decode_frame_24BPP() by using bytestream2_get_buffer()
|
||||
- avcodec/loco: Check left column value
|
||||
- avcodec/ffwavesynth: Fixes invalid shift with pink noise seeking
|
||||
- avcodec/ffwavesynth: Fix integer overflow for some corner case values
|
||||
- avcodec/indeo2: Check remaining input more often
|
||||
- avcodec/diracdec: Check that slices are fewer than pixels
|
||||
- avcodec/vp56: Consider the alpha start as end of the prior header
|
||||
- avcodec/4xm: Check for end of input in decode_p_block()
|
||||
- avcodec/hevcdec: Check delta_luma_weight_l0/1
|
||||
- avcodec/hnm4video: Optimize postprocess_current_frame()
|
||||
- avcodec/hevc_refs: Optimize 16bit generate_missing_ref()
|
||||
- avcodec/scpr: Use av_memcpy_backptr() in type 17 and 33
|
||||
- avcodec/tiff: Enforce increasing offsets
|
||||
- avcodec/dds: Use ff_set_dimensions()
|
||||
- avformat/vividas: Fix another infinite loop
|
||||
- avformat/vividas: Fix infinite loop in header parser
|
||||
- avcodec/mpc8: Fix 32bit mask/enum
|
||||
- avcodec/alsdec: Fix integer overflows of raw_samples in decode_var_block_data()
|
||||
- avcodec/alsdec: Fix integer overflow of raw_samples in decode_blocks()
|
||||
- avcodec/alsdec: fix mantisse shift
|
||||
- avcodec/pngdec: consider chunk size in minimal size check
|
||||
- avcodec/vc1_block: Fix invalid shifts in vc1_decode_i_blocks()
|
||||
- avcodec/vc1_block: fix invalid shift in vc1_decode_p_mb()
|
||||
- avcodec/aacdec_template: fix integer overflow in imdct_and_windowing()
|
||||
- avformat/mpegts: Check if ready on SCTE reception
|
||||
- avcodec/omx: fix xFramerate calculation
|
||||
- avformat/avidec: add support for recognizing HEVC fourcc when demuxing
|
||||
- avformat/mpegts: fix teletext PTS when selecting teletext streams only
|
||||
- avcodec/h2645_parse: zero initialize the rbsp buffer
|
||||
- avcodec/omx: Fix handling of fragmented buffers
|
||||
- avcodec/omx: ensure zerocopy mode can be disabled on rpi builds
|
||||
- avformat/mxfdec: do not ignore bad size errors
|
||||
- avformat/matroskadec: Fix seeking
|
||||
- ffplay: properly detect all window size changes
|
||||
|
||||
version 4.0.3:
|
||||
- avutil/integer: Fix integer overflow in av_mul_i()
|
||||
- avcodec/msrle: Check that the input is large enough to contain a end of picture code
|
||||
- avformat/ftp: return AVERROR_EOF for EOF
|
||||
- avcodec/libx264: remove FF_CODEC_CAP_INIT_THREADSAFE flag
|
||||
- avcodec/jpeg2000dec: Fix off by 1 error in JPEG2000_PGOD_CPRL handling
|
||||
- avcodec/mpeg4videodec: Fix typo in sprite delta check
|
||||
- avcodec/h264_cavlc: Check mb_skip_run
|
||||
- avcodec/ra144: Fix integer overflow in add_wav()
|
||||
- avformat/utils: Never store negative values in last_IP_duration
|
||||
- avformat/utils: Fix integer overflow in discontinuity check
|
||||
- Revert "avcodec/cbs_h264: silence errors about end_of_seq nalus"
|
||||
- avcodec/cbs: ensure user_data is padded for GBC parsing
|
||||
- avcodec/cbs: fix crash in sei_pic_timestamp
|
||||
- avcodec/cbs_h264: silence errors about end_of_seq nalus
|
||||
- avcodec/cuviddec: properly take deinterlacing and display delay into account for buffer_full check
|
||||
- avcodec/h2645_parse: skip NALUs with no content after stripping all the trailing zeros
|
||||
- configure: <fflib>_deps: validate, reduce sensitivity
|
||||
- configure: speed up check_deps()
|
||||
- configure: speed up print_enabled_components()
|
||||
- configure: speed up flatten_extralibs_wrapper()
|
||||
- avformat/utils: Fix potential integer overflow in extract_extradata()
|
||||
- avcodec/unary: Improve get_unary() docs
|
||||
- avcodec/gdv: Replace divisions by shifts in rescale()
|
||||
- avcodec/ac3dec: Fix shift signedness in mask creation
|
||||
- avcodec/eac3dec: Check that channel_map does not contain more than EAC3_MAX_CHANNELS
|
||||
- doc/examples/vaapi_transcode: Fix the typo
|
||||
- avcodec/dvdsubdec: Sanity check len in decode_rle()
|
||||
- avcodec/mpeg4videodec: Fix undefined shift in get_amv()
|
||||
- avcodec/zmbv: Check that the decompressed data size is correct
|
||||
- avcodec/zmbv: Update decomp_len in raw frames
|
||||
- avcodec/shorten: Fix bitstream end check in read_header()
|
||||
- avcodec/dvdsubdec: Avoid branch in decode_run_8bit()
|
||||
- avcodec/h264_refs: Document last if() in ff_h264_execute_ref_pic_marking()
|
||||
- avcodec/ra144: Fix undefined integer overflow in add_wav()
|
||||
- avcodec/indeo4: Check dimensions in decode_pic_hdr()
|
||||
- avformat/mov: Error on too large stsd entry counts.
|
||||
- examples: Fix use of AV_CODEC_FLAG_GLOBAL_HEADER
|
||||
- avcodec/hq_hqa: Check remaining input bits in hqa_decode_mb()
|
||||
- avcodec/vb: Check for end of bytestream before reading blocktype
|
||||
- avcodec/snowdec: Fix integer overflow with motion vector residual
|
||||
- avcodec/mpeg4videodec: Fix slice end detection in mpeg4_decode_studio_mb()
|
||||
- avformat/nsvdec: Do not parse multiple NSVf
|
||||
- avformat/dashdec: Fix strlen(rep_id_val) with it being NULL
|
||||
- avformat/mlvdec: read_string() received unsigned size, make the argument unsigned
|
||||
- avformat/rmdec: Fix EOF check in the stream loop in ivr_read_header()
|
||||
- avcodec/scpr: Check for min > max in decompress_p()
|
||||
- avcodec/shorten: Fix signed 32bit overflow in shift in shorten_decode_frame()
|
||||
- avcodec/shorten: Fix integer overflow in residual/LPC combination
|
||||
- avcodec/shorten: Check verbatim length
|
||||
- avcodec/mpegaudio_parser: Initialize poutbuf*
|
||||
- avcodec/aacpsdsp_template: Fix integer overflow in ps_stereo_interpolate_c()
|
||||
- avformat/flvenc: Check audio packet size
|
||||
- lavc/svq3: Fix regression decoding some files.
|
||||
- avcodec/mlp_parser: Check if synccode is within buffer
|
||||
- avcodec/qtrle: Check remaining bytestream in qtrle_decode_XYbpp()
|
||||
- avcodec/diracdec: Check bytes count in else branch in decode_lowdelay() too
|
||||
- avcodec/diracdec: Check slice numbers for overflows in relation to picture dimensions
|
||||
- avcodec/diracdec: Change frame_number to 64bit as its a 32bit from the bitstream and we also have a -1 special case
|
||||
- avcodec/dirac_dwt_template: Fix several integer overflows in horizontal_compose_daub97i()
|
||||
- avcodec/diracdec: Prevent integer overflow in intermediate in global_mv()
|
||||
- swresample/swresample: Fix input channel count in resample_first computation
|
||||
- avutil/pixfmt: Document chroma plane size for odd resolutions
|
||||
- lavf/libsmbclient: return AVERROR_EOF for EOF.
|
||||
- lavc/videotoolboxenc: Fix compilation on osx 10.10.5 Yosemite
|
||||
- avcodec/mediacodecdec: fix SEGV on modern nvidia decoders
|
||||
- avcodec/bitstream_filters: check the input argument of av_bsf_get_by_name() for NULL
|
||||
- avformat/librtmp: fix returning EOF from Read/Write
|
||||
- avcodec/videotoolboxenc: fix undefined behavior with rc_max_rate=0
|
||||
version 4.2:
|
||||
- tpad filter
|
||||
- AV1 decoding support through libdav1d
|
||||
- dedot filter
|
||||
- chromashift and rgbashift filters
|
||||
- freezedetect filter
|
||||
- truehd_core bitstream filter
|
||||
- dhav demuxer
|
||||
- PCM-DVD encoder
|
||||
- GIF parser
|
||||
- vividas demuxer
|
||||
- hymt decoder
|
||||
- anlmdn filter
|
||||
- maskfun filter
|
||||
- hcom demuxer and decoder
|
||||
- ARBC decoder
|
||||
- libaribb24 based ARIB STD-B24 caption support (profiles A and C)
|
||||
- Support decoding of HEVC 4:4:4 content in nvdec and cuviddec
|
||||
- removed libndi-newtek
|
||||
- agm decoder
|
||||
- KUX demuxer
|
||||
- AV1 frame split bitstream filter
|
||||
- lscr decoder
|
||||
- lagfun filter
|
||||
- asoftclip filter
|
||||
- Support decoding of HEVC 4:4:4 content in vdpau
|
||||
- colorhold filter
|
||||
- xmedian filter
|
||||
- asr filter
|
||||
- showspatial multimedia filter
|
||||
- VP4 video decoder
|
||||
- IFV demuxer
|
||||
- derain filter
|
||||
- deesser filter
|
||||
- mov muxer writes tracks with unspecified language instead of English by default
|
||||
- add support for using clang to compile CUDA kernels
|
||||
|
||||
|
||||
version 4.0.2:
|
||||
- avcodec/dvdsub_parser: Allocate input padding
|
||||
- avcodec/dvdsub_parser: Init output buf/size
|
||||
- avcodec/dirac_dwt_template: Fix signedness regression in interleave()
|
||||
- avformat/mov: Simplify last element computation in mov_estimate_video_delay()
|
||||
- avformat/mov: Break out of inner loop early in mov_estimate_video_delay()
|
||||
- avformat/mov: Eliminate variable buf_size from mov_estimate_video_delay()
|
||||
- avformat/mov: remove modulo operations from mov_estimate_video_delay()
|
||||
- avformat/movenc: Write version 2 of audio atom if channels is not known
|
||||
- swresample/arm: rename labels to fix xcode build error
|
||||
- avformat/movenc: Check input sample count
|
||||
- avcodec/mjpegdec: Check for odd progressive RGB
|
||||
- avcodec/vp8_parser: Do not leave data/size uninitialized
|
||||
- avformat/mms: Add missing chunksize check
|
||||
- avformat/pva: Check for EOF before retrying in read_part_of_packet()
|
||||
- avformat/rmdec: Do not pass mime type in rm_read_multi() to ff_rm_read_mdpr_codecdata()
|
||||
- avformat/asfdec_o: Check size_bmp more fully
|
||||
- avformat/mxfdec: Fix av_log context
|
||||
- avcodec/mpeg4videodec: Check for bitstream end in read_quant_matrix_ext()
|
||||
- avcodec/indeo4: Check for end of bitstream in decode_mb_info()
|
||||
- avcodec/ac3dec: Check channel_map index
|
||||
- avcodec/mpeg4videodec: Remove use of FF_PROFILE_MPEG4_SIMPLE_STUDIO as indicator of studio profile
|
||||
- avcodec/shorten: Fix undefined addition in shorten_decode_frame()
|
||||
- avcodec/shorten: Fix undefined integer overflow
|
||||
- avcodec/jpeg2000dec: Fixes invalid shifts in jpeg2000_decode_packets_po_iteration()
|
||||
- avcodec/jpeg2000dec: Check that there are enough bytes for all tiles
|
||||
- avformat/movenc: Use mov->fc consistently for av_log()
|
||||
- avcodec/mpeg4videodec: Check read profile before setting it
|
||||
- avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample
|
||||
- avcodec/ac3_parser: Check init_get_bits8() for failure
|
||||
- avformat/movenc: Check that frame_types other than EAC3_FRAME_TYPE_INDEPENDENT have a supported substream id
|
||||
- avcodec/dpx: Check elements in 12bps planar path
|
||||
- avcodec/escape124: Fix spelling errors in comment
|
||||
- avcodec/ra144: Fix integer overflow in ff_eval_refl()
|
||||
- avcodec/cscd: Check output buffer size for lzo.
|
||||
- avcodec/escape124: Check buf_size against num_superblocks
|
||||
- avcodec/h264_parser: Reduce needed history for parsing mb index
|
||||
- avcodec/magicyuv: Check bits left in flags&1 branch
|
||||
- avcodec/mjpegdec: Check for end of bitstream in ljpeg_decode_rgb_scan()
|
||||
- ffmpeg: fix -stream_loop with multiple inputs
|
||||
- ffmpeg: factorize input thread creation and destruction
|
||||
- avformat/mpegts: parse large PMTs with multiple tables
|
||||
- Revert "avcodec/mediacodecdec: wait on first frame after input buffers are full"
|
||||
- avcodec/videotoolboxenc: fix invalid session on iOS
|
||||
- avcodec/videotoolboxenc: split initialization
|
||||
- avcodec/videotoolboxenc: fix mutex/cond leak in error path
|
||||
|
||||
version 4.0.1:
|
||||
- avcodec/aacdec_fixed: Fix undefined integer overflow in apply_independent_coupling_fixed()
|
||||
- avcodec/dirac_dwt_template: Fix undefined behavior in interleave()
|
||||
- avutil/common: Fix undefined behavior in av_clip_uintp2_c()
|
||||
- fftools/ffmpeg: Fallback to duration if sample rate is unavailable
|
||||
- avformat/mov: Only set pkt->duration to non negative values
|
||||
- avcodec/mpeg4videodec: Clear bits_per_raw_sample if it has originated from a previous instance
|
||||
- avformat/movenc: fix recognization of cover image streams
|
||||
- avformat/movenc: properly handle cover image codecs
|
||||
- avcodec/h264_slice: Fix overflow in recovery_frame computation
|
||||
- avcodec/h264_ps: Move MAX_LOG2_MAX_FRAME_NUM to header so it can be used in h264_sei
|
||||
- avcodec/h264_mc_template: Only prefetch motion if the list is used.
|
||||
- avcodec/xwddec: Use ff_set_dimensions()
|
||||
- avcodec/wavpack: Fix overflow in adding tail
|
||||
- avcodec/shorten: Fix multiple integer overflows
|
||||
- avcodec/shorten: Fix undefined shift in fix_bitshift()
|
||||
- avcodec/shorten: Fix a negative left shift in shorten_decode_frame()
|
||||
- avcodec/shorten: Sanity check nmeans
|
||||
- avcodec/shorten: Check non COMM chunk len before skip in decode_aiff_header()
|
||||
- avcodec/mjpegdec: Fix integer overflow in ljpeg_decode_rgb_scan()
|
||||
- avcodec/truemotion2: Fix overflow in tm2_apply_deltas()
|
||||
- avcodec/opus_silk: Change silk_lsf2lpc() slightly toward silk/NLSF2A.c
|
||||
- avcodec/amrwbdec: Fix division by 0 in find_hb_gain()
|
||||
- avcodec/h263dec: Reinitialize idct context if it has not been setup for the active profile
|
||||
- avcodec/idctdsp: Clear idct/idct_add for studio profile
|
||||
- avformat/mov: replace a value error by clipping into valid range in mov_read_stsc()
|
||||
- avformat/bintext: Reduce detection for random .bin files as it more likely is not a multimedia related file
|
||||
- avformat/mov: Break out early if chunk_count is 0 in mov_build_index()
|
||||
- avcodec/fic: Avoid some magic numbers related to cursors
|
||||
- avcodec/mpeg4video: Detect reference studio streams as studio streams
|
||||
- avcodec/mpeg4videodec: Do not corrupt bits_per_raw_sample
|
||||
- avcodec/mpeg4videode: Eliminate out of loop VOP startcode reading for studio profile
|
||||
- avcodec/g2meet: ask for sample with overflowing RGB
|
||||
- avcodec/idctdsp: Transmit studio_profile to init instead of using AVCodecContext profile
|
||||
- avcodec/ac3dec: Check that the number of channels with dependant streams is valid
|
||||
- avcodec/ac3dec: Fix null pointer dereference in ac3_decode_frame()
|
||||
- avcodec/aacdec_fixed: use 64bit to avoid overflow in rounding in apply_dependent_coupling_fixed()
|
||||
- oavcodec/aacpsdsp_template: Use unsigned for hs0X to prevent undefined behavior
|
||||
- avcodec/g723_1dec: Clip bits2 in both directions
|
||||
- avcodec/mpeg4videoenc: Use 64 bit for times in mpeg4_encode_gop_header()
|
||||
- avcodec/mlpdec: Only change noise_type if the related fields are valid
|
||||
- indeo4: Decode all or nothing of a band header.
|
||||
- avcodec/ac3dec: Use frame_size if superframe_size is 0
|
||||
- avformat/mov: Only fail for STCO/STSC contradictions if both exist
|
||||
- avcodec/dirac_dwt: Fix integer overflow in COMPOSE_DD97iH0 / COMPOSE_DD137iL0
|
||||
- avcodec/fic: Check available input space for cursor
|
||||
- avcodec/mpeg4videodec: Check bps (VOL header) before VOP for studio profile
|
||||
- avcodec/g2meet: Check RGB upper limit
|
||||
- avcodec/jpeg2000dec: Fix undefined shift in the jpeg2000_decode_packets_po_iteration() CPRL case
|
||||
- avcodec/jpeg2000dec: Skip init for component in CPRL if nothing is to be done
|
||||
- avcodec/g2meet: Change order of operations to avoid undefined behavior
|
||||
- avcodec/flac_parser: Fix infinite loop
|
||||
- avcodec/mpeg4videodec: Split decode_studio_vol_header() out of decode_studiovisualobject()
|
||||
- avcodec/mpeg4videodec: Move decode_studiovisualobject() parsing in the branch for visual object parsing
|
||||
- avcodec/mpeg4video_parser: Avoid litteral 0x1B6, use named constant instead
|
||||
- avcodec/mpeg4video_parser: Fix incorrect spliting of MPEG-4 studio frames
|
||||
- avformat/m4vdec: Use the same constant names as libavcodec
|
||||
- avformat/m4vdec: Fix detection of raw MPEG-4 ES Studio
|
||||
- avcodec/wavpack: Fix integer overflow in DEC_MED() / INC_MED()
|
||||
- avcodec/wavpack: Fix integer overflow in wv_unpack_stereo()
|
||||
- avcodec/error_resilience: Fix integer overflow in filter181()
|
||||
- avcodec/h263dec: Check slice_ret in mspeg4 slice loop
|
||||
- avcodec/elsdec: Fix memleaks
|
||||
- avcodec/vc1_block: simplify ac_val computation
|
||||
- avcodec/ffv1enc: Check that the crc + version combination is supported
|
||||
- configure: The eac3_core bitstream filter needs the ac3 parser.
|
||||
- configure: fix arm inline asm checks
|
||||
- lavf/libssh: translate a read of 0 to EOF
|
||||
- ffprobe: fix SEGV when new streams are added
|
||||
- avformat/mpegts: fix incorrect indentation
|
||||
- avformat/mpegts: initialize section_buf to fix valgrind test failure
|
||||
- avformat/mpegts: reindent after last change
|
||||
- avformat/mpegts: parse sections with multiple tables
|
||||
- avformat/mpegts: clean up whitespace
|
||||
- avformat/mpegts: use MAX_SECTION_SIZE instead of hardcoded value
|
||||
- avformat/mpegts: skip non-PMT tids earlier
|
||||
- avcodec/mediacodecdec: add workaround for buggy amlogic mpeg2 decoder
|
||||
- avcodec/mediacodecdec: wait on first frame after input buffers are full
|
||||
- avcodec/mediacodecdec: restructure mediacodec_receive_frame
|
||||
- avcodec/mediacodec_wrapper: add helper to fetch SDK_INT
|
||||
- avcodec/mediacodecdec: refactor pts handling
|
||||
- avcodec/mediacodecdec: use AV_TIME_BASE_Q
|
||||
- avcodec/mediacodecdec: clarify delay_flush specific code
|
||||
- avcodec/videotoolbox: fix decoding of some HEVC videos
|
||||
- avcodec/hevc: remove videotoolbox hack
|
||||
- avcodec/videotoolbox: split h264/hevc callbacks
|
||||
- avcodec/videotoolbox: cleanups
|
||||
- avcodec/videotoolbox: fix kVTCouldNotFindVideoDecoderErr trying to decode HEVC on iOS
|
||||
- avcodec/videotoolbox: improve logging of decoder errors
|
||||
- avcodec/xwddec: fix palette alpha
|
||||
- avformat/webm_chunk: always use a static buffer for get_chunk_filename
|
||||
- configure: fix configure check for lilv-0
|
||||
- avcodec/nvdec_hevc: fix scaling lists
|
||||
- avcodec/hevcdec: make ff_hevc_frame_nb_refs take a const pointer
|
||||
- lavf/bluray: translate a read of 0 to EOF
|
||||
- lavf/dashenc: don't call flush_init_segment before avformat_write_header
|
||||
- avdevice/decklink_dec: unref packets on avpacket_queue_put error
|
||||
- avcodec/hnm4video: fix palette alpha
|
||||
- avcodec/anm: fix palette alpha
|
||||
- avformat/qtpalette: parse color table according to the QuickTime file format specs
|
||||
- ffplay: Fix realloc_texture when input texture is NULL.
|
||||
- hwcontext_vaapi: Fix compilation with libva versions < 1.4.0
|
||||
- lavf/qsv: clone the frame which may be managed by framework
|
||||
- lavf: make overlay_qsv work based on framesync
|
||||
- avformat/segafilm - revert keyframe detection
|
||||
- avformat/utils: refactor upstream_stream_timings
|
||||
- avformat/utils: ignore outlier durations on subtitle/data streams as well
|
||||
version 4.1:
|
||||
- deblock filter
|
||||
- tmix filter
|
||||
- amplify filter
|
||||
- fftdnoiz filter
|
||||
- aderivative and aintegral audio filters
|
||||
- pal75bars and pal100bars video filter sources
|
||||
- support mbedTLS based TLS
|
||||
- adeclick filter
|
||||
- adeclip filter
|
||||
- libtensorflow backend for DNN based filters like srcnn
|
||||
- vc1 decoder is now bit-exact
|
||||
- ATRAC9 decoder
|
||||
- lensfun wrapper filter
|
||||
- colorconstancy filter
|
||||
- AVS2 video decoder via libdavs2
|
||||
- IMM4 video decoder
|
||||
- Brooktree ProSumer video decoder
|
||||
- MatchWare Screen Capture Codec decoder
|
||||
- WinCam Motion Video decoder
|
||||
- 1D LUT filter (lut1d)
|
||||
- RemotelyAnywhere Screen Capture decoder
|
||||
- cue and acue filters
|
||||
- support for AV1 in MP4
|
||||
- transpose_npp filter
|
||||
- AVS2 video encoder via libxavs2
|
||||
- amultiply filter
|
||||
- Block-Matching 3d (bm3d) denoising filter
|
||||
- acrossover filter
|
||||
- ilbc decoder
|
||||
- audio denoiser as afftdn filter
|
||||
- AV1 parser
|
||||
- SER demuxer
|
||||
- sinc audio filter source
|
||||
- chromahold filter
|
||||
- setparams filter
|
||||
- vibrance filter
|
||||
- decoding S12M timecode in h264
|
||||
- xstack filter
|
||||
- pcm vidc decoder and encoder
|
||||
- (a)graphmonitor filter
|
||||
- yadif_cuda filter
|
||||
|
||||
|
||||
version 4.0:
|
||||
@ -370,6 +204,7 @@ version 4.0:
|
||||
- Haivision SRT protocol via libsrt
|
||||
- segafilm muxer
|
||||
- vfrdet filter
|
||||
- SRCNN filter
|
||||
|
||||
|
||||
version 3.4:
|
||||
|
@ -1,4 +1,4 @@
#Installing FFmpeg:
## Installing FFmpeg

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.
LICENSE.md
@ -103,18 +103,9 @@ license version needs to be upgraded by passing `--enable-version3` to configure
There are certain libraries you can combine with FFmpeg whose licenses are not
compatible with the GPL and/or the LGPL. If you wish to enable these
libraries, even in circumstances that their license may be incompatible, pass
`--enable-nonfree` to configure. But note that if you enable any of these
libraries the resulting binary will be under a complex license mix that is
more restrictive than the LGPL and that may result in additional obligations.
It is possible that these restrictions cause the resulting binary to be
`--enable-nonfree` to configure. This will cause the resulting binary to be
unredistributable.

The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.

The NVENC library, while its header file is licensed under the compatible MIT
license, requires a proprietary binary blob at run time, and is deemed to be
incompatible with the GPL. We are not certain if it is compatible with the
LGPL, but we require `--enable-nonfree` even with LGPL configurations in case
it is not.
MAINTAINERS
@ -39,7 +39,7 @@ QuickTime faststart:
|
||||
Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan, Gyan Doshi
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
@ -52,8 +52,8 @@ Communication
|
||||
|
||||
website Deby Barbara Lepage
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Baptiste Coudurier, Lou Logan
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
|
||||
mailing lists Baptiste Coudurier
|
||||
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
|
||||
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
|
||||
Launchpad Timothy Gu
|
||||
@ -121,7 +121,6 @@ Generic Parts:
|
||||
motion* Michael Niedermayer
|
||||
rate control:
|
||||
ratecontrol.c Michael Niedermayer
|
||||
libxvid_rc.c Michael Niedermayer
|
||||
simple IDCT:
|
||||
simple_idct.c, simple_idct.h Michael Niedermayer
|
||||
postprocessing:
|
||||
@ -144,6 +143,7 @@ Codecs:
|
||||
asv* Michael Niedermayer
|
||||
atrac3plus* Maxim Poliakovski
|
||||
audiotoolbox* Rodger Combs
|
||||
avs2* Huiwen Ren
|
||||
bgmc.c, bgmc.h Thilo Borgmann
|
||||
binkaudio.c Peter Ross
|
||||
cavs* Stefan Gehrer
|
||||
@ -168,7 +168,6 @@ Codecs:
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
evrc* Paul B Mahol
|
||||
exif.c, exif.h Thilo Borgmann
|
||||
exr.c Martin Vignali
|
||||
ffv1* Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
fifo.c Jan Sebechlebsky
|
||||
@ -190,6 +189,7 @@ Codecs:
|
||||
libcelt_dec.c Nicolas George
|
||||
libcodec2.c Tomas Härdin
|
||||
libdirac* David Conrad
|
||||
libdavs2.c Huiwen Ren
|
||||
libgsm.c Michel Bardiaux
|
||||
libkvazaar.c Arttu Ylä-Outinen
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
@ -220,7 +220,7 @@ Codecs:
|
||||
ptx.c Ivo van Poorten
|
||||
qcelp* Reynaldo H. Verdejo Pinochet
|
||||
qdm2.c, qdm2data.h Roberto Togni
|
||||
qsv* Mark Thompson
|
||||
qsv* Mark Thompson, Zhong Li
|
||||
qtrle.c Mike Melanson
|
||||
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
|
||||
resample2.c Michael Niedermayer
|
||||
@ -333,6 +333,7 @@ Filters:
|
||||
vf_bwdif Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_chromakey.c Timo Rothenpieler
|
||||
vf_colorchannelmixer.c Paul B Mahol
|
||||
vf_colorconstancy.c Mina Sami (CC <minas.gorgy@gmail.com>)
|
||||
vf_colorbalance.c Paul B Mahol
|
||||
vf_colorkey.c Timo Rothenpieler
|
||||
vf_colorlevels.c Paul B Mahol
|
||||
@ -360,6 +361,7 @@ Filters:
|
||||
vf_ssim.c Paul B Mahol
|
||||
vf_stereo3d.c Paul B Mahol
|
||||
vf_telecine.c Paul B Mahol
|
||||
vf_tonemap_opencl.c Ruiling Song
|
||||
vf_yadif.c Michael Niedermayer
|
||||
vf_zoompan.c Paul B Mahol
|
||||
|
||||
@ -413,7 +415,6 @@ Muxers/Demuxers:
|
||||
flvenc.c Michael Niedermayer, Steven Liu
|
||||
gxf.c Reimar Doeffinger
|
||||
gxfenc.c Baptiste Coudurier
|
||||
hls.c Anssi Hannula
|
||||
hlsenc.c Christian Suloway, Steven Liu
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
@ -524,9 +525,10 @@ Operating systems / CPU architectures
|
||||
=====================================
|
||||
|
||||
Alpha Falk Hueffner
|
||||
MIPS Manojkumar Bhosale
|
||||
MIPS Manojkumar Bhosale, Shiyou Yin
|
||||
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC Lauri Kasanen
|
||||
Windows MinGW Alex Beregszaszi, Ramiro Polla
|
||||
Windows Cygwin Victor Paesa
|
||||
Windows MSVC Matthew Oliver, Hendrik Leppkes
|
||||
@ -575,8 +577,11 @@ Releases
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
||||
GnuPG Fingerprints of maintainers and contributors
|
||||
==================================================
|
||||
GnuPG Fingerprints and IRC nicknames of maintainers and contributors
|
||||
====================================================================
|
||||
|
||||
IRC nicknames are in parentheses. These apply
|
||||
to the IRC channels listed on the website.
|
||||
|
||||
Alexander Strasser 1C96 78B7 83CB 8AA7 9AF5 D1EB A7D8 A57B A876 E58F
|
||||
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
|
||||
@ -594,7 +599,7 @@ Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Lou Logan (llogan) 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
|
||||
@ -611,5 +616,5 @@ Steinar H. Gunderson C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
|
||||
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
|
||||
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
|
||||
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
|
||||
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Tomas Härdin (thardin) A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
|
||||
|
Makefile
@ -50,6 +50,9 @@ $(TOOLS): %$(EXESUF): %.o
|
||||
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
@ -58,6 +61,7 @@ tools/target_dec_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
CONFIGURABLE_COMPONENTS = \
|
||||
$(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
|
||||
$(SRC_PATH)/libavcodec/bitstream_filters.c \
|
||||
$(SRC_PATH)/libavcodec/parsers.c \
|
||||
$(SRC_PATH)/libavformat/protocols.c \
|
||||
|
||||
config.h: ffbuild/.config
|
||||
@ -134,7 +138,7 @@ uninstall-data:
|
||||
|
||||
clean::
|
||||
$(RM) $(CLEANSUFFIXES)
|
||||
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES))
|
||||
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES)) $(addprefix compat/*/*/,$(CLEANSUFFIXES))
|
||||
$(RM) -r coverage-html
|
||||
$(RM) -rf coverage.info coverage.info.in lcov
|
||||
|
||||
@ -158,7 +162,7 @@ check: all alltools examples testprogs fate
|
||||
|
||||
include $(SRC_PATH)/tests/Makefile
|
||||
|
||||
$(sort $(OBJDIRS)):
|
||||
$(sort $(OUTDIRS)):
|
||||
$(Q)mkdir -p $@
|
||||
|
||||
# Dummy rule to stop make trying to rebuild removed or renamed headers
|
||||
|
README.md
@ -4,31 +4,6 @@ FFmpeg README
FFmpeg is a collection of libraries and tools to process multimedia content
such as audio, video, subtitles and related metadata.

## For Jellyfin

This particular repository is designed to support building a static, portable,
FFMPEG release of 4.0.3 for the [Jellyfin project](https://github.com/jellyfin).

To build packages, use `./build <release> <arch>`, where `release` is one of:
* `stretch` (Debian 9.X "Stretch")
* `buster` (Debian 10.X "Buster")
* `xenial` (Ubuntu 16.04 "Xenial Xerus")
* `bionic` (Ubuntu 18.04 "Bionic Beaver")
* `cosmic` (Ubuntu 18.10 "Cosmic Cuttlefish")

And `arch` is one of:
* `amd64` (Standard 64-bit x86)
* `armhf` (ARMv6, Raspberry Pi)
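
For example, combining one value from each list, a sketch of an invocation that would build the Debian 10 "Buster" packages for 64-bit x86 (assuming the `./build` script at the repository root and a working `docker` setup) looks like:

```sh
# build the Jellyfin static FFmpeg packages for Debian 10 on amd64
./build buster amd64
```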

The build setup requires `docker` support and may use a significant amount of
disk space. Binary releases are available in the [repository](https://repo.jellyfin.org/releases/server).

For older Ubuntu releases in between these officially supported versions, the
oldest should generally be compatible.

The build setup will attempt to generate both `amd64` and `armhf` binary packages
if the release supports it.

## Libraries

* `libavcodec` provides implementation of a wider range of codecs.
@ -1,10 +1,10 @@

┌───────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.0 "Wu" │
└───────────────────────────────────┘
┌────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.2 "Ada" │
└────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 4.0 "Wu", about 6
months after the release of FFmpeg 3.4.
The FFmpeg Project proudly presents FFmpeg 4.2 "Ada", about 8
months after the release of FFmpeg 4.1.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

@ -34,6 +34,22 @@
|
||||
// NOTE: this is a partial update of the Avisynth C interface to recognize
|
||||
// new color spaces added in Avisynth 2.60. By no means is this document
|
||||
// completely Avisynth 2.60 compliant.
|
||||
// 170103: added new CPU constants (FMA4, AVX512xx)
|
||||
// 171102: define SIZETMOD. do not use yet, experimental. Offsets are size_t instead of int. Affects x64.
|
||||
// 171106: avs_get_row_size calls into avs_get_row_size_p, instead of direct field access
|
||||
// 171106: avs_get_height calls into avs_get_row_size_p, instead of direct field access
|
||||
// 180524: AVSC_EXPORT to dllexport in capi.h for avisynth_c_plugin_init
|
||||
// 180524: avs_is_same_colorspace VideoInfo parameters to const
|
||||
// 181230: Readability: functions regrouped to mix less AVSC_API and AVSC_INLINE, put together Avisynth+ specific stuff
|
||||
// 181230: use #ifndef AVSC_NO_DECLSPEC for AVSC_INLINE functions which are calling API functions
|
||||
// 181230: comments on avs_load_library (helper for loading API entries dynamically into a struct using AVSC_NO_DECLSPEC define)
|
||||
// 181230: define alias AVS_FRAME_ALIGN as FRAME_ALIGN
|
||||
// 181230: remove unused form of avs_get_rowsize and avs_get_height (kept earlier for reference)
|
||||
// 190104: avs_load_library: smart fallback mechanism for Avisynth+ specific functions:
|
||||
// if they are not loadable, they will work in a classic Avisynth compatible mode
|
||||
// Example#1: e.g. avs_is_444 will call the existing avs_is_yv24 instead
|
||||
// Example#2: avs_bits_per_component will return 8 for all colorspaces (Classic Avisynth supports only 8 bits/pixel)
|
||||
// Thus the Avisynth+ specific API functions are safely callable even when connected to classic Avisynth DLL
|
||||
|
||||
#ifndef __AVISYNTH_C__
|
||||
#define __AVISYNTH_C__
|
||||
@ -42,7 +58,7 @@
|
||||
#include "avs/capi.h"
|
||||
#include "avs/types.h"
|
||||
|
||||
|
||||
#define AVS_FRAME_ALIGN FRAME_ALIGN
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
@ -124,7 +140,7 @@ enum {
|
||||
AVS_CS_GENERIC_YUVA444 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1 }; // 4:4:4:A planar
|
||||
|
||||
|
||||
// Specific colorformats
|
||||
// Specific color formats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
@ -134,18 +150,18 @@ enum {
|
||||
// AVS_CS_I420 = 1<<4 Reserved
|
||||
AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED,
|
||||
|
||||
AVS_CS_YV24 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_8, // YVU 4:4:4 planar
|
||||
AVS_CS_YV16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:2 planar
|
||||
AVS_CS_YV12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:0 planar
|
||||
AVS_CS_YV24 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_8, // YUV 4:4:4 planar
|
||||
AVS_CS_YV16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_8, // YUV 4:2:2 planar
|
||||
AVS_CS_YV12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_8, // YUV 4:2:0 planar
|
||||
AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar
|
||||
AVS_CS_IYUV = AVS_CS_I420,
|
||||
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar
|
||||
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar
|
||||
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YUV 4:1:1 planar
|
||||
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YUV 4:1:0 planar
|
||||
AVS_CS_Y8 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_8, // Y 4:0:0 planar
|
||||
|
||||
//-------------------------
|
||||
// AVS16: new planar constants go live! Experimental PF 160613
|
||||
// 10-12-14 bit + planar RGB + BRG48/64 160725
|
||||
// 10-12-14-16 bit + planar RGB + BGR48/64 160725
|
||||
AVS_CS_YUV444P10 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_10, // YUV 4:4:4 10bit samples
|
||||
AVS_CS_YUV422P10 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:2 10bit samples
|
||||
AVS_CS_YUV420P10 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:0 10bit samples
|
||||
@ -246,9 +262,9 @@ enum { //SUBTYPES
|
||||
enum {
|
||||
// New 2.6 explicitly defined cache hints.
|
||||
AVS_CACHE_NOTHING=10, // Do not cache video.
|
||||
AVS_CACHE_WINDOW=11, // Hard protect upto X frames within a range of X from the current frame N.
|
||||
AVS_CACHE_GENERIC=12, // LRU cache upto X frames.
|
||||
AVS_CACHE_FORCE_GENERIC=13, // LRU cache upto X frames, override any previous CACHE_WINDOW.
|
||||
AVS_CACHE_WINDOW=11, // Hard protect up to X frames within a range of X from the current frame N.
|
||||
AVS_CACHE_GENERIC=12, // LRU cache up to X frames.
|
||||
AVS_CACHE_FORCE_GENERIC=13, // LRU cache up to X frames, override any previous CACHE_WINDOW.
|
||||
|
||||
AVS_CACHE_GET_POLICY=30, // Get the current policy.
|
||||
AVS_CACHE_GET_WINDOW=31, // Get the current window h_span.
|
||||
@ -256,8 +272,8 @@ enum {
|
||||
|
||||
AVS_CACHE_AUDIO=50, // Explicitly do cache audio, X byte cache.
|
||||
AVS_CACHE_AUDIO_NOTHING=51, // Explicitly do not cache audio.
|
||||
AVS_CACHE_AUDIO_NONE=52, // Audio cache off (auto mode), X byte intial cache.
|
||||
AVS_CACHE_AUDIO_AUTO=53, // Audio cache on (auto mode), X byte intial cache.
|
||||
AVS_CACHE_AUDIO_NONE=52, // Audio cache off (auto mode), X byte initial cache.
|
||||
AVS_CACHE_AUDIO_AUTO=53, // Audio cache on (auto mode), X byte initial cache.
|
||||
|
||||
AVS_CACHE_GET_AUDIO_POLICY=70, // Get the current audio policy.
|
||||
AVS_CACHE_GET_AUDIO_SIZE=71, // Get the current audio cache size.
|
||||
@ -284,7 +300,7 @@ enum {
|
||||
AVS_CACHE_COST_MED=224, // Child response of medium cost. (Real time)
|
||||
AVS_CACHE_COST_HI=225, // Child response of heavy cost. (Slow)
|
||||
|
||||
AVS_CACHE_GETCHILD_THREAD_MODE=240, // Cache ask Child for thread safetyness.
|
||||
AVS_CACHE_GETCHILD_THREAD_MODE=240, // Cache ask Child for thread safety.
|
||||
AVS_CACHE_THREAD_UNSAFE=241, // Only 1 thread allowed for all instances. 2.5 filters default!
|
||||
AVS_CACHE_THREAD_CLASS=242, // Only 1 thread allowed for each instance. 2.6 filters default!
|
||||
AVS_CACHE_THREAD_SAFE=243, // Allow all threads in any instance.
|
||||
@ -297,6 +313,8 @@ enum {
|
||||
};
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
AVSValue create_c_video_filter(AVSValue args, void * user_data, IScriptEnvironment * e0);
|
||||
|
||||
struct AVS_ScriptEnvironment {
|
||||
IScriptEnvironment * env;
|
||||
const char * error;
|
||||
@ -313,7 +331,7 @@ typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is layed out identicly to VideoInfo
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
@ -326,7 +344,7 @@ typedef struct AVS_VideoInfo {
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
// Image type properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
@ -353,77 +371,20 @@ AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_API(int, avs_is_rgb48)(const AVS_VideoInfo * p);
|
||||
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p); // avs+: for generic 444 check, use avs_is_yuv444
|
||||
|
||||
AVSC_API(int, avs_is_rgb64)(const AVS_VideoInfo * p);
|
||||
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p); // avs+: for generic 422 check, use avs_is_yuv422
|
||||
|
||||
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yv12)(const AVS_VideoInfo * p) ;
|
||||
AVSC_API(int, avs_is_yv12)(const AVS_VideoInfo * p) ; // avs+: for generic 420 check, use avs_is_yuv420
|
||||
|
||||
AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv444p16)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv422p16)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv420p16)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_y16)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv444ps)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv422ps)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv420ps)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_y32)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_444)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_422)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_420)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_y)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuva)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_planar_rgb)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_planar_rgba)(const AVS_VideoInfo * p);
|
||||
|
||||
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->image_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_API(int, avs_is_color_space)(const AVS_VideoInfo * p, int c_space);
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p); // avs+: for generic grayscale, use avs_is_y
|
||||
|
||||
AVSC_API(int, avs_get_plane_width_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
|
||||
AVSC_API(int, avs_get_plane_height_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
|
||||
|
||||
AVSC_API(int, avs_bits_per_pixel)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_bytes_from_pixels)(const AVS_VideoInfo * p, int pixels);
|
||||
@ -432,10 +393,42 @@ AVSC_API(int, avs_row_size)(const AVS_VideoInfo * p, int plane);
|
||||
|
||||
AVSC_API(int, avs_bmp_size)(const AVS_VideoInfo * vi);
|
||||
|
||||
AVSC_API(int, avs_is_color_space)(const AVS_VideoInfo * p, int c_space);
|
||||
|
||||
// no API for these, inline helper functions
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{
|
||||
return ((p->image_type & property) == property);
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{
|
||||
return !!(p->pixel_type & AVS_CS_PLANAR);
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{
|
||||
return !!(p->image_type & AVS_IT_FIELDBASED);
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{
|
||||
return ((p->image_type & AVS_IT_FIELDBASED) && (p->image_type & (AVS_IT_BFF | AVS_IT_TFF)));
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{
|
||||
return !!(p->image_type & AVS_IT_BFF);
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{
|
||||
return !!(p->image_type & AVS_IT_TFF);
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
@ -447,6 +440,7 @@ AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
@ -488,19 +482,56 @@ AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned den
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE int avs_is_same_colorspace(const AVS_VideoInfo * x, const AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
#endif
|
||||
|
||||
// Avisynth+ extensions
|
||||
AVSC_API(int, avs_is_rgb48)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_rgb64)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuv444p16)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv444
|
||||
|
||||
AVSC_API(int, avs_is_yuv422p16)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv422
|
||||
|
||||
AVSC_API(int, avs_is_yuv420p16)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv420
|
||||
|
||||
AVSC_API(int, avs_is_y16)(const AVS_VideoInfo * p); // obsolete, use avs_is_y
|
||||
|
||||
AVSC_API(int, avs_is_yuv444ps)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv444
|
||||
|
||||
AVSC_API(int, avs_is_yuv422ps)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv422
|
||||
|
||||
AVSC_API(int, avs_is_yuv420ps)(const AVS_VideoInfo * p); // obsolete, use avs_is_yuv420
|
||||
|
||||
AVSC_API(int, avs_is_y32)(const AVS_VideoInfo * p); // obsolete, use avs_is_y
|
||||
|
||||
AVSC_API(int, avs_is_444)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_422)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_420)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_y)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_yuva)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_planar_rgb)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_is_planar_rgba)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_num_components)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_component_size)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_bits_per_component)(const AVS_VideoInfo * p);
|
||||
// end of Avisynth+ specific
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
@ -513,11 +544,15 @@ AVSC_API(int, avs_bits_per_component)(const AVS_VideoInfo * p);
// to be reused. The instances are deleted when the corresponding AVS
// file is closed.

// AVS_VideoFrameBuffer is layed out identicly to VideoFrameBuffer
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
// DO NOT USE THIS STRUCTURE DIRECTLY
typedef struct AVS_VideoFrameBuffer {
  BYTE * data;
#ifdef SIZETMOD
  size_t data_size;
#else
  int data_size;
#endif
  // sequence_number is incremented every time the buffer is changed, so
  // that stale views can tell they're no longer valid.
  volatile long sequence_number;
@ -527,56 +562,94 @@ typedef struct AVS_VideoFrameBuffer {

// VideoFrame holds a "window" into a VideoFrameBuffer.

// AVS_VideoFrame is layed out identicly to IVideoFrame
// AVS_VideoFrame is laid out identically to IVideoFrame
// DO NOT USE THIS STRUCTURE DIRECTLY
typedef struct AVS_VideoFrame {
  volatile long refcount;
  AVS_VideoFrameBuffer * vfb;
  int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
  int row_sizeUV, heightUV;
#ifdef SIZETMOD
  size_t offset;
#else
  int offset;
#endif
  int pitch, row_size, height;
#ifdef SIZETMOD
  size_t offsetU, offsetV;
#else
  int offsetU, offsetV;
#endif
  int pitchUV; // U&V offsets are from top of picture.
  int row_sizeUV, heightUV; // for Planar RGB offsetU, offsetV is for the 2nd and 3rd Plane.
  // for Planar RGB pitchUV and row_sizeUV = 0, because when no VideoInfo (MakeWriteable)
  // the decision on existence of UV is checked by zero pitch
  // AVS+ extension, avisynth.h: class does not break plugins if appended here
#ifdef SIZETMOD
  size_t offsetA;
#else
  int offsetA;
#endif
  int pitchA, row_sizeA; // 4th alpha plane support, pitch and row_size is 0 is none
} AVS_VideoFrame;
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_API(int, avs_get_pitch_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return avs_get_pitch_p(p, 0);}
|
||||
#endif
|
||||
|
||||
AVSC_API(int, avs_get_row_size_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_API(int, avs_get_height_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_API(const BYTE *, avs_get_read_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_read_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
AVSC_API(int, avs_is_writable)(const AVS_VideoFrame * p);
|
||||
|
||||
AVSC_API(BYTE *, avs_get_write_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_write_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
|
||||
// no API for these, inline helper functions
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return avs_get_pitch_p(p, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return avs_get_row_size_p(p, 0); }
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return avs_get_height_p(p, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_read_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_write_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
// this inline function is calling an API function
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
@ -587,14 +660,14 @@ AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
//

// Treat AVS_Value as a fat pointer. That is use avs_copy_value
// and avs_release_value appropiaty as you would if AVS_Value was
// and avs_release_value appropriately as you would if AVS_Value was
// a pointer.

// To maintain source code compatibility with future versions of the
// avisynth_c API don't use the AVS_Value directly. Use the helper
// functions below.

// AVS_Value is layed out identicly to AVSValue
// AVS_Value is laid out identically to AVSValue
typedef struct AVS_Value AVS_Value;
struct AVS_Value {
  short type;  // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
@ -610,15 +683,19 @@ struct AVS_Value {
  } d;
};

// AVS_Value should be initilized with avs_void.
// AVS_Value should be initialized with avs_void.
// Should also set to avs_void after the value is released
// with avs_copy_value. Consider it the equalvent of setting
// with avs_copy_value. Consider it the equivalent of setting
// a pointer to NULL
static const AVS_Value avs_void = {'v'};

AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
AVSC_API(void, avs_release_value)(AVS_Value);
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);


// no API for these, inline helper functions
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
@ -628,9 +705,6 @@ AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }

AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);

AVSC_INLINE int avs_as_bool(AVS_Value v)
        { return v.d.boolean; }
AVSC_INLINE int avs_as_int(AVS_Value v)
@ -661,11 +735,13 @@ AVSC_INLINE AVS_Value avs_new_value_float(float v0)
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
        { AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
#ifndef AVSC_NO_DECLSPEC
// this inline function is calling an API function
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
        { AVS_Value v; avs_set_to_clip(&v, v0); return v; }
#endif
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
        { AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = (short)size; return v; }
// end of inline helper functions
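// Illustrative only (not part of the upstream header): a minimal sketch of the
// "fat pointer" discipline described above, using only functions declared here;
// `src` stands for an AVS_Value obtained elsewhere.
//
//   AVS_Value dst = avs_void;    // initialize with avs_void
//   avs_copy_value(&dst, src);   // take a reference
//   /* ... use dst ... */
//   avs_release_value(dst);      // drop the reference
//   dst = avs_void;              // the equivalent of resetting a pointer to NULL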

/////////////////////////////////////////////////////////////////////
//
@ -722,7 +798,7 @@ struct AVS_FilterInfo

// Create a new filter
// fi is set to point to the AVS_FilterInfo so that you can
// modify it once it is initilized.
// modify it once it is initialized.
// store_child should generally be set to true. If it is not
// set than ALL methods (the function pointers) must be defined
// If it is set than you do not need to worry about freeing the child
@ -753,10 +829,26 @@ enum {
  AVS_CPUF_SSSE3      = 0x200,   // Core 2
  AVS_CPUF_SSE4       = 0x400,   // Penryn, Wolfdale, Yorkfield
  AVS_CPUF_SSE4_1     = 0x400,
  //AVS_CPUF_AVX      = 0x800,   // Sandy Bridge, Bulldozer
  AVS_CPUF_AVX        = 0x800,   // Sandy Bridge, Bulldozer
  AVS_CPUF_SSE4_2     = 0x1000,  // Nehalem
  //AVS_CPUF_AVX2     = 0x2000,  // Haswell
  //AVS_CPUF_AVX512   = 0x4000,  // Knights Landing
  // AVS+
  AVS_CPUF_AVX2       = 0x2000,  // Haswell
  AVS_CPUF_FMA3       = 0x4000,
  AVS_CPUF_F16C       = 0x8000,
  AVS_CPUF_MOVBE      = 0x10000, // Big Endian Move
  AVS_CPUF_POPCNT     = 0x20000,
  AVS_CPUF_AES        = 0x40000,
  AVS_CPUF_FMA4       = 0x80000,

  AVS_CPUF_AVX512F    = 0x100000,  // AVX-512 Foundation.
  AVS_CPUF_AVX512DQ   = 0x200000,  // AVX-512 DQ (Double/Quad granular) Instructions
  AVS_CPUF_AVX512PF   = 0x400000,  // AVX-512 Prefetch
  AVS_CPUF_AVX512ER   = 0x800000,  // AVX-512 Exponential and Reciprocal
  AVS_CPUF_AVX512CD   = 0x1000000, // AVX-512 Conflict Detection
  AVS_CPUF_AVX512BW   = 0x2000000, // AVX-512 BW (Byte/Word granular) Instructions
  AVS_CPUF_AVX512VL   = 0x4000000, // AVX-512 VL (128/256 Vector Length) Extensions
  AVS_CPUF_AVX512IFMA = 0x8000000, // AVX-512 IFMA integer 52 bit
  AVS_CPUF_AVX512VBMI = 0x10000000 // AVX-512 VBMI
};
@ -793,20 +885,23 @@ AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, con

AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
                                          const AVS_VideoInfo * vi, int align);
// align should be at least 16
// align should be at least 16 for classic Avisynth
// Avisynth+: any value, Avs+ ensures a minimum alignment if too small align is provided

// no API for these, inline helper functions
#ifndef AVSC_NO_DECLSPEC
AVSC_INLINE
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
// this inline function is calling an API function
AVSC_INLINE AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
                                     const AVS_VideoInfo * vi)
  {return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
  {return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}

AVSC_INLINE
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
// an older compatibility alias
// this inline function is calling an API function
AVSC_INLINE AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
                               const AVS_VideoInfo * vi)
  {return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
  {return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
#endif

// end of inline helper functions

AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
@ -839,7 +934,10 @@ AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_Vid
// The returned video frame must be be released

#ifdef AVSC_NO_DECLSPEC
// use LoadLibrary and related functions to dynamically load Avisynth instead of declspec(dllimport)
// This part uses LoadLibrary and related functions to dynamically load Avisynth instead of declspec(dllimport)
// When AVSC_NO_DECLSPEC is defined, you can use avs_load_library to populate API functions into a struct
// AVSC_INLINE functions which call onto an API functions should be treated specially (todo)

/*
  The following functions needs to have been declared, probably from windows.h

@ -856,6 +954,14 @@ typedef struct AVS_Library AVS_Library;

#define AVSC_DECLARE_FUNC(name) name##_func name

// AVSC_DECLARE_FUNC helps keeping naming convention: type is xxxxx_func, function name is xxxxx
// e.g. "AVSC_DECLARE_FUNC(avs_add_function);"
// is a shortcut for "avs_add_function_func avs_add_function;"

// Note: AVSC_INLINE functions which call into API,
// are guarded by #ifndef AVSC_NO_DECLSPEC
// They should call the appropriate library-> API entry

struct AVS_Library {
  HMODULE handle;
@ -898,28 +1004,11 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_is_rgb48);
|
||||
AVSC_DECLARE_FUNC(avs_is_rgb64);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv24);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv12);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv411);
|
||||
AVSC_DECLARE_FUNC(avs_is_y8);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv444p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv422p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv420p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_y16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv444ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv422ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv420ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_y32);
|
||||
AVSC_DECLARE_FUNC(avs_is_444);
|
||||
AVSC_DECLARE_FUNC(avs_is_422);
|
||||
AVSC_DECLARE_FUNC(avs_is_420);
|
||||
AVSC_DECLARE_FUNC(avs_is_y);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuva);
|
||||
AVSC_DECLARE_FUNC(avs_is_planar_rgb);
|
||||
AVSC_DECLARE_FUNC(avs_is_planar_rgba);
|
||||
AVSC_DECLARE_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling);
|
||||
@ -935,14 +1024,73 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_is_writable);
|
||||
AVSC_DECLARE_FUNC(avs_get_write_ptr_p);
|
||||
|
||||
// Avisynth+ specific
|
||||
// Note: these functions are simulated/use fallback to existing functions
|
||||
AVSC_DECLARE_FUNC(avs_is_rgb48);
|
||||
AVSC_DECLARE_FUNC(avs_is_rgb64);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv444p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv422p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv420p16);
|
||||
AVSC_DECLARE_FUNC(avs_is_y16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv444ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv422ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuv420ps);
|
||||
AVSC_DECLARE_FUNC(avs_is_y32);
|
||||
AVSC_DECLARE_FUNC(avs_is_444);
|
||||
AVSC_DECLARE_FUNC(avs_is_422);
|
||||
AVSC_DECLARE_FUNC(avs_is_420);
|
||||
AVSC_DECLARE_FUNC(avs_is_y);
|
||||
AVSC_DECLARE_FUNC(avs_is_yuva);
|
||||
AVSC_DECLARE_FUNC(avs_is_planar_rgb);
|
||||
AVSC_DECLARE_FUNC(avs_is_planar_rgba);
|
||||
AVSC_DECLARE_FUNC(avs_num_components);
|
||||
AVSC_DECLARE_FUNC(avs_component_size);
|
||||
AVSC_DECLARE_FUNC(avs_bits_per_component);
|
||||
// end of Avisynth+ specific
|
||||
|
||||
};
|
||||
|
||||
#undef AVSC_DECLARE_FUNC
|
||||
|
||||
// Helper functions for fallback simulation
// Avisynth+ extensions do not exist in classic Avisynth so they are simulated
AVSC_INLINE int avs_is_xx_fallback_return_false(const AVS_VideoInfo * p)
{
  return 0;
}

// Avisynth+ extensions do not exist in classic Avisynth so they are simulated
AVSC_INLINE int avs_num_components_fallback(const AVS_VideoInfo * p)
{
  switch (p->pixel_type) {
  case AVS_CS_UNKNOWN:
    return 0;
  case AVS_CS_RAW32:
  case AVS_CS_Y8:
    return 1;
  case AVS_CS_BGR32:
    return 4; // not planar but return the count
  default:
    return 3;
  }
}

// Avisynth+ extensions do not exist in classic Avisynth so they are simulated
AVSC_INLINE int avs_component_size_fallback(const AVS_VideoInfo * p)
{
  return 1;
}

// Avisynth+ extensions do not exist in classic Avisynth so they are simulated
AVSC_INLINE int avs_bits_per_component_fallback(const AVS_VideoInfo * p)
{
  return 8;
}
// End of helper functions for fallback simulation

// avs_load_library() allocates an array for API procedure entries
// reads and fills the entries with live procedure addresses.
// AVSC_INLINE helpers which are calling into API procedures are not treated here (todo)
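// Illustrative only (not part of the upstream header): a minimal sketch of how a
// host built with AVSC_NO_DECLSPEC might use avs_load_library(); `vi` stands for
// an AVS_VideoInfo pointer obtained elsewhere and error handling is elided.
//
//   AVS_Library *avs = avs_load_library();
//   if (!avs)
//       return; /* avisynth.dll missing or an entry point failed to resolve */
//   int bits = avs->avs_bits_per_component(vi);  // call through the function table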
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
@ -960,6 +1108,55 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
goto fail;\
|
||||
}
|
||||
|
||||
#if 0
|
||||
// FFmpeg-specific: we don't use the FALLBACK stuff, and it causes build errors,
|
||||
// so ifdef it out on our side.
|
||||
|
||||
// When an API function is not loadable, let's try a replacement
|
||||
// Missing Avisynth+ functions will be substituted with classic Avisynth compatible methods
|
||||
/*
|
||||
Avisynth+ When method is missing (classic Avisynth)
|
||||
avs_is_rgb48 constant false
|
||||
avs_is_rgb64 constant false
|
||||
avs_is_yuv444p16 constant false
|
||||
avs_is_yuv422p16 constant false
|
||||
avs_is_yuv420p16 constant false
|
||||
avs_is_y16 constant false
|
||||
avs_is_yuv444ps constant false
|
||||
avs_is_yuv422ps constant false
|
||||
avs_is_yuv420ps constant false
|
||||
avs_is_y32 constant false
|
||||
avs_is_444 avs_is_yv24
|
||||
avs_is_422 avs_is_yv16
|
||||
avs_is_420 avs_is_yv12
|
||||
avs_is_y avs_is_y8
|
||||
avs_is_yuva constant false
|
||||
avs_is_planar_rgb constant false
|
||||
avs_is_planar_rgba constant false
|
||||
avs_num_components special: avs_num_components_fake Y8:1 RGB32:4 else 3
|
||||
avs_component_size constant 1 (1 bytes/component)
|
||||
avs_bits_per_component constant 8 (8 bits/component)
|
||||
*/
|
||||
|
||||
// try to load an alternative function
|
||||
#define AVSC_LOAD_FUNC_FALLBACK(name,name2) {\
|
||||
library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name));\
|
||||
if (library->name == NULL)\
|
||||
library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name2));\
|
||||
if (library->name == NULL)\
|
||||
goto fail;\
|
||||
}
|
||||
|
||||
// try to assign a replacement function
|
||||
#define AVSC_LOAD_FUNC_FALLBACK_SIMULATED(name,name2) {\
|
||||
library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name));\
|
||||
if (library->name == NULL)\
|
||||
library->name = name2;\
|
||||
if (library->name == NULL)\
|
||||
goto fail;\
|
||||
}
|
||||
#endif
|
||||
|
||||
AVSC_LOAD_FUNC(avs_add_function);
|
||||
AVSC_LOAD_FUNC(avs_at_exit);
|
||||
AVSC_LOAD_FUNC(avs_bit_blt);
|
||||
@ -999,28 +1196,11 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_is_rgb48);
|
||||
AVSC_LOAD_FUNC(avs_is_rgb64);
|
||||
AVSC_LOAD_FUNC(avs_is_yv24);
|
||||
AVSC_LOAD_FUNC(avs_is_yv16);
|
||||
AVSC_LOAD_FUNC(avs_is_yv12);
|
||||
AVSC_LOAD_FUNC(avs_is_yv411);
|
||||
AVSC_LOAD_FUNC(avs_is_y8);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv444p16);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv422p16);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv420p16);
|
||||
AVSC_LOAD_FUNC(avs_is_y16);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv444ps);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv422ps);
|
||||
AVSC_LOAD_FUNC(avs_is_yuv420ps);
|
||||
AVSC_LOAD_FUNC(avs_is_y32);
|
||||
AVSC_LOAD_FUNC(avs_is_444);
|
||||
AVSC_LOAD_FUNC(avs_is_422);
|
||||
AVSC_LOAD_FUNC(avs_is_420);
|
||||
AVSC_LOAD_FUNC(avs_is_y);
|
||||
AVSC_LOAD_FUNC(avs_is_yuva);
|
||||
AVSC_LOAD_FUNC(avs_is_planar_rgb);
|
||||
AVSC_LOAD_FUNC(avs_is_planar_rgba);
|
||||
AVSC_LOAD_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_plane_width_subsampling);
|
||||
@ -1036,15 +1216,35 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_is_writable);
|
||||
AVSC_LOAD_FUNC(avs_get_write_ptr_p);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_num_components);
|
||||
AVSC_LOAD_FUNC(avs_component_size);
|
||||
AVSC_LOAD_FUNC(avs_bits_per_component);
|
||||
|
||||
|
||||
#if 0
|
||||
// Avisynth+ specific but made them callable for classic Avisynth hosts
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_rgb48, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_rgb64, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv444p16, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv422p16, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv420p16, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_y16, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv444ps, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv422ps, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuv420ps, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_y32, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK(avs_is_444, avs_is_yv24);
|
||||
AVSC_LOAD_FUNC_FALLBACK(avs_is_422, avs_is_yv16);
|
||||
AVSC_LOAD_FUNC_FALLBACK(avs_is_420, avs_is_yv12);
|
||||
AVSC_LOAD_FUNC_FALLBACK(avs_is_y, avs_is_y8);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_yuva, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_planar_rgb, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_is_planar_rgba, avs_is_xx_fallback_return_false);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_num_components, avs_num_components_fallback);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_component_size, avs_component_size_fallback);
|
||||
AVSC_LOAD_FUNC_FALLBACK_SIMULATED(avs_bits_per_component, avs_bits_per_component_fallback);
|
||||
#endif
|
||||
|
||||
#undef __AVSC_STRINGIFY
|
||||
#undef AVSC_STRINGIFY
|
||||
#undef AVSC_LOAD_FUNC
|
||||
#undef AVSC_LOAD_FUNC_FALLBACK
|
||||
#undef AVSC_LOAD_FUNC_FALLBACK_SIMULATED
|
||||
|
||||
return library;
|
||||
|
||||
|
@ -39,17 +39,49 @@
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# if defined(GCC) && defined(X86_32)
|
||||
# define AVSC_CC
|
||||
# else // MSVC builds and 64-bit GCC
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# endif
|
||||
#else // needed for programs that talk to AviSynth+
|
||||
# ifndef AVSC_WIN32_GCC32 // see comment below
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# else
|
||||
# define AVSC_CC
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// On 64-bit Windows, there's only one calling convention,
|
||||
// so there is no difference between MSVC and GCC. On 32-bit,
|
||||
// this isn't true. The convention that GCC needs to use to
|
||||
// even build AviSynth+ as 32-bit makes anything that uses
|
||||
// it incompatible with 32-bit MSVC builds of AviSynth+.
|
||||
// The AVSC_WIN32_GCC32 define is meant to provide a user
|
||||
// switchable way to make builds of FFmpeg to test 32-bit
|
||||
// GCC builds of AviSynth+ without having to screw around
|
||||
// with alternate headers, while still default to the usual
|
||||
// situation of using 32-bit MSVC builds of AviSynth+.
|
||||
|
||||
// Hopefully, this situation will eventually be resolved
|
||||
// and a broadly compatible solution will arise so the
|
||||
// same 32-bit FFmpeg build can handle either MSVC or GCC
|
||||
// builds of AviSynth+.
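// For illustration only (an assumption about typical usage, not taken from this
// header): the switch would normally be supplied on the compiler command line,
// e.g. configuring FFmpeg with --extra-cflags=-DAVSC_WIN32_GCC32 when testing a
// 32-bit GCC build of AviSynth+.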
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
# define AVSC_EXPORT __declspec(dllexport)
|
||||
# define AVSC_API(ret, name) EXTERN_C AVSC_EXPORT ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
|
@ -42,7 +42,7 @@
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 32
|
||||
#define FRAME_ALIGN 64
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
@ -52,4 +52,19 @@
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# define MSVC
|
||||
#elif defined(__GNUC__)
|
||||
# define GCC
|
||||
#elif defined(__clang__)
|
||||
# define CLANG
|
||||
#else
|
||||
# error Unsupported compiler.
|
||||
#endif
|
||||
|
||||
#if defined(GCC)
|
||||
# undef __forceinline
|
||||
# define __forceinline inline
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
||||
|
@ -35,6 +35,12 @@
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
#ifdef __cplusplus
|
||||
#include <cstddef>
|
||||
#else
|
||||
#include <stddef.h>
|
||||
#endif
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
131
compat/cuda/cuda_runtime.h
Normal file
131
compat/cuda/cuda_runtime.h
Normal file
@ -0,0 +1,131 @@
|
||||
/*
|
||||
* Minimum CUDA compatibility definitions header
|
||||
*
|
||||
* Copyright (c) 2019 Rodger Combs
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
#define COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
|
||||
// Common macros
|
||||
#define __global__ __attribute__((global))
|
||||
#define __device__ __attribute__((device))
|
||||
#define __device_builtin__ __attribute__((device_builtin))
|
||||
#define __align__(N) __attribute__((aligned(N)))
|
||||
#define __inline__ __inline__ __attribute__((always_inline))
|
||||
|
||||
#define max(a, b) ((a) > (b) ? (a) : (b))
|
||||
#define min(a, b) ((a) < (b) ? (a) : (b))
|
||||
#define abs(x) ((x) < 0 ? -(x) : (x))
|
||||
|
||||
#define atomicAdd(a, b) (__atomic_fetch_add(a, b, __ATOMIC_SEQ_CST))
|
||||
|
||||
// Basic typedefs
|
||||
typedef __device_builtin__ unsigned long long cudaTextureObject_t;
|
||||
|
||||
typedef struct __device_builtin__ __align__(2) uchar2
|
||||
{
|
||||
unsigned char x, y;
|
||||
} uchar2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) ushort2
|
||||
{
|
||||
unsigned short x, y;
|
||||
} ushort2;
|
||||
|
||||
typedef struct __device_builtin__ uint3
|
||||
{
|
||||
unsigned int x, y, z;
|
||||
} uint3;
|
||||
|
||||
typedef struct uint3 dim3;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) int2
|
||||
{
|
||||
int x, y;
|
||||
} int2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) uchar4
|
||||
{
|
||||
unsigned char x, y, z, w;
|
||||
} uchar4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) ushort4
|
||||
{
|
||||
unsigned char x, y, z, w;
|
||||
} ushort4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) int4
|
||||
{
|
||||
int x, y, z, w;
|
||||
} int4;
|
||||
|
||||
// Accessors for special registers
|
||||
#define GETCOMP(reg, comp) \
|
||||
asm("mov.u32 %0, %%" #reg "." #comp ";" : "=r"(tmp)); \
|
||||
ret.comp = tmp;
|
||||
|
||||
#define GET(name, reg) static inline __device__ uint3 name() {\
|
||||
uint3 ret; \
|
||||
unsigned tmp; \
|
||||
GETCOMP(reg, x) \
|
||||
GETCOMP(reg, y) \
|
||||
GETCOMP(reg, z) \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
GET(getBlockIdx, ctaid)
|
||||
GET(getBlockDim, ntid)
|
||||
GET(getThreadIdx, tid)
|
||||
|
||||
// Instead of externs for these registers, we turn access to them into calls into trivial ASM
|
||||
#define blockIdx (getBlockIdx())
|
||||
#define blockDim (getBlockDim())
|
||||
#define threadIdx (getThreadIdx())
|
||||
|
||||
// Basic initializers (simple macros rather than inline functions)
|
||||
#define make_uchar2(a, b) ((uchar2){.x = a, .y = b})
|
||||
#define make_ushort2(a, b) ((ushort2){.x = a, .y = b})
|
||||
#define make_uchar4(a, b, c, d) ((uchar4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_ushort4(a, b, c, d) ((ushort4){.x = a, .y = b, .z = c, .w = d})
|
||||
|
||||
// Conversions from the tex instruction's 4-register output to various types
|
||||
#define TEX2D(type, ret) static inline __device__ void conv(type* out, unsigned a, unsigned b, unsigned c, unsigned d) {*out = (ret);}
|
||||
|
||||
TEX2D(unsigned char, a & 0xFF)
|
||||
TEX2D(unsigned short, a & 0xFFFF)
|
||||
TEX2D(uchar2, make_uchar2(a & 0xFF, b & 0xFF))
|
||||
TEX2D(ushort2, make_ushort2(a & 0xFFFF, b & 0xFFFF))
|
||||
TEX2D(uchar4, make_uchar4(a & 0xFF, b & 0xFF, c & 0xFF, d & 0xFF))
|
||||
TEX2D(ushort4, make_ushort4(a & 0xFFFF, b & 0xFFFF, c & 0xFFFF, d & 0xFFFF))
|
||||
|
||||
// Template calling tex instruction and converting the output to the selected type
|
||||
template <class T>
|
||||
static inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
T ret;
|
||||
unsigned ret1, ret2, ret3, ret4;
|
||||
asm("tex.2d.v4.u32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret1), "=r"(ret2), "=r"(ret3), "=r"(ret4) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
conv(&ret, ret1, ret2, ret3, ret4);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
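A minimal, illustrative kernel sketch (not part of the upstream sources) showing how the register macros and the tex2D template above are meant to be used when a .cu file is compiled with clang against this header; the function and parameter names are invented for the example:

extern "C" __global__ void copy_plane(cudaTextureObject_t src, unsigned char *dst,
                                      int dst_pitch, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // position derived from the special registers
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
        dst[y * dst_pitch + x] = tex2D<unsigned char>(src, x, y);  // read through the texture object
}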
|
@ -16,8 +16,8 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AV_COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#define AV_COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#ifndef COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#define COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
|
||||
#include "libavutil/log.h"
|
||||
#include "compat/w32dlfcn.h"
|
||||
@ -30,4 +30,4 @@
|
||||
|
||||
#include <ffnvcodec/dynlink_loader.h>
|
||||
|
||||
#endif
|
||||
#endif /* COMPAT_CUDA_DYNLINK_LOADER_H */
|
||||
|
@ -27,7 +27,7 @@ IN="$2"
|
||||
NAME="$(basename "$IN" | sed 's/\..*//')"
|
||||
|
||||
printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
|
||||
while read LINE
|
||||
while IFS= read -r LINE
|
||||
do
|
||||
printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
|
||||
done < "$IN"
|
||||
|
47
compat/djgpp/math.c
Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#define FUN(name, type, op) \
|
||||
type name(type x, type y) \
|
||||
{ \
|
||||
if (fpclassify(x) == FP_NAN) return y; \
|
||||
if (fpclassify(y) == FP_NAN) return x; \
|
||||
return x op y ? x : y; \
|
||||
}
|
||||
|
||||
FUN(fmin, double, <)
|
||||
FUN(fmax, double, >)
|
||||
FUN(fminf, float, <)
|
||||
FUN(fmaxf, float, >)
|
||||
|
||||
long double fmodl(long double x, long double y)
|
||||
{
|
||||
return fmod(x, y);
|
||||
}
|
||||
|
||||
long double scalbnl(long double x, int exp)
|
||||
{
|
||||
return scalbn(x, exp);
|
||||
}
|
||||
|
||||
long double copysignl(long double x, long double y)
|
||||
{
|
||||
return copysign(x, y);
|
||||
}
|
25
compat/djgpp/math.h
Normal file
@ -0,0 +1,25 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
double fmin(double, double);
|
||||
double fmax(double, double);
|
||||
float fminf(float, float);
|
||||
float fmaxf(float, float);
|
||||
long double fmodl(long double, long double);
|
||||
long double scalbnl(long double, int);
|
||||
long double copysignl(long double, long double);
|
@ -48,7 +48,7 @@ trap 'rm -f -- $libname' EXIT
|
||||
if [ -n "$AR" ]; then
|
||||
$AR rcs ${libname} $@ >/dev/null
|
||||
else
|
||||
lib -out:${libname} $@ >/dev/null
|
||||
lib.exe -out:${libname} $@ >/dev/null
|
||||
fi
|
||||
if [ $? != 0 ]; then
|
||||
echo "Could not create temporary library." >&2
|
||||
@ -108,7 +108,7 @@ if [ -n "$NM" ]; then
|
||||
cut -d' ' -f3 |
|
||||
sed -e "s/^${prefix}//")
|
||||
else
|
||||
dump=$(dumpbin -linkermember:1 ${libname} |
|
||||
dump=$(dumpbin.exe -linkermember:1 ${libname} |
|
||||
sed -e '/public symbols/,$!d' -e '/^ \{1,\}Summary/,$d' -e "s/ \{1,\}${prefix}/ /" -e 's/ \{1,\}/ /g' |
|
||||
tail -n +2 |
|
||||
cut -d' ' -f3)
|
||||
|
@ -4,6 +4,6 @@ LINK_EXE_PATH=$(dirname "$(command -v cl)")/link
|
||||
if [ -x "$LINK_EXE_PATH" ]; then
|
||||
"$LINK_EXE_PATH" $@
|
||||
else
|
||||
link $@
|
||||
link.exe $@
|
||||
fi
|
||||
exit $?
|
||||
|
6
debian/changelog
vendored
@ -1,3 +1,9 @@
jellyfin-ffmpeg (4.2.1-1) unstable; urgency=medium

  * New upstream version 4.2.1

 -- Joshua Boniface <joshua@boniface.me>  Fri, 27 Sep 2019 20:13:25 -0400

jellyfin-ffmpeg (4.0.4-3) unstable; urgency=medium

  * Use libfontconfig to allow baked-in subtitle support
@ -15,6 +15,69 @@ libavutil: 2017-10-21
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 4.2 was cut here -------- 8< ---------
|
||||
|
||||
2019-06-21 - a30e44098a - lavu 56.30.100 - frame.h
|
||||
Add FF_DECODE_ERROR_DECODE_SLICES
|
||||
|
||||
2019-06-14 - edfced8c04 - lavu 56.29.100 - frame.h
|
||||
Add FF_DECODE_ERROR_CONCEALMENT_ACTIVE
|
||||
|
||||
2019-05-15 - b79b29ddb1 - lavu 56.28.100 - tx.h
|
||||
Add av_tx_init(), av_tx_uninit() and related definitions.
|
||||
|
||||
2019-04-20 - 3153a6502a - lavc 58.52.100 - avcodec.h
|
||||
Add AV_CODEC_FLAG_DROPCHANGED to allow avcodec_receive_frame to drop
|
||||
frames whose parameters differ from first decoded frame in stream.
|
||||
|
||||
2019-04-12 - abfeba9724 - lavf 58.27.102
|
||||
Rename hls,applehttp demuxer to hls
|
||||
|
||||
2019-01-27 - 5bcefceec8 - lavc 58.46.100 - avcodec.h
|
||||
Add discard_damaged_percentage
|
||||
|
||||
2019-01-08 - 1ef4828276 - lavu 56.26.100 - frame.h
|
||||
Add AV_FRAME_DATA_REGIONS_OF_INTEREST
|
||||
|
||||
2018-12-21 - 2744d6b364 - lavu 56.25.100 - hdr_dynamic_metadata.h
|
||||
Add AV_FRAME_DATA_DYNAMIC_HDR_PLUS enum value, av_dynamic_hdr_plus_alloc(),
|
||||
av_dynamic_hdr_plus_create_side_data() functions, and related structs.
|
||||
|
||||
-------- 8< --------- FFmpeg 4.1 was cut here -------- 8< ---------
|
||||
|
||||
2018-10-27 - 718044dc19 - lavu 56.21.100 - pixdesc.h
|
||||
Add av_read_image_line2(), av_write_image_line2()
|
||||
|
||||
2018-10-24 - f9d4126f28 - lavu 56.20.100 - frame.h
|
||||
Add AV_FRAME_DATA_S12M_TIMECODE
|
||||
|
||||
2018-10-11 - f6d48b618a - lavc 58.33.100 - mediacodec.h
|
||||
Add av_mediacodec_render_buffer_at_time().
|
||||
|
||||
2018-09-09 - 35498c124a - lavc 58.29.100 - avcodec.h
|
||||
Add AV_PKT_DATA_AFD
|
||||
|
||||
2018-08-16 - b33f5299a5 - lavc 58.23.100 - avcodec.h
|
||||
Add av_bsf_flush().
|
||||
|
||||
2018-05-18 - 2b2f2f65f3 - lavf 58.15.100 - avformat.h
|
||||
Add pmt_version field to AVProgram
|
||||
|
||||
2018-05-17 - 5dfeb7f081 - lavf 58.14.100 - avformat.h
|
||||
Add AV_DISPOSITION_STILL_IMAGE
|
||||
|
||||
2018-05-10 - c855683427 - lavu 56.18.101 - hwcontext_cuda.h
|
||||
Add AVCUDADeviceContext.stream.
|
||||
|
||||
2018-04-30 - 56b081da57 - lavu 56.18.100 - pixdesc.h
|
||||
Add AV_PIX_FMT_FLAG_ALPHA to AV_PIX_FMT_PAL8.
|
||||
|
||||
2018-04-26 - 5be0410cb3 - lavu 56.17.100 - opt.h
|
||||
Add AV_OPT_FLAG_DEPRECATED.
|
||||
|
||||
2018-04-26 - 71fa82bed6 - lavu 56.16.100 - threadmessage.h
|
||||
Add av_thread_message_queue_nb_elems().
|
||||
|
||||
-------- 8< --------- FFmpeg 4.0 was cut here -------- 8< ---------
|
||||
|
||||
2018-04-03 - d6fc031caf - lavu 56.13.100 - pixdesc.h
|
||||
|
@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = 4.0.4
|
||||
PROJECT_NUMBER = 4.2.1
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
|
@ -37,6 +37,61 @@ raw ADTS AAC or an MPEG-TS container to MP4A-LATM, to an FLV file, or
|
||||
to MOV/MP4 files and related formats such as 3GP or M4A. Please note
|
||||
that it is auto-inserted for MP4A-LATM and MOV/MP4 and related formats.
|
||||
|
||||
@section av1_metadata
|
||||
|
||||
Modify metadata embedded in an AV1 stream.
|
||||
|
||||
@table @option
|
||||
@item td
|
||||
Insert or remove temporal delimiter OBUs in all temporal units of the
|
||||
stream.
|
||||
|
||||
@table @samp
|
||||
@item insert
|
||||
Insert a TD at the beginning of every TU which does not already have one.
|
||||
@item remove
|
||||
Remove the TD from the beginning of every TU which has one.
|
||||
@end table
|
||||
|
||||
@item color_primaries
|
||||
@item transfer_characteristics
|
||||
@item matrix_coefficients
|
||||
Set the color description fields in the stream (see AV1 section 6.4.2).
|
||||
|
||||
@item color_range
|
||||
Set the color range in the stream (see AV1 section 6.4.2; note that
|
||||
this cannot be set for streams using BT.709 primaries, sRGB transfer
|
||||
characteristic and identity (RGB) matrix coefficients).
|
||||
@table @samp
|
||||
@item tv
|
||||
Limited range.
|
||||
@item pc
|
||||
Full range.
|
||||
@end table
|
||||
|
||||
@item chroma_sample_position
|
||||
Set the chroma sample location in the stream (see AV1 section 6.4.2).
|
||||
This can only be set for 4:2:0 streams.
|
||||
|
||||
@table @samp
|
||||
@item vertical
|
||||
Left position (matching the default in MPEG-2 and H.264).
|
||||
@item colocated
|
||||
Top-left position.
|
||||
@end table
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (@emph{num_units_in_display_tick / time_scale}) in
|
||||
the timing info in the sequence header.
|
||||
@item num_ticks_per_picture
|
||||
Set the number of ticks in each picture, to indicate that the stream
|
||||
has a fixed framerate. Ignored if @option{tick_rate} is not also set.
|
||||
|
||||
@item delete_padding
|
||||
Deletes Padding OBUs.
|
||||
|
||||
@end table
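
For illustration, a hypothetical invocation (built only from the options described above) that inserts temporal delimiters and marks the stream as full range while stream-copying could look like:
@example
ffmpeg -i INPUT -c copy -bsf:v av1_metadata=td=insert:color_range=pc OUTPUT.mkv
@end example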
|
||||
|
||||
@section chomp
|
||||
|
||||
Remove zero padding at the end of a packet.
|
||||
@ -48,7 +103,9 @@ DTS-HD.
|
||||
|
||||
@section dump_extra
|
||||
|
||||
Add extradata to the beginning of the filtered packets.
|
||||
Add extradata to the beginning of the filtered packets except when
|
||||
said packets already exactly begin with the extradata that is intended
|
||||
to be added.
|
||||
|
||||
@table @option
|
||||
@item freq
|
||||
@ -65,7 +122,7 @@ add extradata to all packets
|
||||
@end table
|
||||
@end table
|
||||
|
||||
If not specified it is assumed @samp{e}.
|
||||
If not specified it is assumed @samp{k}.
|
||||
|
||||
For example the following @command{ffmpeg} command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
@ -215,6 +272,15 @@ insert the string ``hello'' associated with the given UUID.
|
||||
@item delete_filler
|
||||
Deletes both filler NAL units and filler SEI messages.
|
||||
|
||||
@item level
|
||||
Set the level in the SPS. Refer to H.264 section A.3 and tables A-1
|
||||
to A-5.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{4.2}), a
|
||||
level_idc value (for example, @samp{42}), or the special name @samp{auto}
|
||||
indicating that the filter should attempt to guess the level from the
|
||||
input stream properties.
|
||||
|
||||
@end table
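
For illustration, a hypothetical command that only rewrites the level while copying the stream could look like:
@example
ffmpeg -i INPUT -c copy -bsf:v h264_metadata=level=4.2 OUTPUT.mp4
@end example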
|
||||
|
||||
@section h264_mp4toannexb
|
||||
@ -297,6 +363,15 @@ will replace the current ones if the stream is already cropped.
|
||||
These fields are set in pixels. Note that some sizes may not be
|
||||
representable if the chroma is subsampled (H.265 section 7.4.3.2.1).
|
||||
|
||||
@item level
|
||||
Set the level in the VPS and SPS. See H.265 section A.4 and tables
|
||||
A.6 and A.7.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{5.1}), a
|
||||
@emph{general_level_idc} value (for example, @samp{153} for level 5.1),
|
||||
or the special name @samp{auto} indicating that the filter should
|
||||
attempt to guess the level from the input stream properties.
|
||||
|
||||
@end table
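
For illustration, a hypothetical command setting level 5.1 (the example value used above) while copying the stream could look like:
@example
ffmpeg -i INPUT -c copy -bsf:v hevc_metadata=level=5.1 OUTPUT.mp4
@end example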
|
||||
|
||||
@section hevc_mp4toannexb
|
||||
@ -469,6 +544,72 @@ ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
@section null
|
||||
This bitstream filter passes the packets through unchanged.
|
||||
|
||||
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
|
||||
@table @option
|
||||
@item color_primaries
|
||||
Set the color primaries.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same color primaries property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item bt470bg
|
||||
BT601 625
|
||||
|
||||
@item smpte170m
|
||||
BT601 525
|
||||
|
||||
@item bt2020
|
||||
@item smpte431
|
||||
DCI P3
|
||||
|
||||
@item smpte432
|
||||
P3 D65
|
||||
|
||||
@end table
|
||||
|
||||
@item transfer_characteristics
|
||||
Set the color transfer.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same transfer characteristics property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
BT 601, BT 709, BT 2020
|
||||
@end table
|
||||
|
||||
|
||||
@item matrix_coefficients
|
||||
Set the matrix coefficient.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same transfer characteristics property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item smpte170m
|
||||
BT 601
|
||||
|
||||
@item bt2020nc
|
||||
@end table
|
||||
@end table
|
||||
|
||||
Set Rec709 colorspace for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
Remove extradata from packets.
|
||||
@ -505,7 +646,38 @@ Log trace output containing all syntax elements in the coded stream
|
||||
headers (everything above the level of individual coded blocks).
|
||||
This can be useful for debugging low-level stream issues.
|
||||
|
||||
Supports H.264, H.265 and MPEG-2.
|
||||
Supports AV1, H.264, H.265, (M)JPEG, MPEG-2 and VP9, but depending
|
||||
on the build only a subset of these may be available.
|
||||
|
||||
@section truehd_core
|
||||
|
||||
Extract the core from a TrueHD stream, dropping ATMOS data.
|
||||
|
||||
@section vp9_metadata
|
||||
|
||||
Modify metadata embedded in a VP9 stream.
|
||||
|
||||
@table @option
|
||||
@item color_space
|
||||
Set the color space value in the frame header.
|
||||
@table @samp
|
||||
@item unknown
|
||||
@item bt601
|
||||
@item bt709
|
||||
@item smpte170
|
||||
@item smpte240
|
||||
@item bt2020
|
||||
@item rgb
|
||||
@end table
|
||||
|
||||
@item color_range
|
||||
Set the color range value in the frame header. Note that this cannot
|
||||
be set in RGB streams.
|
||||
@table @samp
|
||||
@item tv
|
||||
@item pc
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section vp9_superframe
|
||||
|
||||
|
@ -36,11 +36,11 @@ install
|
||||
examples
|
||||
Build all examples located in doc/examples.
|
||||
|
||||
libavformat/output-example
|
||||
Build the libavformat basic example.
|
||||
checkheaders
|
||||
Check headers dependencies.
|
||||
|
||||
libswscale/swscale-test
|
||||
Build the swscale self-test (useful also as an example).
|
||||
alltools
|
||||
Build all tools in tools directory.
|
||||
|
||||
config
|
||||
Reconfigure the project with the current configuration.
|
||||
|
@ -55,6 +55,9 @@ Do not draw edges.
|
||||
@item psnr
|
||||
Set error[?] variables during encoding.
|
||||
@item truncated
|
||||
@item drop_changed
|
||||
Don't output frames whose parameters differ from first decoded frame in stream.
|
||||
Error AVERROR_INPUT_CHANGED is returned when a frame is dropped.
|
||||
|
||||
@item ildct
|
||||
Use interlaced DCT.
|
||||
@ -775,8 +778,6 @@ Place global headers at every keyframe instead of in extradata.
|
||||
Frame data might be split into multiple chunks.
|
||||
@item showall
|
||||
Show all frames before the first keyframe.
|
||||
@item skiprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@ -962,6 +963,9 @@ Discard all bidirectional frames.
|
||||
@item nokey
|
||||
Discard all frames excepts keyframes.
|
||||
|
||||
@item nointra
|
||||
Discard all frames except I frames.
|
||||
|
||||
@item all
|
||||
Discard all frames.
|
||||
@end table
|
||||
@ -986,10 +990,6 @@ Set chroma qp offset from luma.
|
||||
@item trellis @var{integer} (@emph{encoding,audio,video})
|
||||
Set rate-distortion optimal quantization.
|
||||
|
||||
@item sc_factor @var{integer} (@emph{encoding,video})
|
||||
Set value multiplied by qscale for each frame and added to
|
||||
scene_change_score.
|
||||
|
||||
@item mv0_threshold @var{integer} (@emph{encoding,video})
|
||||
@item b_sensitivity @var{integer} (@emph{encoding,video})
|
||||
Adjust sensitivity of b_frame_strategy 1.
|
||||
@ -1236,7 +1236,7 @@ instead of alpha. Default is 0.
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example to separate the fields with newlines and indention:
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
|
@ -47,6 +47,38 @@ top-field-first is assumed
|
||||
|
||||
@end table
|
||||
|
||||
@section libdav1d
|
||||
|
||||
dav1d AV1 decoder.
|
||||
|
||||
libdav1d allows libavcodec to decode the AOMedia Video 1 (AV1) codec.
|
||||
Requires the presence of the libdav1d headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libdav1d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libdav1d wrapper.
|
||||
|
||||
@table @option
|
||||
|
||||
@item framethreads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
|
||||
@item tilethreads
|
||||
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
|
||||
|
||||
@item filmgrain
|
||||
Apply film grain to the decoded video if present in the bitstream. The default value
|
||||
is true.
|
||||
|
||||
@end table
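
For illustration, a hypothetical command that selects this decoder explicitly and disables film grain synthesis could look like:
@example
ffmpeg -c:v libdav1d -filmgrain 0 -i INPUT -f null -
@end example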
|
||||
|
||||
@section libdavs2
|
||||
|
||||
AVS2-P2/IEEE1857.4 video decoder wrapper.
|
||||
|
||||
This decoder allows libavcodec to decode AVS2 streams with davs2 library.
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@ -188,6 +220,31 @@ without this library.
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTILES DECODERS
|
||||
|
||||
@section libaribb24
|
||||
|
||||
ARIB STD-B24 caption decoder.
|
||||
|
||||
Implements profiles A and C of the ARIB STD-B24 standard.
|
||||
|
||||
@subsection libaribb24 Decoder Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -aribb24-base-path @var{path}
|
||||
Sets the base path for the libaribb24 library. This is utilized for reading of
|
||||
configuration files (for custom unicode conversions), and for dumping of
|
||||
non-text symbols as images under that location.
|
||||
|
||||
Unset by default.
|
||||
|
||||
@item -aribb24-skip-ruby-text @var{boolean}
|
||||
Tells the decoder wrapper to skip text blocks that contain half-height ruby
|
||||
text.
|
||||
|
||||
Enabled by default.
|
||||
|
||||
@end table
|
||||
|
||||
@section dvbsub
|
||||
|
||||
@subsection Options
|
||||
@ -248,18 +305,25 @@ configuration. You need to explicitly configure the build with
|
||||
|
||||
@table @option
|
||||
@item txt_page
|
||||
List of teletext page numbers to decode. You may use the special * string to
|
||||
match all pages. Pages that do not match the specified list are dropped.
|
||||
List of teletext page numbers to decode. Pages that do not match the specified
|
||||
list are dropped. You may use the special @code{*} string to match all pages,
|
||||
or @code{subtitle} to match all subtitle pages.
|
||||
Default value is *.
|
||||
@item txt_chop_top
|
||||
Discards the top teletext line. Default value is 1.
|
||||
@item txt_format
|
||||
Specifies the format of the decoded subtitles. The teletext decoder is capable
|
||||
of decoding the teletext pages to bitmaps or to simple text, you should use
|
||||
"bitmap" for teletext pages, because certain graphics and colors cannot be
|
||||
expressed in simple text. You might use "text" for teletext based subtitles if
|
||||
your application can handle simple text based subtitles. Default value is
|
||||
bitmap.
|
||||
Specifies the format of the decoded subtitles.
|
||||
@table @option
|
||||
@item bitmap
|
||||
The default format, you should use this for teletext pages, because certain
|
||||
graphics and colors cannot be expressed in simple text or even ASS.
|
||||
@item text
|
||||
Simple text based output without formatting.
|
||||
@item ass
|
||||
Formatted ASS output, subtitle pages and teletext pages are returned in
|
||||
different styles, subtitle pages are stripped down to text, but an effort is
|
||||
made to keep the text alignment and the formatting.
|
||||
@end table
|
||||
@item txt_left
|
||||
X offset of generated bitmaps, default is 0.
|
||||
@item txt_top
|
||||
@ -272,7 +336,8 @@ present between the subtitle lines because of double-sized teletext characters.
|
||||
Default value is 1.
|
||||
@item txt_duration
|
||||
Sets the display duration of the decoded teletext pages or subtitles in
|
||||
milliseconds. Default value is 30000 which is 30 seconds.
|
||||
milliseconds. Default value is -1 which means infinity or until the next
|
||||
subtitle event comes.
|
||||
@item txt_transparent
|
||||
Force transparent background of the generated teletext bitmaps. Default value
|
||||
is 0 which means an opaque background.
|
||||
|
@ -25,17 +25,6 @@ Audible Format 2, 3, and 4 demuxer.
|
||||
|
||||
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
|
||||
|
||||
@section applehttp
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
|
||||
This demuxer presents all AVStreams from all variant streams.
|
||||
The id field is set to the bitrate variant index number. By setting
|
||||
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
|
||||
the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section apng
|
||||
|
||||
Animated Portable Network Graphics demuxer.
|
||||
@ -269,6 +258,12 @@ ffmpeg -f live_flv -i rtmp://<any.server>/anything/key ....
|
||||
@table @option
|
||||
@item -flv_metadata @var{bool}
|
||||
Allocate the streams according to the onMetaData array content.
|
||||
|
||||
@item -flv_ignore_prevtag @var{bool}
|
||||
Ignore the size of previous tag value.
|
||||
|
||||
@item -flv_full_metadata @var{bool}
|
||||
Output all context of the onMetadata.
|
||||
@end table
|
||||
|
||||
@section gif
|
||||
@ -314,6 +309,15 @@ infinitely.
|
||||
|
||||
HLS demuxer
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
|
||||
This demuxer presents all AVStreams from all variant streams.
|
||||
The id field is set to the bitrate variant index number. By setting
|
||||
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
|
||||
the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@ -475,14 +479,84 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
|
||||
|
||||
The Game Music Emu library is a collection of video game music file emulators.
|
||||
|
||||
See @url{http://code.google.com/p/game-music-emu/} for more information.
|
||||
See @url{https://bitbucket.org/mpyne/game-music-emu/overview} for more information.
|
||||
|
||||
Some files have multiple tracks. The demuxer will pick the first track by
|
||||
default. The @option{track_index} option can be used to select a different
|
||||
track. Track indexes start at 0. The demuxer exports the number of tracks as
|
||||
@var{tracks} meta data entry.
|
||||
It accepts the following options:
|
||||
|
||||
For very large files, the @option{max_size} option may have to be adjusted.
|
||||
@table @option
|
||||
|
||||
@item track_index
|
||||
Set the index of which track to demux. The demuxer can only export one track.
|
||||
Track indexes start at 0. Default is to pick the first track. Number of tracks
|
||||
is exported as @var{tracks} metadata entry.
|
||||
|
||||
@item sample_rate
|
||||
Set the sampling rate of the exported track. Range is 1000 to 999999. Default is 44100.
|
||||
|
||||
@item max_size @emph{(bytes)}
|
||||
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
|
||||
which in turn, acts as a ceiling for the size of files that can be read.
|
||||
Default is 50 MiB.
|
||||
|
||||
@end table
|
||||
|
||||
@section libmodplug
|
||||
|
||||
ModPlug based module demuxer
|
||||
|
||||
See @url{https://github.com/Konstanty/libmodplug}
|
||||
|
||||
It will export one 2-channel 16-bit 44.1 kHz audio stream.
|
||||
Optionally, a @code{pal8} 16-color video stream can be exported with or without printed metadata.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item noise_reduction
|
||||
Apply a simple low-pass filter. Can be 1 (on) or 0 (off). Default is 0.
|
||||
|
||||
@item reverb_depth
|
||||
Set amount of reverb. Range 0-100. Default is 0.
|
||||
|
||||
@item reverb_delay
|
||||
Set delay in ms, clamped to 40-250 ms. Default is 0.
|
||||
|
||||
@item bass_amount
|
||||
Apply bass expansion a.k.a. XBass or megabass. Range is 0 (quiet) to 100 (loud). Default is 0.
|
||||
|
||||
@item bass_range
|
||||
Set cutoff i.e. upper-bound for bass frequencies. Range is 10-100 Hz. Default is 0.
|
||||
|
||||
@item surround_depth
|
||||
Apply a Dolby Pro-Logic surround effect. Range is 0 (quiet) to 100 (heavy). Default is 0.
|
||||
|
||||
@item surround_delay
|
||||
Set surround delay in ms, clamped to 5-40 ms. Default is 0.
|
||||
|
||||
@item max_size
|
||||
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
|
||||
which in turn, acts as a ceiling for the size of files that can be read. Range is 0 to 100 MiB.
|
||||
0 removes buffer size limit (not recommended). Default is 5 MiB.
|
||||
|
||||
@item video_stream_expr
|
||||
String which is evaluated using the eval API to assign colors to the generated video stream.
|
||||
Variables which can be used are @code{x}, @code{y}, @code{w}, @code{h}, @code{t}, @code{speed},
|
||||
@code{tempo}, @code{order}, @code{pattern} and @code{row}.
|
||||
|
||||
@item video_stream
|
||||
Generate video stream. Can be 1 (on) or 0 (off). Default is 0.
|
||||
|
||||
@item video_stream_w
|
||||
Set video frame width in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.
|
||||
|
||||
@item video_stream_h
|
||||
Set video frame height in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.
|
||||
|
||||
@item video_stream_ptxt
|
||||
Print metadata on video stream. Includes @code{speed}, @code{tempo}, @code{order}, @code{pattern},
|
||||
@code{row} and @code{ts} (time in ms). Can be 1 (on) or 0 (off). Default is 1.
|
||||
|
||||
@end table
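As an illustrative sketch (the file name is a placeholder), the following decodes a
module file with some reverb applied and also generates the optional metadata video
stream; all of these are demuxer options and therefore precede @option{-i}:
@example
ffmpeg -reverb_depth 50 -reverb_delay 100 -video_stream 1 -i song.mod out.mkv
@end example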
@section libopenmpt
|
||||
|
||||
@ -538,6 +612,9 @@ This demuxer accepts the following options:
|
||||
Set size limit for looking up a new synchronization. Default value is
|
||||
65536.
|
||||
|
||||
@item skip_unknown_pmt
|
||||
Skip PMTs for programs not defined in the PAT. Default value is 0.
|
||||
|
||||
@item fix_teletext_pts
|
||||
Override teletext packet PTS and DTS values with the timestamps calculated
|
||||
from the PCR of the first program which the teletext stream is part of and is
|
||||
@ -552,6 +629,10 @@ Show the detected raw packet size, cannot be set by the user.
|
||||
Scan and combine all PMTs. The value is an integer with value from -1
|
||||
to 1 (-1 means automatic setting, 1 means enabled, 0 means
|
||||
disabled). Default value is -1.
|
||||
|
||||
@item merge_pmt_versions
|
||||
Re-use existing streams when a PMT's version is updated and elementary
|
||||
streams move to different PIDs. Default value is 0.
|
||||
@end table
|
||||
|
||||
@section mpjpeg
|
||||
@ -649,4 +730,20 @@ Example: convert the captions to a format most players understand:
|
||||
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
|
||||
@end example
|
||||
|
||||
@section vapoursynth
|
||||
|
||||
Vapoursynth wrapper.
|
||||
|
||||
Due to security concerns, Vapoursynth scripts will not
|
||||
be autodetected so the input format has to be forced. For ff* CLI tools,
|
||||
add @code{-f vapoursynth} before the input @code{-i yourscript.vpy}.
|
||||
|
||||
This demuxer accepts the following option:
|
||||
@table @option
|
||||
@item max_script_size
|
||||
The demuxer buffers the entire script into memory. Adjust this value to set the maximum buffer size,
which, in turn, acts as a ceiling on the size of scripts that can be read.
Default is 1 MiB.
|
||||
@end table
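For example, a minimal sketch of reading a script (here assumed to be
@file{script.vpy}) with the format forced as described above, and encoding its
output:
@example
ffmpeg -f vapoursynth -i script.vpy -c:v libx264 out.mkv
@end example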
@c man end DEMUXERS
|
||||
|
@ -128,6 +128,9 @@ designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
|
||||
@item
|
||||
for loops with variable definition (@samp{for (int i = 0; i < 8; i++)});
|
||||
|
||||
@item
|
||||
Implementation defined behavior for signed integers is assumed to match the
|
||||
expected behavior for two's complement. Non representable values in integer
|
||||
|
@ -733,6 +733,14 @@ if set to 0.
|
||||
|
||||
Default value is 0.
|
||||
|
||||
@item eld_v2
|
||||
Enable ELDv2 (LD-MPS extension for ELD stereo signals) if set to 1,
disabled if set to 0.

Note that this option is only available when the fdk-aac library version
(AACENCODER_LIB_VL0.AACENCODER_LIB_VL1.AACENCODER_LIB_VL2) is greater than 4.0.0.

Default value is 0.
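A hedged example (assuming a suitably recent libfdk-aac build; the file names are
placeholders) of enabling ELDv2 together with the AAC-ELD profile:
@example
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_eld -eld_v2 1 output.m4a
@end example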
@item signaling
|
||||
Set SBR/PS signaling style.
|
||||
|
||||
@ -1370,6 +1378,181 @@ makes it possible to store non-rgb pix_fmts.
|
||||
|
||||
@end table
|
||||
|
||||
@section libaom-av1
|
||||
|
||||
libaom AV1 encoder wrapper.
|
||||
|
||||
Requires the presence of the libaom headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libaom}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The wrapper supports the following standard libavcodec options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set bitrate target in bits/second. By default this will use
|
||||
variable-bitrate mode. If @option{maxrate} and @option{minrate} are
|
||||
also set to the same value then it will use constant-bitrate mode,
|
||||
otherwise if @option{crf} is set as well then it will use
|
||||
constrained-quality mode.
|
||||
|
||||
@item g keyint_min
|
||||
Set key frame placement. The GOP size sets the maximum distance between
|
||||
key frames; if zero the output stream will be intra-only. The minimum
|
||||
distance is ignored unless it is the same as the GOP size, in which case
|
||||
key frames will always appear at a fixed interval. Not set by default,
|
||||
so without this option the library has completely free choice about
|
||||
where to place key frames.
|
||||
|
||||
@item qmin qmax
|
||||
Set minimum/maximum quantisation values. Valid range is from 0 to 63
|
||||
(warning: this does not match the quantiser values actually used by AV1
|
||||
- divide by four to map real quantiser values to this range). Defaults
|
||||
to min/max (no constraint).
|
||||
|
||||
@item minrate maxrate bufsize rc_init_occupancy
|
||||
Set rate control buffering parameters. Not used if not set - defaults
|
||||
to unconstrained variable bitrate.
|
||||
|
||||
@item threads
|
||||
Set the number of threads to use while encoding. This may require the
|
||||
@option{tiles} or @option{row-mt} options to also be set to actually
|
||||
use the specified number of threads fully. Defaults to the number of
|
||||
hardware threads supported by the host machine.
|
||||
|
||||
@item profile
|
||||
Set the encoding profile. Defaults to using the profile which matches
|
||||
the bit depth and chroma subsampling of the input.
|
||||
|
||||
@end table
|
||||
|
||||
The wrapper also has some specific options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item cpu-used
|
||||
Set the quality/encoding speed tradeoff. Valid range is from 0 to 8,
|
||||
higher numbers indicating greater speed and lower quality. The default
|
||||
value is 1, which will be slow and high quality.
|
||||
|
||||
@item auto-alt-ref
|
||||
Enable use of alternate reference frames. Defaults to the internal
|
||||
default of the library.
|
||||
|
||||
@item arnr-max-frames (@emph{frames})
|
||||
Set altref noise reduction max frame count. Default is -1.
|
||||
|
||||
@item arnr-strength (@emph{strength})
|
||||
Set altref noise reduction filter strength. Range is -1 to 6. Default is -1.
|
||||
|
||||
@item aq-mode (@emph{aq-mode})
|
||||
Set adaptive quantization mode. Possible values:
|
||||
|
||||
@table @samp
|
||||
@item none (@emph{0})
|
||||
Disabled.
|
||||
|
||||
@item variance (@emph{1})
|
||||
Variance-based.
|
||||
|
||||
@item complexity (@emph{2})
|
||||
Complexity-based.
|
||||
|
||||
@item cyclic (@emph{3})
|
||||
Cyclic refresh.
|
||||
@end table
|
||||
|
||||
@item lag-in-frames
|
||||
Set the maximum number of frames which the encoder may keep in flight
|
||||
at any one time for lookahead purposes. Defaults to the internal
|
||||
default of the library.
|
||||
|
||||
@item error-resilience
|
||||
Enable error resilience features:
|
||||
@table @option
|
||||
@item default
|
||||
Improve resilience against losses of whole frames.
|
||||
@end table
|
||||
Not enabled by default.
|
||||
|
||||
@item crf
|
||||
Set the quality/size tradeoff for constant-quality (no bitrate target)
|
||||
and constrained-quality (with maximum bitrate target) modes. Valid
|
||||
range is 0 to 63, higher numbers indicating lower quality and smaller
|
||||
output size. Only used if set; by default only the bitrate target is
|
||||
used.
|
||||
|
||||
@item static-thresh
|
||||
Set a change threshold on blocks below which they will be skipped by
|
||||
the encoder. Defined in arbitrary units as a nonnegative integer,
|
||||
defaulting to zero (no blocks are skipped).
|
||||
|
||||
@item drop-threshold
|
||||
Set a threshold for dropping frames when close to rate control bounds.
|
||||
Defined as a percentage of the target buffer - when the rate control
|
||||
buffer falls below this percentage, frames will be dropped until it
|
||||
has refilled above the threshold. Defaults to zero (no frames are
|
||||
dropped).
|
||||
|
||||
@item denoise-noise-level (@emph{level})
|
||||
Amount of noise to be removed for grain synthesis. Grain synthesis is disabled if
|
||||
this option is not set or set to 0.
|
||||
|
||||
@item denoise-block-size (@emph{pixels})
|
||||
Block size used for denoising for grain synthesis. If not set, AV1 codec
|
||||
uses the default value of 32.
|
||||
|
||||
@item undershoot-pct (@emph{pct})
|
||||
Set datarate undershoot (min) percentage of the target bitrate. Range is -1 to 100.
|
||||
Default is -1.
|
||||
|
||||
@item overshoot-pct (@emph{pct})
|
||||
Set datarate overshoot (max) percentage of the target bitrate. Range is -1 to 1000.
|
||||
Default is -1.
|
||||
|
||||
@item minsection-pct (@emph{pct})
|
||||
Minimum percentage variation of the GOP bitrate from the target bitrate. If minsection-pct
|
||||
is not set, the libaomenc wrapper computes it as follows: @code{(minrate * 100 / bitrate)}.
|
||||
Range is -1 to 100. Default is -1 (unset).
|
||||
|
||||
@item maxsection-pct (@emph{pct})
|
||||
Maximum percentage variation of the GOP bitrate from the target bitrate. If maxsection-pct
|
||||
is not set, the libaomenc wrapper computes it as follows: @code{(maxrate * 100 / bitrate)}.
|
||||
Range is -1 to 5000. Default is -1 (unset).
|
||||
|
||||
@item frame-parallel (@emph{boolean})
|
||||
Enable frame parallel decodability features. Default is true.
|
||||
|
||||
@item tiles
|
||||
Set the number of tiles to encode the input video with, as columns x
|
||||
rows. Larger numbers allow greater parallelism in both encoding and
|
||||
decoding, but may decrease coding efficiency. Defaults to the minimum
|
||||
number of tiles required by the size of the input video (this is 1x1
|
||||
(that is, a single tile) for sizes up to and including 4K).
|
||||
|
||||
@item tile-columns tile-rows
|
||||
Set the number of tiles as log2 of the number of tile rows and columns.
|
||||
Provided for compatibility with libvpx/VP9.
|
||||
|
||||
@item row-mt (Requires libaom >= 1.0.0-759-g90a15f4f2)
|
||||
Enable row based multi-threading. Disabled by default.
|
||||
|
||||
@item enable-cdef (@emph{boolean})
|
||||
Enable Constrained Directional Enhancement Filter. The libaom-av1
|
||||
encoder enables CDEF by default.
|
||||
|
||||
@item enable-global-motion (@emph{boolean})
|
||||
Enable the use of global motion for block prediction. Default is true.
|
||||
|
||||
@item enable-intrabc (@emph{boolean})
|
||||
Enable block copy mode for intra block prediction. This mode is
|
||||
useful for screen content. Default is true.
|
||||
|
||||
@end table
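To tie the options above together, here is a hedged sketch (file names are
placeholders): a constant-quality encode using @option{crf} with the bitrate target
disabled, a faster @option{cpu-used} preset, and tiling plus row multi-threading
(the latter needs a sufficiently new libaom, as noted above) for parallelism:
@example
ffmpeg -i input.mkv -c:v libaom-av1 -crf 30 -b:v 0 -cpu-used 4 -tiles 2x2 -row-mt 1 output.webm
@end example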
@section libkvazaar
|
||||
|
||||
Kvazaar H.265/HEVC encoder.
|
||||
@ -1641,7 +1824,8 @@ means unlimited.
|
||||
@table @option
|
||||
@item auto-alt-ref
|
||||
Enable use of alternate reference frames (2-pass only).
|
||||
@item arnr-max-frames
|
||||
Values greater than 1 enable multi-layer alternate reference frames (VP9 only).
|
||||
@item arnr-maxframes
|
||||
Set altref noise reduction max frame count.
|
||||
@item arnr-type
|
||||
Set altref noise reduction filter type: backward, forward, centered.
|
||||
@ -1654,6 +1838,38 @@ Set number of frames to look ahead for frametype and ratecontrol.
|
||||
@item error-resilient
|
||||
Enable error resiliency features.
|
||||
|
||||
@item sharpness @var{integer}
|
||||
Increase sharpness at the expense of lower PSNR.
|
||||
The valid range is [0, 7].
|
||||
|
||||
@item VP8-specific options
|
||||
@table @option
|
||||
@item ts-parameters
|
||||
Sets the temporal scalability configuration using a :-separated list of
|
||||
key=value pairs. For example, to specify temporal scalability parameters
|
||||
with @code{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\
|
||||
ts_target_bitrate=250000,500000,1000000:ts_rate_decimator=4,2,1:\
|
||||
ts_periodicity=4:ts_layer_id=0,2,1,2 OUTPUT
|
||||
@end example
|
||||
Below is a brief explanation of each of the parameters, please
|
||||
refer to @code{struct vpx_codec_enc_cfg} in @code{vpx/vpx_encoder.h} for more
|
||||
details.
|
||||
@table @option
|
||||
@item ts_number_layers
|
||||
Number of temporal coding layers.
|
||||
@item ts_target_bitrate
|
||||
Target bitrate for each temporal layer.
|
||||
@item ts_rate_decimator
|
||||
Frame rate decimation factor for each temporal layer.
|
||||
@item ts_periodicity
|
||||
Length of the sequence defining frame temporal layer membership.
|
||||
@item ts_layer_id
|
||||
Template defining the membership of frames to temporal layers.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@item VP9-specific options
|
||||
@table @option
|
||||
@item lossless
|
||||
@ -1692,6 +1908,8 @@ Corpus VBR mode is a variant of standard VBR where the complexity distribution
|
||||
midpoint is passed in rather than calculated for a specific clip or chunk.
|
||||
|
||||
The valid range is [0, 10000]. 0 (default) uses standard VBR.
|
||||
@item enable-tpl @var{boolean}
|
||||
Enable temporal dependency model.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
@ -2180,6 +2398,63 @@ ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section libxavs2
|
||||
|
||||
xavs2 AVS2-P2/IEEE1857.4 encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the libxavs2 headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@option{--enable-libxavs2}.
|
||||
|
||||
The following standard libavcodec options are used:
|
||||
@itemize
|
||||
@item
|
||||
@option{b} / @option{bit_rate}
|
||||
@item
|
||||
@option{g} / @option{gop_size}
|
||||
@item
|
||||
@option{bf} / @option{max_b_frames}
|
||||
@end itemize
|
||||
|
||||
The encoder also has its own specific options:
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item lcu_row_threads
|
||||
Set the number of parallel threads for rows from 1 to 8 (default 5).
|
||||
|
||||
@item initial_qp
|
||||
Set the xavs2 quantization parameter from 1 to 63 (default 34). This is
|
||||
used to set the initial qp for the first frame.
|
||||
|
||||
@item qp
|
||||
Set the xavs2 quantization parameter from 1 to 63 (default 34). This is
|
||||
used to set the qp value under constant-QP mode.
|
||||
|
||||
@item max_qp
|
||||
Set the max qp for rate control from 1 to 63 (default 55).
|
||||
|
||||
@item min_qp
|
||||
Set the min qp for rate control from 1 to 63 (default 20).
|
||||
|
||||
@item speed_level
|
||||
Set the Speed level from 0 to 9 (default 0). Higher is better but slower.
|
||||
|
||||
@item log_level
|
||||
Set the log level from -1 to 3 (default 0). -1: none, 0: error,
|
||||
1: warning, 2: info, 3: debug.
|
||||
|
||||
@item xavs2-params
|
||||
Set xavs2 options using a list of @var{key}=@var{value} couples separated
|
||||
by ":".
|
||||
|
||||
For example to specify libxavs2 encoding options with @option{-xavs2-params}:
|
||||
|
||||
@example
|
||||
ffmpeg -i input -c:v libxavs2 -xavs2-params RdoqLevel=0 output.avs2
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section libxvid
|
||||
|
||||
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||
@ -2370,6 +2645,9 @@ Specifies the video_format written into the sequence display extension
|
||||
indicating the source of the video pictures. The default is @samp{unspecified},
|
||||
can be @samp{component}, @samp{pal}, @samp{ntsc}, @samp{secam} or @samp{mac}.
|
||||
For maximum compatibility, use @samp{component}.
|
||||
@item a53cc @var{boolean}
|
||||
Import closed captions (which must be ATSC compatible format) into output.
|
||||
Default is 1 (on).
|
||||
@end table
|
||||
|
||||
@section png
|
||||
@ -2565,6 +2843,9 @@ The following standard libavcodec options are used:
|
||||
@option{bf} / @option{max_b_frames}
|
||||
@item
|
||||
@option{profile}
|
||||
|
||||
If not set, this will be determined automatically from the format of the input
|
||||
frames and the profiles supported by the driver.
|
||||
@item
|
||||
@option{level}
|
||||
@item
|
||||
@ -2585,7 +2866,8 @@ Speed / quality tradeoff: higher values are faster / worse quality.
|
||||
Size / quality tradeoff: higher values are smaller / worse quality.
|
||||
@item
|
||||
@option{qmin}
|
||||
(only: @option{qmax} is not supported)
|
||||
@item
|
||||
@option{qmax}
|
||||
@item
|
||||
@option{i_qfactor} / @option{i_quant_factor}
|
||||
@item
|
||||
@ -2594,8 +2876,55 @@ Size / quality tradeoff: higher values are smaller / worse quality.
|
||||
@option{b_qfactor} / @option{b_quant_factor}
|
||||
@item
|
||||
@option{b_qoffset} / @option{b_quant_offset}
|
||||
@item
|
||||
@option{slices}
|
||||
@end itemize
|
||||
|
||||
All encoders support the following options:
|
||||
@table @option
|
||||
@item low_power
|
||||
Some drivers/platforms offer a second encoder for some codecs intended to use
|
||||
less power than the default encoder; setting this option will attempt to use
|
||||
that encoder. Note that it may support a reduced feature set, so some other
|
||||
options may not be available in this mode.
|
||||
|
||||
@item idr_interval
|
||||
Set the number of normal intra frames between full-refresh (IDR) frames in
|
||||
open-GOP mode. The intra frames are still IRAPs, but will not include global
|
||||
headers and may have non-decodable leading pictures.
|
||||
|
||||
@item b_depth
|
||||
Set the B-frame reference depth. When set to one (the default), all B-frames
|
||||
will refer only to P- or I-frames. When set to greater values multiple layers
|
||||
of B-frames will be present, frames in each layer only referring to frames in
|
||||
higher layers.
|
||||
|
||||
@item rc_mode
|
||||
Set the rate control mode to use. A given driver may only support a subset of
|
||||
modes.
|
||||
|
||||
Possible modes:
|
||||
@table @option
|
||||
@item auto
|
||||
Choose the mode automatically based on driver support and the other options.
|
||||
This is the default.
|
||||
@item CQP
|
||||
Constant-quality.
|
||||
@item CBR
|
||||
Constant-bitrate.
|
||||
@item VBR
|
||||
Variable-bitrate.
|
||||
@item ICQ
|
||||
Intelligent constant-quality.
|
||||
@item QVBR
|
||||
Quality-defined variable-bitrate.
|
||||
@item AVBR
|
||||
Average variable bitrate.
|
||||
@end table
|
||||
|
||||
@end table
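As a rough sketch of how these generic options combine in practice (the device path
and file names are only examples, and driver support for each rate control mode
varies), a constant-QP H.264 VAAPI encode could look like this, where @option{-qp}
is the encoder-specific constant-QP option described below:
@example
ffmpeg -vaapi_device /dev/dri/renderD128 -i input.mp4 \
    -vf 'format=nv12,hwupload' -c:v h264_vaapi -rc_mode CQP -qp 25 output.mp4
@end example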
Each encoder also has its own specific options:
|
||||
@table @option
|
||||
|
||||
@item h264_vaapi
|
||||
@ -2603,8 +2932,6 @@ Size / quality tradeoff: higher values are smaller / worse quality.
|
||||
@option{level} sets the value of @emph{level_idc}.
|
||||
|
||||
@table @option
|
||||
@item low_power
|
||||
Use low-power encoding mode.
|
||||
@item coder
|
||||
Set entropy encoder (default is @emph{cabac}). Possible values:
|
||||
|
||||
@ -2617,21 +2944,70 @@ Use CABAC.
|
||||
@item cavlc
|
||||
Use CAVLC.
|
||||
@end table
|
||||
|
||||
@item aud
|
||||
Include access unit delimiters in the stream (not included by default).
|
||||
|
||||
@item sei
|
||||
Set SEI message types to include.
|
||||
Some combination of the following values:
|
||||
@table @samp
|
||||
@item identifier
|
||||
Include a @emph{user_data_unregistered} message containing information about
|
||||
the encoder.
|
||||
@item timing
|
||||
Include picture timing parameters (@emph{buffering_period} and
|
||||
@emph{pic_timing} messages).
|
||||
@item recovery_point
|
||||
Include recovery points where appropriate (@emph{recovery_point} messages).
|
||||
@end table
|
||||
|
||||
@end table
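A speculative variant of the previous sketch (same placeholder names) that also
forces CAVLC entropy coding and emits access unit delimiters:
@example
ffmpeg -vaapi_device /dev/dri/renderD128 -i input.mp4 \
    -vf 'format=nv12,hwupload' -c:v h264_vaapi -coder cavlc -aud 1 output.mp4
@end example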
@item hevc_vaapi
|
||||
@option{profile} and @option{level} set the values of
|
||||
@emph{general_profile_idc} and @emph{general_level_idc} respectively.
|
||||
|
||||
@table @option
|
||||
@item aud
|
||||
Include access unit delimiters in the stream (not included by default).
|
||||
|
||||
@item tier
|
||||
Set @emph{general_tier_flag}. This may affect the level chosen for the stream
|
||||
if it is not explicitly specified.
|
||||
|
||||
@item sei
|
||||
Set SEI message types to include.
|
||||
Some combination of the following values:
|
||||
@table @samp
|
||||
@item hdr
|
||||
Include HDR metadata if the input frames have it
|
||||
(@emph{mastering_display_colour_volume} and @emph{content_light_level}
|
||||
messages).
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@item mjpeg_vaapi
|
||||
Always encodes using the standard quantisation and huffman tables -
|
||||
@option{global_quality} scales the standard quantisation table (range 1-100).
|
||||
Only baseline DCT encoding is supported. The encoder always uses the standard
|
||||
quantisation and huffman tables - @option{global_quality} scales the standard
|
||||
quantisation table (range 1-100).
|
||||
|
||||
For YUV, 4:2:0, 4:2:2 and 4:4:4 subsampling modes are supported. RGB is also
|
||||
supported, and will create an RGB JPEG.
|
||||
|
||||
@table @option
|
||||
@item jfif
|
||||
Include JFIF header in each frame (not included by default).
|
||||
@item huffman
|
||||
Include standard huffman tables (on by default). Turning this off will save
|
||||
a few hundred bytes in each output frame, but may lose compatibility with some
|
||||
JPEG decoders which don't fully handle MJPEG.
|
||||
@end table
|
||||
|
||||
@item mpeg2_vaapi
|
||||
@option{profile} and @option{level} set the value of @emph{profile_and_level_indication}.
|
||||
|
||||
No rate control is supported.
|
||||
|
||||
@item vp8_vaapi
|
||||
B-frames are not supported.
|
||||
|
||||
|
@ -37,7 +37,7 @@ $(EXAMPLES_G): %$(PROGSSUF)_g$(EXESUF): %.o
|
||||
examples: $(EXAMPLES)
|
||||
|
||||
$(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.o): | doc/examples
|
||||
OBJDIRS += doc/examples
|
||||
OUTDIRS += doc/examples
|
||||
|
||||
DOXY_INPUT += $(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.c)
|
||||
|
||||
|
@ -117,11 +117,12 @@ int main(int argc, char *argv[])
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx) {
|
||||
if (avio_ctx)
|
||||
av_freep(&avio_ctx->buffer);
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
avio_context_free(&avio_ctx);
|
||||
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
|
@ -74,7 +74,6 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
|
@ -29,6 +29,8 @@
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
@ -77,7 +79,6 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
@ -210,18 +211,21 @@ int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
@ -248,27 +252,25 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (ret >= 0) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(&packet);
|
||||
|
@ -4,21 +4,23 @@
|
||||
*
|
||||
* HW Acceleration API (video decoding) decode sample
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
@ -211,7 +213,6 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
|
||||
decoder_ctx->get_format = get_hw_format;
|
||||
av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
if (hw_decoder_init(decoder_ctx, type) < 0)
|
||||
return -1;
|
||||
|
@ -47,6 +47,11 @@ int main (int argc, char **argv)
|
||||
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
|
||||
return ret;
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
|
@ -1,21 +1,23 @@
|
||||
/*
|
||||
* Video Acceleration API (video encoding) encode sample
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
|
@ -1,21 +1,23 @@
|
||||
/*
|
||||
* Video Acceleration API (video transcoding) transcode sample
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
|
12
doc/faq.texi
12
doc/faq.texi
@ -76,7 +76,7 @@ the gcc developers. Note that we will not add workarounds for gcc bugs.
|
||||
|
||||
Also note that (some of) the gcc developers believe this is not a bug or
|
||||
not a bug they should fix:
|
||||
@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
@url{https://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@ -257,13 +257,13 @@ default.
|
||||
@section Which are good parameters for encoding high quality MPEG-4?
|
||||
|
||||
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
|
||||
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
|
||||
|
||||
'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2'
|
||||
but beware the '-g 100' might cause problems with some decoders.
|
||||
Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd.
|
||||
Things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
|
||||
@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong?
|
||||
|
||||
@ -516,7 +516,7 @@ in the ffmpeg invocation. This is effective whether you run ffmpeg in a shell
|
||||
or invoke ffmpeg in its own process via an operating system API.
|
||||
|
||||
As an alternative, when you are running ffmpeg in a shell, you can redirect
|
||||
standard input to @code{/dev/null} (on Linux and Mac OS)
|
||||
standard input to @code{/dev/null} (on Linux and macOS)
|
||||
or @code{NUL} (on Windows). You can do this redirect either
|
||||
on the ffmpeg invocation, or from a shell script which calls ffmpeg.
|
||||
|
||||
@ -526,7 +526,7 @@ For example:
|
||||
ffmpeg -nostdin -i INPUT OUTPUT
|
||||
@end example
|
||||
|
||||
or (on Linux, Mac OS, and other UNIX-like shells):
|
||||
or (on Linux, macOS, and other UNIX-like shells):
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT OUTPUT </dev/null
|
||||
@ -601,7 +601,7 @@ No. These tools are too bloated and they complicate the build.
|
||||
FFmpeg is already organized in a highly modular manner and does not need to
|
||||
be rewritten in a formal object language. Further, many of the developers
|
||||
favor straight C; it works for them. For more arguments on this matter,
|
||||
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
|
||||
read @uref{https://web.archive.org/web/20111004021423/http://kernel.org/pub/linux/docs/lkml/#s15, "Programming Religion"}.
|
||||
|
||||
@section Why are the ffmpeg programs devoid of debugging symbols?
|
||||
|
||||
|
@ -155,6 +155,8 @@ space on each client, network bandwidth and so on benefit from smaller test case
|
||||
Also keep in mind older checkouts use existing sample files, that means in
|
||||
practice generally do not replace, remove or overwrite files as it likely would
|
||||
break older checkouts or releases.
|
||||
Also, all samples needed for a commit should be uploaded, ideally 24
hours before the push.
|
||||
|
||||
@example
|
||||
#First update your local samples copy:
|
||||
@ -222,6 +224,11 @@ Set to @samp{1} to generate the missing or mismatched references.
|
||||
Specify which hardware acceleration to use while running regression tests,
|
||||
by default @samp{none} is used.
|
||||
|
||||
@item KEEP
|
||||
Set to @samp{1} to keep the temporary files generated by a FATE test when the test is successful.
Default is @samp{0}, which removes these files. Files are always kept when a test
fails.
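For instance (a sketch; the samples path follows the convention used elsewhere in
this document), to run the FATE suite while keeping the temporary files of
successful tests for inspection:
@example
make fate KEEP=1 SAMPLES=fate-suite/
@end example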
@end table
|
||||
|
||||
@section Examples
|
||||
|
284
doc/ffmpeg.texi
284
doc/ffmpeg.texi
@ -216,16 +216,208 @@ filters is obviously also impossible, since filters work on uncompressed data.
|
||||
@chapter Stream selection
|
||||
@c man begin STREAM SELECTION
|
||||
|
||||
By default, @command{ffmpeg} includes only one stream of each type (video, audio, subtitle)
|
||||
present in the input files and adds them to each output file. It picks the
|
||||
"best" of each based upon the following criteria: for video, it is the stream
|
||||
with the highest resolution, for audio, it is the stream with the most channels, for
|
||||
subtitles, it is the first subtitle stream. In the case where several streams of
|
||||
the same type rate equally, the stream with the lowest index is chosen.
|
||||
@command{ffmpeg} provides the @code{-map} option for manual control of stream selection in each
|
||||
output file. Users can skip @code{-map} and let ffmpeg perform automatic stream selection as
|
||||
described below. The @code{-vn / -an / -sn / -dn} options can be used to skip inclusion of
|
||||
video, audio, subtitle and data streams respectively, whether manually mapped or automatically
|
||||
selected, except for those streams which are outputs of complex filtergraphs.
|
||||
|
||||
You can disable some of those defaults by using the @code{-vn/-an/-sn/-dn} options. For
|
||||
full manual control, use the @code{-map} option, which disables the defaults just
|
||||
described.
|
||||
@section Description
|
||||
The sub-sections that follow describe the various rules that are involved in stream selection.
|
||||
The examples that follow next show how these rules are applied in practice.
|
||||
|
||||
While every effort is made to accurately reflect the behavior of the program, FFmpeg is under
|
||||
continuous development and the code may have changed since the time of this writing.
|
||||
|
||||
@subsection Automatic stream selection
|
||||
|
||||
In the absence of any map options for a particular output file, ffmpeg inspects the output
|
||||
format to check which type of streams can be included in it, viz. video, audio and/or
|
||||
subtitles. For each acceptable stream type, ffmpeg will pick one stream, when available,
|
||||
from among all the inputs.
|
||||
|
||||
It will select that stream based upon the following criteria:
|
||||
@itemize
|
||||
@item
|
||||
for video, it is the stream with the highest resolution,
|
||||
@item
|
||||
for audio, it is the stream with the most channels,
|
||||
@item
|
||||
for subtitles, it is the first subtitle stream found, but there is a caveat.
|
||||
The output format's default subtitle encoder can be either text-based or image-based,
|
||||
and only a subtitle stream of the same type will be chosen.
|
||||
@end itemize
|
||||
|
||||
In the case where several streams of the same type rate equally, the stream with the lowest
|
||||
index is chosen.
|
||||
|
||||
Data or attachment streams are not automatically selected and can only be included
|
||||
using @code{-map}.
|
||||
@subsection Manual stream selection
|
||||
|
||||
When @code{-map} is used, only user-mapped streams are included in that output file,
|
||||
with one possible exception for filtergraph outputs described below.
|
||||
|
||||
@subsection Complex filtergraphs
|
||||
|
||||
If there are any complex filtergraph output streams with unlabeled pads, they will be added
|
||||
to the first output file. This will lead to a fatal error if the stream type is not supported
|
||||
by the output format. In the absence of the map option, the inclusion of these streams leads
|
||||
to the automatic stream selection of their types being skipped. If map options are present,
|
||||
these filtergraph streams are included in addition to the mapped streams.
|
||||
|
||||
Complex filtergraph output streams with labeled pads must be mapped once and exactly once.
|
||||
|
||||
@subsection Stream handling
|
||||
|
||||
Stream handling is independent of stream selection, with an exception for subtitles described
|
||||
below. Stream handling is set via the @code{-codec} option addressed to streams within a
|
||||
specific @emph{output} file. In particular, codec options are applied by ffmpeg after the
|
||||
stream selection process and thus do not influence the latter. If no @code{-codec} option is
|
||||
specified for a stream type, ffmpeg will select the default encoder registered by the output
|
||||
file muxer.
|
||||
|
||||
An exception exists for subtitles. If a subtitle encoder is specified for an output file, the
|
||||
first subtitle stream found of any type, text or image, will be included. ffmpeg does not validate
|
||||
if the specified encoder can convert the selected stream or if the converted stream is acceptable
|
||||
within the output format. This applies generally as well: when the user sets an encoder manually,
|
||||
the stream selection process cannot check if the encoded stream can be muxed into the output file.
|
||||
If it cannot, ffmpeg will abort and @emph{all} output files will fail to be processed.
|
||||
|
||||
@section Examples
|
||||
|
||||
The following examples illustrate the behavior, quirks and limitations of ffmpeg's stream
|
||||
selection methods.
|
||||
|
||||
They assume the following three input files.
|
||||
|
||||
@verbatim
|
||||
|
||||
input file 'A.avi'
|
||||
stream 0: video 640x360
|
||||
stream 1: audio 2 channels
|
||||
|
||||
input file 'B.mp4'
|
||||
stream 0: video 1920x1080
|
||||
stream 1: audio 2 channels
|
||||
stream 2: subtitles (text)
|
||||
stream 3: audio 5.1 channels
|
||||
stream 4: subtitles (text)
|
||||
|
||||
input file 'C.mkv'
|
||||
stream 0: video 1280x720
|
||||
stream 1: audio 2 channels
|
||||
stream 2: subtitles (image)
|
||||
@end verbatim
|
||||
|
||||
@subsubheading Example: automatic stream selection
|
||||
@example
|
||||
ffmpeg -i A.avi -i B.mp4 out1.mkv out2.wav -map 1:a -c:a copy out3.mov
|
||||
@end example
|
||||
There are three output files specified, and for the first two, no @code{-map} options
|
||||
are set, so ffmpeg will select streams for these two files automatically.
|
||||
|
||||
@file{out1.mkv} is a Matroska container file and accepts video, audio and subtitle streams,
|
||||
so ffmpeg will try to select one of each type.@*
|
||||
For video, it will select @code{stream 0} from @file{B.mp4}, which has the highest
|
||||
resolution among all the input video streams.@*
|
||||
For audio, it will select @code{stream 3} from @file{B.mp4}, since it has the greatest
|
||||
number of channels.@*
|
||||
For subtitles, it will select @code{stream 2} from @file{B.mp4}, which is the first subtitle
|
||||
stream from among @file{A.avi} and @file{B.mp4}.
|
||||
|
||||
@file{out2.wav} accepts only audio streams, so only @code{stream 3} from @file{B.mp4} is
|
||||
selected.
|
||||
|
||||
For @file{out3.mov}, since a @code{-map} option is set, no automatic stream selection will
|
||||
occur. The @code{-map 1:a} option will select all audio streams from the second input
|
||||
@file{B.mp4}. No other streams will be included in this output file.
|
||||
|
||||
For the first two outputs, all included streams will be transcoded. The encoders chosen will
|
||||
be the default ones registered by each output format, which may not match the codec of the
|
||||
selected input streams.
|
||||
|
||||
For the third output, codec option for audio streams has been set
|
||||
to @code{copy}, so no decoding-filtering-encoding operations will occur, or @emph{can} occur.
|
||||
Packets of selected streams shall be conveyed from the input file and muxed within the output
|
||||
file.
|
||||
|
||||
@subsubheading Example: automatic subtitles selection
|
||||
@example
|
||||
ffmpeg -i C.mkv out1.mkv -c:s dvdsub -an out2.mkv
|
||||
@end example
|
||||
Although @file{out1.mkv} is a Matroska container file which accepts subtitle streams, only a
|
||||
video and audio stream shall be selected. The subtitle stream of @file{C.mkv} is image-based
|
||||
and the default subtitle encoder of the Matroska muxer is text-based, so a transcode operation
|
||||
for the subtitles is expected to fail and hence the stream isn't selected. However, in
|
||||
@file{out2.mkv}, a subtitle encoder is specified in the command and so the subtitle stream is
selected, in addition to the video stream. The presence of @code{-an} disables audio stream
|
||||
selection for @file{out2.mkv}.
|
||||
|
||||
@subsubheading Example: unlabeled filtergraph outputs
|
||||
@example
|
||||
ffmpeg -i A.avi -i C.mkv -i B.mp4 -filter_complex "overlay" out1.mp4 out2.srt
|
||||
@end example
|
||||
A filtergraph is set up here using the @code{-filter_complex} option and consists of a single
video filter.
|
||||
specified, so the first two available video streams are used, those of @file{A.avi} and
|
||||
@file{C.mkv}. The output pad of the filter has no label and so is sent to the first output file
|
||||
@file{out1.mp4}. Due to this, automatic selection of the video stream is skipped, which would
|
||||
have selected the stream in @file{B.mp4}. The audio stream with most channels viz. @code{stream 3}
|
||||
in @file{B.mp4}, is chosen automatically. No subtitle stream is chosen however, since the MP4
|
||||
format has no default subtitle encoder registered, and the user hasn't specified a subtitle encoder.
|
||||
|
||||
The 2nd output file, @file{out2.srt}, only accepts text-based subtitle streams. So, even though
|
||||
the first subtitle stream available belongs to @file{C.mkv}, it is image-based and hence skipped.
|
||||
The selected stream, @code{stream 2} in @file{B.mp4}, is the first text-based subtitle stream.
|
||||
|
||||
@subsubheading Example: labeled filtergraph outputs
|
||||
@example
|
||||
ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0[outv];overlay;aresample" \
|
||||
-map '[outv]' -an out1.mp4 \
|
||||
out2.mkv \
|
||||
-map '[outv]' -map 1:a:0 out3.mkv
|
||||
@end example
|
||||
|
||||
The above command will fail, as the output pad labelled @code{[outv]} has been mapped twice.
|
||||
None of the output files shall be processed.
|
||||
|
||||
@example
|
||||
ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0[outv];overlay;aresample" \
|
||||
-an out1.mp4 \
|
||||
out2.mkv \
|
||||
-map 1:a:0 out3.mkv
|
||||
@end example
|
||||
|
||||
This command above will also fail as the hue filter output has a label, @code{[outv]},
|
||||
and hasn't been mapped anywhere.
|
||||
|
||||
The command should be modified as follows,
|
||||
@example
|
||||
ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0,split=2[outv1][outv2];overlay;aresample" \
|
||||
-map '[outv1]' -an out1.mp4 \
|
||||
out2.mkv \
|
||||
-map '[outv2]' -map 1:a:0 out3.mkv
|
||||
@end example
|
||||
The video stream from @file{B.mp4} is sent to the hue filter, whose output is cloned once using
|
||||
the split filter, and both outputs labelled. Then a copy each is mapped to the first and third
|
||||
output files.
|
||||
|
||||
The overlay filter, requiring two video inputs, uses the first two unused video streams. Those
|
||||
are the streams from @file{A.avi} and @file{C.mkv}. The overlay output isn't labelled, so it is
|
||||
sent to the first output file @file{out1.mp4}, regardless of the presence of the @code{-map} option.
|
||||
|
||||
The aresample filter is sent the first unused audio stream, that of @file{A.avi}. Since this filter
|
||||
output is also unlabelled, it too is mapped to the first output file. The presence of @code{-an}
|
||||
only suppresses automatic or manual stream selection of audio streams, not outputs sent from
|
||||
filtergraphs. Both these mapped streams shall be ordered before the mapped stream in @file{out1.mp4}.
|
||||
|
||||
The video, audio and subtitle streams mapped to @code{out2.mkv} are entirely determined by
|
||||
automatic stream selection.
|
||||
|
||||
@file{out3.mkv} consists of the cloned video output from the hue filter and the first audio
|
||||
stream from @file{B.mp4}.
|
||||
@*
|
||||
|
||||
@c man end STREAM SELECTION
|
||||
|
||||
@ -316,7 +508,7 @@ input until the timestamps reach @var{position}.
|
||||
@var{position} must be a time duration specification,
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
@item -sseof @var{position} (@emph{input/output})
|
||||
@item -sseof @var{position} (@emph{input})
|
||||
|
||||
Like the @code{-ss} option but relative to the "end of file". That is negative
|
||||
values are earlier in the file, 0 is at EOF.
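For example (a sketch with placeholder file names), to keep roughly the last 10
seconds of an input while stream copying:
@example
ffmpeg -sseof -10 -i input.mp4 -c copy tail.mp4
@end example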
@ -331,6 +523,9 @@ The offset is added to the timestamps of the input files. Specifying
|
||||
a positive offset means that the corresponding streams are delayed by
|
||||
the time duration specified in @var{offset}.
|
||||
|
||||
@item -itsscale @var{scale} (@emph{input,per-stream})
|
||||
Rescale input timestamps. @var{scale} should be a floating point number.
|
||||
|
||||
@item -timestamp @var{date} (@emph{output})
|
||||
Set the recording timestamp in the container.
|
||||
|
||||
@ -375,22 +570,31 @@ The following dispositions are recognized:
|
||||
@item hearing_impaired
|
||||
@item visual_impaired
|
||||
@item clean_effects
|
||||
@item attached_pic
|
||||
@item captions
|
||||
@item descriptions
|
||||
@item dependent
|
||||
@item metadata
|
||||
@end table
|
||||
|
||||
For example, to make the second audio stream the default stream:
|
||||
@example
|
||||
ffmpeg -i in.mkv -disposition:a:1 default out.mkv
|
||||
ffmpeg -i in.mkv -c copy -disposition:a:1 default out.mkv
|
||||
@end example
|
||||
|
||||
To make the second subtitle stream the default stream and remove the default
|
||||
disposition from the first subtitle stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -disposition:s:0 0 -disposition:s:1 default OUTPUT
|
||||
ffmpeg -i in.mkv -c copy -disposition:s:0 0 -disposition:s:1 default out.mkv
|
||||
@end example
|
||||
|
||||
To add an embedded cover/thumbnail:
|
||||
@example
|
||||
ffmpeg -i in.mp4 -i IMAGE -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic out.mp4
|
||||
@end example
|
||||
|
||||
Not all muxers support embedded thumbnails, and those who do, only support a few formats, like JPEG or PNG.
|
||||
|
||||
@item -program [title=@var{title}:][program_num=@var{program_num}:]st=@var{stream}[:st=@var{stream}...] (@emph{output})
|
||||
|
||||
Creates a program with the specified @var{title}, @var{program_num} and adds the specified
|
||||
@ -413,8 +617,13 @@ they do not conflict with the standard, as in:
|
||||
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
|
||||
@end example
|
||||
|
||||
@item -dn (@emph{output})
|
||||
Disable data recording. For full manual control see the @code{-map}
|
||||
@item -dn (@emph{input/output})
|
||||
As an input option, blocks all data streams of a file from being filtered or
|
||||
being automatically selected or mapped for any output. See @code{-discard}
|
||||
option to disable streams individually.
|
||||
|
||||
As an output option, disables data recording i.e. automatic selection or
|
||||
mapping of any data stream. For full manual control see the @code{-map}
|
||||
option.
|
||||
|
||||
@item -dframes @var{number} (@emph{output})
|
||||
@ -574,8 +783,13 @@ If used together with @option{-vcodec copy}, it will affect the aspect ratio
|
||||
stored at container level, but not the aspect ratio stored in encoded
|
||||
frames, if it exists.
|
||||
|
||||
@item -vn (@emph{output})
|
||||
Disable video recording. For full manual control see the @code{-map}
|
||||
@item -vn (@emph{input/output})
|
||||
As an input option, blocks all video streams of a file from being filtered or
|
||||
being automatically selected or mapped for any output. See @code{-discard}
|
||||
option to disable streams individually.
|
||||
|
||||
As an output option, disables video recording i.e. automatic selection or
|
||||
mapping of any video stream. For full manual control see the @code{-map}
|
||||
option.
|
||||
|
||||
@item -vcodec @var{codec} (@emph{output})
|
||||
@ -623,8 +837,6 @@ as the input (or graph output) and automatic conversions are disabled.
|
||||
|
||||
@item -sws_flags @var{flags} (@emph{input/output})
|
||||
Set SwScaler flags.
|
||||
@item -vdt @var{n}
|
||||
Discard threshold.
|
||||
|
||||
@item -rc_override[:@var{stream_specifier}] @var{override} (@emph{output,per-stream})
|
||||
Rate control override for specific intervals, formatted as "int,int,int"
|
||||
@ -890,8 +1102,13 @@ Set the number of audio channels. For output streams it is set by
|
||||
default to the number of input audio channels. For input streams
|
||||
this option only makes sense for audio grabbing devices and raw demuxers
|
||||
and is mapped to the corresponding demuxer options.
|
||||
@item -an (@emph{output})
|
||||
Disable audio recording. For full manual control see the @code{-map}
|
||||
@item -an (@emph{input/output})
|
||||
As an input option, blocks all audio streams of a file from being filtered or
|
||||
being automatically selected or mapped for any output. See @code{-discard}
|
||||
option to disable streams individually.
|
||||
|
||||
As an output option, disables audio recording i.e. automatic selection or
|
||||
mapping of any audio stream. For full manual control see the @code{-map}
|
||||
option.
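As a sketch (placeholder names), using @code{-an} as an input option so that the
audio streams of the first input are never considered for mapping:
@example
ffmpeg -an -i input.mp4 -c:v copy video_only.mp4
@end example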
@item -acodec @var{codec} (@emph{input/output})
|
||||
Set the audio codec. This is an alias for @code{-codec:a}.
|
||||
@ -926,8 +1143,13 @@ stereo but not 6 channels as 5.1. The default is to always try to guess. Use
|
||||
@table @option
|
||||
@item -scodec @var{codec} (@emph{input/output})
|
||||
Set the subtitle codec. This is an alias for @code{-codec:s}.
|
||||
@item -sn (@emph{output})
|
||||
Disable subtitle recording. For full manual control see the @code{-map}
|
||||
@item -sn (@emph{input/output})
|
||||
As an input option, blocks all subtitle streams of a file from being filtered or
|
||||
being automatically selected or mapped for any output. See @code{-discard}
|
||||
option to disable streams individually.
|
||||
|
||||
As an output option, disables subtitle recording i.e. automatic selection or
|
||||
mapping of any subtitle stream. For full manual control see the @code{-map}
|
||||
option.
|
||||
@item -sbsf @var{bitstream_filter}
|
||||
Deprecated, see -bsf
|
||||
@ -1154,12 +1376,12 @@ disable any chapter copying.
|
||||
|
||||
@item -benchmark (@emph{global})
|
||||
Show benchmarking information at the end of an encode.
|
||||
Shows CPU time used and maximum memory consumption.
|
||||
Shows real, system and user time used and maximum memory consumption.
|
||||
Maximum memory consumption is not supported on all systems,
|
||||
it will usually display as 0 if not supported.
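For example, a quick sketch for benchmarking decoding speed of an input without
writing an output file:
@example
ffmpeg -benchmark -i input.mkv -f null -
@end example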
@item -benchmark_all (@emph{global})
|
||||
Show benchmarking information during the encode.
|
||||
Shows CPU time used in various steps (audio/video encode/decode).
|
||||
Shows real, system and user time used in various steps (audio/video encode/decode).
|
||||
@item -timelimit @var{duration} (@emph{global})
|
||||
Exit after ffmpeg has been running for @var{duration} seconds.
|
||||
@item -dump (@emph{global})
|
||||
@ -1174,10 +1396,6 @@ loss).
|
||||
By default @command{ffmpeg} attempts to read the input(s) as fast as possible.
|
||||
This option will slow down the reading of the input(s) to the native frame rate
|
||||
of the input(s). It is useful for real-time output (e.g. live streaming).
|
||||
@item -loop_output @var{number_of_times}
|
||||
Repeatedly loop output for formats that support looping such as animated GIF
|
||||
(0 will loop the output infinitely).
|
||||
This option is deprecated, use -loop.
|
||||
@item -vsync @var{parameter}
|
||||
Video sync method.
|
||||
For compatibility reasons old values can be specified as numbers.
|
||||
@ -1297,9 +1515,9 @@ Enable bitexact mode for (de)muxer and (de/en)coder
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
Timestamp discontinuity delta threshold.
|
||||
@item -muxdelay @var{seconds} (@emph{input})
|
||||
@item -muxdelay @var{seconds} (@emph{output})
|
||||
Set the maximum demux-decode delay.
|
||||
@item -muxpreload @var{seconds} (@emph{input})
|
||||
@item -muxpreload @var{seconds} (@emph{output})
|
||||
Set the initial demux-decode delay.
|
||||
@item -streamid @var{output-stream-index}:@var{new-value} (@emph{output})
|
||||
Assign a new stream-id value to an output stream. This option should be
|
||||
@ -1421,8 +1639,10 @@ This allows dumping sdp information when at least one output isn't an
|
||||
rtp stream. (Requires at least one of the output formats to be rtp).
|
||||
|
||||
@item -discard (@emph{input})
|
||||
Allows discarding specific streams or frames of streams at the demuxer.
|
||||
Not all demuxers support this.
|
||||
Allows discarding specific streams or frames from streams.
|
||||
Any input stream can be fully discarded, using value @code{all} whereas
|
||||
selective discarding of frames from a stream occurs at the demuxer
|
||||
and is not supported by all demuxers.
|
||||
|
||||
@table @option
|
||||
@item none
|
||||
|
@ -60,10 +60,14 @@ Play @var{duration} seconds of audio/video.
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -seek_interval
|
||||
Set custom interval, in seconds, for seeking using left/right keys. Default is 10 seconds.
|
||||
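For example (the input name is a placeholder), to make the left/right keys seek in 30 second steps:
@example
# in.mkv is a placeholder input
ffplay -seek_interval 30 in.mkv
@end example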
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -noborder
|
||||
Borderless window.
|
||||
@item -alwaysontop
|
||||
Window always on top. Available on: X11 with SDL >= 2.0.5, Windows SDL >= 2.0.6.
|
||||
@item -volume
|
||||
Set the startup volume. 0 means silence, 100 means no volume reduction or
|
||||
amplification. Negative values are treated as 0, values above 100 are treated
|
||||
@ -72,6 +76,10 @@ as 100.
|
||||
Force format.
|
||||
@item -window_title @var{title}
|
||||
Set window title (default is the input filename).
|
||||
@item -left @var{title}
|
||||
Set the x position for the left of the window (default is a centered window).
|
||||
@item -top @var{title}
|
||||
Set the y position for the top of the window (default is a centered window).
|
||||
@item -loop @var{number}
|
||||
Loops movie playback <number> times. 0 means forever.
|
||||
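A combined sketch (the window title and input name are placeholders) that places the window in the top-left corner and plays the file three times:
@example
# "Preview" and in.mp4 are placeholders
ffplay -window_title "Preview" -left 0 -top 0 -loop 3 in.mp4
@end example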
@item -showmode @var{mode}
|
||||
@ -189,6 +197,12 @@ input as soon as possible. Enabled by default for realtime streams, where data
|
||||
may be dropped if not read in time. Use this option to enable infinite buffers
|
||||
for all inputs, use @option{-noinfbuf} to disable it.
|
||||
|
||||
@item -filter_threads @var{nb_threads}
|
||||
Defines how many threads are used to process a filter pipeline. Each pipeline
|
||||
will produce a thread pool with this many threads available for parallel
|
||||
processing. The default is 0 which means that the thread count will be
|
||||
determined by the number of available CPUs.
|
||||
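For instance, a sketch (the input name is a placeholder) that runs a deinterlacing filter with two filter threads:
@example
# in.ts is a placeholder input
ffplay -filter_threads 2 -vf yadif in.ts
@end example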
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
@ -425,7 +425,7 @@ The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specifid, the output has the form:
|
||||
If no option is specified, the output has the form:
|
||||
@example
|
||||
section|key1=val1| ... |keyN=valN
|
||||
@end example
|
||||
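As an illustrative sketch (the input name is a placeholder), the container duration can be printed with the @code{csv} writer and section printing disabled:
@example
# in.mp4 is a placeholder input
ffprobe -of csv=p=0 -show_entries format=duration in.mp4
@end example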
@ -584,14 +584,14 @@ value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_compliant, x
|
||||
@item xsd_strict, x
|
||||
If set to 1 perform more checks for ensuring that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
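For example, a sketch (the input name is a placeholder) requesting strictly XSD-compliant XML output:
@example
# in.mkv is a placeholder input
ffprobe -of xml=x=1 -show_format -show_streams in.mkv
@end example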
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@url{https://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
|
@ -147,11 +147,25 @@
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecodes" type="ffprobe:frameSideDataTimecodeList" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecode" type="ffprobe:frameSideDataTimecodeType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeType">
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
|
@ -34,27 +34,24 @@ Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
|
||||
thread count for the second stream to 4.
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
thread count for the second stream to 4. If @var{stream_index} is used as an
|
||||
additional stream specifier (see below), then it selects stream number
|
||||
@var{stream_index} from the matching streams. Stream numbering is based on the
|
||||
order of the streams as detected by libavformat except when a program ID is
|
||||
also specified. In this case it is based on the ordering of the streams in the
|
||||
program.
|
||||
@item @var{stream_type}[:@var{additional_stream_specifier}]
|
||||
@var{stream_type} is one of following: 'v' or 'V' for video, 'a' for audio, 's'
|
||||
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
|
||||
streams, 'V' only matches video streams which are not attached pictures, video
|
||||
thumbnails or cover arts. If @var{stream_index} is given, then it matches
|
||||
stream number @var{stream_index} of this type. Otherwise, it matches all
|
||||
streams of this type.
|
||||
@item p:@var{program_id}[:@var{stream_index}] or p:@var{program_id}[:@var{stream_type}[:@var{stream_index}]] or
|
||||
p:@var{program_id}:m:@var{key}[:@var{value}]
|
||||
In first version, if @var{stream_index} is given, then it matches the stream with number @var{stream_index}
|
||||
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
|
||||
program. In the second version, @var{stream_type} is one of following: 'v' for video, 'a' for audio, 's'
|
||||
for subtitle, 'd' for data. If @var{stream_index} is also given, then it matches
|
||||
stream number @var{stream_index} of this type in the program with the id @var{program_id}.
|
||||
Otherwise, if only @var{stream_type} is given, it matches all
|
||||
streams of this type in the program with the id @var{program_id}.
|
||||
In the third version matches streams in the program with the id @var{program_id} with the metadata
|
||||
tag @var{key} having the specified value. If
|
||||
@var{value} is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
thumbnails or cover arts. If @var{additional_stream_specifier} is used, then
|
||||
it matches streams which both have this type and match the
|
||||
@var{additional_stream_specifier}. Otherwise, it matches all streams of the
|
||||
specified type.
|
||||
@item p:@var{program_id}[:@var{additional_stream_specifier}]
|
||||
Matches streams which are in the program with the id @var{program_id}. If
|
||||
@var{additional_stream_specifier} is used, then it matches streams which both
|
||||
are part of the program and match the @var{additional_stream_specifier}.
|
||||
|
||||
@item #@var{stream_id} or i:@var{stream_id}
|
||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
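A short sketch tying these forms together (file names are placeholders): select the first video stream and the second audio stream and stream-copy both to the output:
@example
# in.mkv / out.mkv are placeholder names
ffmpeg -i in.mkv -map 0:v:0 -map 0:a:1 -c copy out.mkv
@end example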
@ -112,6 +109,10 @@ Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@item filter=@var{filter_name}
|
||||
Print detailed information about the filter name @var{filter_name}. Use the
|
||||
@option{-filters} option to get a list of all filters.
|
||||
|
||||
@item bsf=@var{bitstream_filter_name}
|
||||
Print detailed information about the bitstream filter name @var{bitstream_filter_name}.
|
||||
Use the @option{-bsfs} option to get a list of all bitstream filters.
|
||||
@end table
|
||||
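For instance, two concrete queries against components that ship with FFmpeg:
@example
ffmpeg -h filter=scale
ffmpeg -h bsf=h264_mp4toannexb
@end example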
|
||||
@item -version
|
||||
@ -245,7 +246,7 @@ Dump full command line and console output to a file named
|
||||
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
It also implies @code{-loglevel debug}.
|
||||
|
||||
Setting the environment variable @env{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
@ -371,7 +372,15 @@ ffmpeg -i input.flac -id3v2_version 3 out.mp3
|
||||
@end example
|
||||
|
||||
All codec AVOptions are per-stream, and thus a stream specifier
|
||||
should be attached to them.
|
||||
should be attached to them:
|
||||
@example
|
||||
ffmpeg -i multichannel.mxf -map 0:v:0 -map 0:a:0 -map 0:a:0 -c:a:0 ac3 -b:a:0 640k -ac:a:1 2 -c:a:1 aac -b:2 128k out.mp4
|
||||
@end example
|
||||
|
||||
In the above example, a multichannel audio stream is mapped twice for output.
|
||||
The first instance is encoded with codec ac3 and bitrate 640k.
|
||||
The second instance is downmixed to 2 channels and encoded with codec aac. A bitrate of 128k is specified for it using
|
||||
absolute index of the output stream.
|
||||
|
||||
Note: the @option{-nooption} syntax cannot be used for boolean
|
||||
AVOptions, use @option{-option 0}/@option{-option 1}.
|
||||
|
3128
doc/filters.texi
File diff suppressed because it is too large
@ -30,37 +30,43 @@ latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
@item fflags @var{flags}
|
||||
Set format flags. Some are implemented for a limited number of formats.
|
||||
|
||||
Possible values:
|
||||
Possible values for input files:
|
||||
@table @samp
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item discardcorrupt
|
||||
Discard corrupted packets.
|
||||
@item fastseek
|
||||
Enable fast, but inaccurate seeks for some formats.
|
||||
@item genpts
|
||||
Generate PTS.
|
||||
Generate missing PTS if DTS is present.
|
||||
@item igndts
|
||||
Ignore DTS if PTS is set. Inert when nofillin is set.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item keepside (@emph{deprecated},@emph{inert})
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by buffering during initial input streams analysis.
|
||||
@item nofillin
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
Do not fill in missing values in packet fields that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers, this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
Try to interleave output packets by DTS. At present, available only for AVIs with an index.
|
||||
@end table
|
||||
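For example, a sketch (file names are placeholders) that remuxes a transport stream while generating missing PTS:
@example
# in.ts / out.mkv are placeholder names
ffmpeg -fflags +genpts -i in.ts -c copy out.mkv
@end example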
|
||||
Possible values for output files:
|
||||
@table @samp
|
||||
@item autobsf
|
||||
Automatically apply bitstream filters as required by the output format. Enabled by default.
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data.
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item flush_packets
|
||||
Write out packets immediately.
|
||||
@item latm (@emph{deprecated},@emph{inert})
|
||||
@item shortest
|
||||
Stop muxing at the end of the shortest stream.
|
||||
It may be needed to increase max_interleave_delta to avoid flushing the longer
|
||||
@ -205,7 +211,7 @@ is @code{0} (meaning that no offset is applied).
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example to separate the fields with newlines and indention:
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@ -214,6 +220,32 @@ ffprobe -dump_separator "
|
||||
@item max_streams @var{integer} (@emph{input})
|
||||
Specifies the maximum number of streams. This can be used to reject files that
|
||||
would require too many resources due to a large number of streams.
|
||||
|
||||
@item skip_estimate_duration_from_pts @var{bool} (@emph{input})
|
||||
Skip estimation of input duration when calculated using PTS.
|
||||
At present, applicable for MPEG-PS and MPEG-TS.
|
||||
|
||||
@item strict, f_strict @var{integer} (@emph{input/output})
|
||||
Specify how strictly to follow the standards. @code{f_strict} is deprecated and
|
||||
should be used only via the @command{ffmpeg} tool.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item very
|
||||
strictly conform to an older more strict version of the spec or reference software
|
||||
@item strict
|
||||
strictly conform to all the things in the spec no matter what consequences
|
||||
@item normal
|
||||
|
||||
@item unofficial
|
||||
allow unofficial extensions
|
||||
@item experimental
|
||||
allow non standardized experimental things, experimental
|
||||
(unfinished/work in progress/not well tested) decoders and encoders.
|
||||
Note: experimental decoders can pose a security risk, do not use this for
|
||||
decoding untrusted input.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
@ -224,30 +256,10 @@ would require too many resources due to a large number of streams.
|
||||
Format stream specifiers allow selection of one or more streams that
|
||||
match specific properties.
|
||||
|
||||
Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index.
|
||||
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of following: 'v' for video, 'a' for audio,
|
||||
's' for subtitle, 'd' for data, and 't' for attachments. If
|
||||
@var{stream_index} is given, then it matches the stream number
|
||||
@var{stream_index} of this type. Otherwise, it matches all streams of
|
||||
this type.
|
||||
|
||||
@item p:@var{program_id}[:@var{stream_index}]
|
||||
If @var{stream_index} is given, then it matches the stream with number
|
||||
@var{stream_index} in the program with the id
|
||||
@var{program_id}. Otherwise, it matches all streams in the program.
|
||||
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by a format-specific ID.
|
||||
@end table
|
||||
|
||||
The exact semantics of stream specifiers is defined by the
|
||||
@code{avformat_match_stream_specifier()} function declared in the
|
||||
@file{libavformat/avformat.h} header.
|
||||
@file{libavformat/avformat.h} header and documented in the
|
||||
@ref{Stream specifiers,,Stream specifiers section in the ffmpeg(1) manual,ffmpeg}.
|
||||
|
||||
@ifclear config-writeonly
|
||||
@include demuxers.texi
|
||||
|
385
doc/general.texi
@ -17,21 +17,164 @@ for more formats. None of them are used by default, their use has to be
|
||||
explicitly requested by passing the appropriate flags to
|
||||
@command{./configure}.
|
||||
|
||||
@section Alliance for Open Media libaom
|
||||
@section Alliance for Open Media (AOM)
|
||||
|
||||
FFmpeg can make use of the libaom library for AV1 decoding.
|
||||
FFmpeg can make use of the AOM library for AV1 decoding and encoding.
|
||||
|
||||
Go to @url{http://aomedia.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libaom} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenJPEG
|
||||
@section AMD AMF/VCE
|
||||
|
||||
FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to
|
||||
@url{http://www.openjpeg.org/} to get the libraries and follow the installation
|
||||
instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjpeg} to
|
||||
@file{./configure}.
|
||||
FFmpeg can use the AMD Advanced Media Framework library under Windows
|
||||
for accelerated H.264 and HEVC encoding on hardware with Video Coding Engine (VCE).
|
||||
|
||||
To enable support you must obtain the AMF framework header files from
|
||||
@url{https://github.com/GPUOpen-LibrariesAndSDKs/AMF.git}.
|
||||
|
||||
Create an @code{AMF/} directory in the system include path.
|
||||
Copy the contents of @code{AMF/amf/public/include/} into that directory.
|
||||
Then configure FFmpeg with @code{--enable-amf}.
|
||||
|
||||
@section AviSynth
|
||||
|
||||
FFmpeg can read AviSynth scripts as input. To enable support, pass
|
||||
@code{--enable-avisynth} to configure. The correct headers are
|
||||
included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
|
||||
For Windows, supported AviSynth variants are
|
||||
@url{http://avisynth.nl, AviSynth 2.6 RC1 or higher} for 32-bit builds and
|
||||
@url{http://avisynth.nl/index.php/AviSynth+, AviSynth+ r1718 or higher} for 32-bit and 64-bit builds.
|
||||
|
||||
For Linux and OS X, the supported AviSynth variant is
|
||||
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
|
||||
|
||||
@float NOTE
|
||||
In 2016, AviSynth+ added support for building with GCC. However, due to
|
||||
the eccentricities of Windows' calling conventions, 32-bit GCC builds
|
||||
of AviSynth+ are not compatible with typical 32-bit builds of FFmpeg.
|
||||
|
||||
By default, FFmpeg assumes compatibility with 32-bit MSVC builds of
|
||||
AviSynth+ since that is the most widely-used and entrenched build
|
||||
configuration. Users can override this and enable support for 32-bit
|
||||
GCC builds of AviSynth+ by passing @code{-DAVSC_WIN32_GCC32} to
|
||||
@code{--extra-cflags} when configuring FFmpeg.
|
||||
|
||||
64-bit builds of FFmpeg are not affected, and can use either MSVC or
|
||||
GCC builds of AviSynth+ without any special flags.
|
||||
@end float
|
||||
|
||||
@float NOTE
|
||||
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
|
||||
with @code{--enable-avisynth}, and the binaries will work regardless of the
|
||||
end user having AviSynth or AvxSynth installed - they'll only need to be
|
||||
installed to use AviSynth scripts (obviously).
|
||||
@end float
|
||||
|
||||
@section Chromaprint
|
||||
|
||||
FFmpeg can make use of the Chromaprint library for generating audio fingerprints.
|
||||
Pass @code{--enable-chromaprint} to configure to
|
||||
enable it. See @url{https://acoustid.org/chromaprint}.
|
||||
|
||||
@section codec2
|
||||
|
||||
FFmpeg can make use of the codec2 library for codec2 decoding and encoding.
|
||||
There is currently no native decoder, so libcodec2 must be used for decoding.
|
||||
|
||||
Go to @url{http://freedv.org/}, download "Codec 2 source archive".
|
||||
Build and install using CMake. Debian users can install the libcodec2-dev package instead.
|
||||
Once libcodec2 is installed you can pass @code{--enable-libcodec2} to configure to enable it.
|
||||
|
||||
The easiest way to use codec2 is with .c2 files, since they contain the mode information required for decoding.
|
||||
To encode such a file, use a .c2 file extension and give the libcodec2 encoder the -mode option:
|
||||
@code{ffmpeg -i input.wav -mode 700C output.c2}.
|
||||
Playback is as simple as @code{ffplay output.c2}.
|
||||
For a list of supported modes, run @code{ffmpeg -h encoder=libcodec2}.
|
||||
Raw codec2 files are also supported.
|
||||
To make sense of them the mode in use needs to be specified as a format option:
|
||||
@code{ffmpeg -f codec2raw -mode 1300 -i input.raw output.wav}.
|
||||
|
||||
@section dav1d
|
||||
|
||||
FFmpeg can make use of the dav1d library for AV1 video decoding.
|
||||
|
||||
Go to @url{https://code.videolan.org/videolan/dav1d} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libdav1d} to configure to enable it.
|
||||
|
||||
@section davs2
|
||||
|
||||
FFmpeg can make use of the davs2 library for AVS2-P2/IEEE1857.4 video decoding.
|
||||
|
||||
Go to @url{https://github.com/pkuvcl/davs2} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libdavs2} to configure to
|
||||
enable it.
|
||||
|
||||
@float NOTE
|
||||
libdavs2 is under the GNU Public License Version 2 or later
|
||||
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section Game Music Emu
|
||||
|
||||
FFmpeg can make use of the Game Music Emu library to read audio from supported video game
|
||||
music file formats. Pass @code{--enable-libgme} to configure to
|
||||
enable it. See @url{https://bitbucket.org/mpyne/game-music-emu/overview}.
|
||||
|
||||
@section Intel QuickSync Video
|
||||
|
||||
FFmpeg can use Intel QuickSync Video (QSV) for accelerated decoding and encoding
|
||||
of multiple codecs. To use QSV, FFmpeg must be linked against the @code{libmfx}
|
||||
dispatcher, which loads the actual decoding libraries.
|
||||
|
||||
The dispatcher is open source and can be downloaded from
|
||||
@url{https://github.com/lu-zero/mfx_dispatch.git}. FFmpeg needs to be configured
|
||||
with the @code{--enable-libmfx} option and @code{pkg-config} needs to be able to
|
||||
locate the dispatcher's @code{.pc} files.
|
||||
|
||||
@section Kvazaar
|
||||
|
||||
FFmpeg can make use of the Kvazaar library for HEVC encoding.
|
||||
|
||||
Go to @url{https://github.com/ultravideo/kvazaar} and follow the
|
||||
instructions for installing the library. Then pass
|
||||
@code{--enable-libkvazaar} to configure to enable it.
|
||||
|
||||
@section LAME
|
||||
|
||||
FFmpeg can make use of the LAME library for MP3 encoding.
|
||||
|
||||
Go to @url{http://lame.sourceforge.net/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libmp3lame} to configure to enable it.
|
||||
|
||||
@section libilbc
|
||||
|
||||
iLBC is a narrowband speech codec that has been made freely available
|
||||
by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC decoding and encoding.
|
||||
|
||||
Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
|
||||
|
||||
@section libvpx
|
||||
|
||||
FFmpeg can make use of the libvpx library for VP8/VP9 decoding and encoding.
|
||||
|
||||
Go to @url{http://www.webmproject.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libvpx} to configure to
|
||||
enable it.
|
||||
|
||||
@section ModPlug
|
||||
|
||||
FFmpeg can make use of this library, originating in Modplug-XMMS, to read from MOD-like music files.
|
||||
See @url{https://github.com/Konstanty/libmodplug}. Pass @code{--enable-libmodplug} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenCORE, VisualOn, and Fraunhofer libraries
|
||||
|
||||
@ -46,9 +189,10 @@ upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
|
||||
GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
|
||||
order to use it.
|
||||
|
||||
The Fraunhofer AAC library is licensed under a license incompatible to the GPL
|
||||
and is not known to be compatible to the LGPL. Therefore, you have to pass
|
||||
@code{--enable-nonfree} to configure to use it.
|
||||
The license of the Fraunhofer AAC library is incompatible with the GPL.
|
||||
Therefore, for GPL builds, you have to pass @code{--enable-nonfree} to
|
||||
configure in order to use it. To the best of our knowledge, it is
|
||||
compatible with the LGPL.
|
||||
@end float
|
||||
|
||||
@subsection OpenCORE AMR
|
||||
@ -71,73 +215,15 @@ Then pass @code{--enable-libvo-amrwbenc} to configure to enable it.
|
||||
|
||||
@subsection Fraunhofer AAC library
|
||||
|
||||
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
|
||||
FFmpeg can make use of the Fraunhofer AAC library for AAC decoding & encoding.
|
||||
|
||||
Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libfdk-aac} to configure to enable it.
|
||||
|
||||
@section LAME
|
||||
|
||||
FFmpeg can make use of the LAME library for MP3 encoding.
|
||||
|
||||
Go to @url{http://lame.sourceforge.net/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libmp3lame} to configure to enable it.
|
||||
|
||||
@section TwoLAME
|
||||
|
||||
FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
|
||||
Go to @url{http://www.twolame.org/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libtwolame} to configure to enable it.
|
||||
|
||||
@section libcodec2 / codec2 general
|
||||
|
||||
FFmpeg can make use of libcodec2 for codec2 encoding and decoding.
|
||||
There is currently no native decoder, so libcodec2 must be used for decoding.
|
||||
|
||||
Go to @url{http://freedv.org/}, download "Codec 2 source archive".
|
||||
Build and install using CMake. Debian users can install the libcodec2-dev package instead.
|
||||
Once libcodec2 is installed you can pass @code{--enable-libcodec2} to configure to enable it.
|
||||
|
||||
The easiest way to use codec2 is with .c2 files, since they contain the mode information required for decoding.
|
||||
To encode such a file, use a .c2 file extension and give the libcodec2 encoder the -mode option:
|
||||
@code{ffmpeg -i input.wav -mode 700C output.c2}.
|
||||
Playback is as simple as @code{ffplay output.c2}.
|
||||
For a list of supported modes, run @code{ffmpeg -h encoder=libcodec2}.
|
||||
Raw codec2 files are also supported.
|
||||
To make sense of them the mode in use needs to be specified as a format option:
|
||||
@code{ffmpeg -f codec2raw -mode 1300 -i input.raw output.wav}.
|
||||
|
||||
@section libvpx
|
||||
|
||||
FFmpeg can make use of the libvpx library for VP8/VP9 encoding.
|
||||
|
||||
Go to @url{http://www.webmproject.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libvpx} to configure to
|
||||
enable it.
|
||||
|
||||
@section libwavpack
|
||||
|
||||
FFmpeg can make use of the libwavpack library for WavPack encoding.
|
||||
|
||||
Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section libxavs
|
||||
|
||||
FFmpeg can make use of the libxavs library for Xavs encoding.
|
||||
|
||||
Go to @url{http://xavs.sf.net/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libxavs} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenH264
|
||||
|
||||
FFmpeg can make use of the OpenH264 library for H.264 encoding and decoding.
|
||||
FFmpeg can make use of the OpenH264 library for H.264 decoding and encoding.
|
||||
|
||||
Go to @url{http://www.openh264.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libopenh264} to configure to
|
||||
@ -150,6 +236,40 @@ constrained baseline profile and CABAC.) Using it is mostly useful for
|
||||
testing and for taking advantage of Cisco's patent portfolio license
|
||||
(@url{http://www.openh264.org/BINARY_LICENSE.txt}).
|
||||
|
||||
@section OpenJPEG
|
||||
|
||||
FFmpeg can use the OpenJPEG libraries for decoding/encoding J2K videos. Go to
|
||||
@url{http://www.openjpeg.org/} to get the libraries and follow the installation
|
||||
instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjpeg} to
|
||||
@file{./configure}.
|
||||
|
||||
@section TwoLAME
|
||||
|
||||
FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
|
||||
Go to @url{http://www.twolame.org/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libtwolame} to configure to enable it.
|
||||
|
||||
@section VapourSynth
|
||||
|
||||
FFmpeg can read VapourSynth scripts as input. To enable support, pass
|
||||
@code{--enable-vapoursynth} to configure. Vapoursynth is detected via
|
||||
@code{pkg-config}. Versions 42 or greater supported.
|
||||
See @url{http://www.vapoursynth.com/}.
|
||||
|
||||
Due to security concerns, Vapoursynth scripts will not
|
||||
be autodetected so the input format has to be forced. For ff* CLI tools,
|
||||
add @code{-f vapoursynth} before the input @code{-i yourscript.vpy}.
|
||||
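A minimal sketch (the script and output names are placeholders, and an FFmpeg build with libx264 is assumed):
@example
# script.vpy / out.mkv are placeholder names
ffmpeg -f vapoursynth -i script.vpy -c:v libx264 out.mkv
@end example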
|
||||
@section WavPack
|
||||
|
||||
FFmpeg can make use of the libwavpack library for WavPack encoding.
|
||||
|
||||
Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section x264
|
||||
|
||||
FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
@ -178,92 +298,37 @@ x265 is under the GNU Public License Version 2 or later
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section kvazaar
|
||||
@section xavs
|
||||
|
||||
FFmpeg can make use of the kvazaar library for HEVC encoding.
|
||||
FFmpeg can make use of the xavs library for AVS encoding.
|
||||
|
||||
Go to @url{https://github.com/ultravideo/kvazaar} and follow the
|
||||
instructions for installing the library. Then pass
|
||||
@code{--enable-libkvazaar} to configure to enable it.
|
||||
|
||||
@section libilbc
|
||||
|
||||
iLBC is a narrowband speech codec that has been made freely available
|
||||
by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
Go to @url{http://xavs.sf.net/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libxavs} to configure to
|
||||
enable it.
|
||||
|
||||
@section libzvbi
|
||||
@section xavs2
|
||||
|
||||
libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB
|
||||
FFmpeg can make use of the xavs2 library for AVS2-P2/IEEE1857.4 video encoding.
|
||||
|
||||
Go to @url{https://github.com/pkuvcl/xavs2} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libxavs2} to configure to
|
||||
enable it.
|
||||
|
||||
@float NOTE
|
||||
libxavs2 is under the GNU Public License Version 2 or later
|
||||
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section ZVBI
|
||||
|
||||
ZVBI is a VBI decoding library which can be used by FFmpeg to decode DVB
|
||||
teletext pages and DVB teletext subtitles.
|
||||
|
||||
Go to @url{http://sourceforge.net/projects/zapping/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libzvbi} to configure to
|
||||
enable it.
|
||||
|
||||
@section AviSynth
|
||||
|
||||
FFmpeg can read AviSynth scripts as input. To enable support, pass
|
||||
@code{--enable-avisynth} to configure. The correct headers are
|
||||
included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
|
||||
For Windows, supported AviSynth variants are
|
||||
@url{http://avisynth.nl, AviSynth 2.6 RC1 or higher} for 32-bit builds and
|
||||
@url{http://avs-plus.net, AviSynth+ r1718 or higher} for 32-bit and 64-bit builds.
|
||||
|
||||
For Linux and OS X, the supported AviSynth variant is
|
||||
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
|
||||
|
||||
@float NOTE
|
||||
There is currently a regression in AviSynth+'s @code{capi.h} header as of
|
||||
October 2016, which interferes with the ability for builds of FFmpeg to use
|
||||
MSVC-built binaries of AviSynth. Until this is resolved, you can make sure
|
||||
a known good version is installed by checking out a version from before
|
||||
the regression occurred:
|
||||
|
||||
@code{git clone -b MT git://github.com/AviSynth/AviSynthPlus.git @*
|
||||
cd AviSynthPlus @*
|
||||
git checkout -b oldheader b4f292b4dbfad149697fb65c6a037bb3810813f9 @*
|
||||
make install PREFIX=/install/prefix}
|
||||
@end float
|
||||
|
||||
@float NOTE
|
||||
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
|
||||
with @code{--enable-avisynth}, and the binaries will work regardless of the
|
||||
end user having AviSynth or AvxSynth installed - they'll only need to be
|
||||
installed to use AviSynth scripts (obviously).
|
||||
@end float
|
||||
|
||||
@section Intel QuickSync Video
|
||||
|
||||
FFmpeg can use Intel QuickSync Video (QSV) for accelerated encoding and decoding
|
||||
of multiple codecs. To use QSV, FFmpeg must be linked against the @code{libmfx}
|
||||
dispatcher, which loads the actual decoding libraries.
|
||||
|
||||
The dispatcher is open source and can be downloaded from
|
||||
@url{https://github.com/lu-zero/mfx_dispatch.git}. FFmpeg needs to be configured
|
||||
with the @code{--enable-libmfx} option and @code{pkg-config} needs to be able to
|
||||
locate the dispatcher's @code{.pc} files.
|
||||
|
||||
@section AMD VCE
|
||||
|
||||
FFmpeg can use the AMD Advanced Media Framework library for accelerated H.264
|
||||
and HEVC encoding on VCE enabled hardware under Windows.
|
||||
|
||||
To enable support you must obtain the AMF framework header files from
|
||||
@url{https://github.com/GPUOpen-LibrariesAndSDKs/AMF.git}.
|
||||
|
||||
Create an @code{AMF/} directory in the system include path.
|
||||
Copy the contents of @code{AMF/amf/public/include/} into that directory.
|
||||
Then configure FFmpeg with @code{--enable-amf}.
|
||||
|
||||
|
||||
@chapter Supported File Formats, Codecs or Features
|
||||
|
||||
You can use the @code{-formats} and @code{-codecs} options to have an exhaustive list.
|
||||
@ -403,6 +468,8 @@ library:
|
||||
@item IEC61937 encapsulation @tab X @tab X
|
||||
@item IFF @tab @tab X
|
||||
@tab Interchange File Format
|
||||
@item IFV @tab @tab X
|
||||
@tab A format used by some old CCTV DVRs.
|
||||
@item iLBC @tab X @tab X
|
||||
@item Interplay MVE @tab @tab X
|
||||
@tab Format used in various Interplay computer games.
|
||||
@ -516,6 +583,7 @@ library:
|
||||
@item raw VC-1 @tab X @tab X
|
||||
@item raw PCM A-law @tab X @tab X
|
||||
@item raw PCM mu-law @tab X @tab X
|
||||
@item raw PCM Archimedes VIDC @tab X @tab X
|
||||
@item raw PCM signed 8 bit @tab X @tab X
|
||||
@item raw PCM signed 16 bit big-endian @tab X @tab X
|
||||
@item raw PCM signed 16 bit little-endian @tab X @tab X
|
||||
@ -559,6 +627,7 @@ library:
|
||||
@item SAP @tab X @tab X
|
||||
@item SBG @tab @tab X
|
||||
@item SDP @tab @tab X
|
||||
@item SER @tab @tab X
|
||||
@item Sega FILM/CPK @tab X @tab X
|
||||
@tab Used in many Sega Saturn console games.
|
||||
@item Silicon Graphics Movie @tab @tab X
|
||||
@ -619,7 +688,7 @@ library:
|
||||
@item Psygnosis YOP @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@code{X} means that the feature in that column (encoding / decoding) is supported.
|
||||
|
||||
@section Image Formats
|
||||
|
||||
@ -689,7 +758,7 @@ following image formats are supported:
|
||||
@tab X Window Dump image format
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@code{X} means that the feature in that column (encoding / decoding) is supported.
|
||||
|
||||
@code{E} means that support is provided through an external library.
|
||||
|
||||
@ -727,8 +796,8 @@ following image formats are supported:
|
||||
@item Autodesk Animator Flic video @tab @tab X
|
||||
@item Autodesk RLE @tab @tab X
|
||||
@tab fourcc: AASC
|
||||
@item AV1 @tab @tab E
|
||||
@tab Supported through external library libaom
|
||||
@item AV1 @tab E @tab E
|
||||
@tab Supported through external libraries libaom and libdav1d
|
||||
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
||||
@tab fourcc: AVrp
|
||||
@item AVS (Audio Video Standard) video @tab @tab X
|
||||
@ -802,6 +871,7 @@ following image formats are supported:
|
||||
@tab fourcc: G2M2, G2M3
|
||||
@item Go2Webinar @tab @tab X
|
||||
@tab fourcc: G2M4
|
||||
@item Gremlin Digital Video @tab @tab X
|
||||
@item H.261 @tab X @tab X
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
|
||||
@ -822,6 +892,7 @@ following image formats are supported:
|
||||
@tab IFF interleaved bitmap
|
||||
@item IFF ByteRun1 @tab @tab X
|
||||
@tab IFF run length encoded bitmap
|
||||
@item Infinity IMM4 @tab @tab X
|
||||
@item Intel H.263 @tab @tab X
|
||||
@item Intel Indeo 2 @tab @tab X
|
||||
@item Intel Indeo 3 @tab @tab X
|
||||
@ -875,6 +946,8 @@ following image formats are supported:
|
||||
@tab Video encoding used in NuppelVideo files.
|
||||
@item On2 VP3 @tab @tab X
|
||||
@tab still experimental
|
||||
@item On2 VP4 @tab @tab X
|
||||
@tab fourcc: VP40
|
||||
@item On2 VP5 @tab @tab X
|
||||
@tab fourcc: VP50
|
||||
@item On2 VP6 @tab @tab X
|
||||
@ -969,7 +1042,7 @@ following image formats are supported:
|
||||
@tab Encoder works only in PAL8.
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@code{X} means that the feature in that column (encoding / decoding) is supported.
|
||||
|
||||
@code{E} means that support is provided through an external library.
|
||||
|
||||
@ -1048,6 +1121,7 @@ following image formats are supported:
|
||||
@item ATRAC1 @tab @tab X
|
||||
@item ATRAC3 @tab @tab X
|
||||
@item ATRAC3+ @tab @tab X
|
||||
@item ATRAC9 @tab @tab X
|
||||
@item Bink Audio @tab @tab X
|
||||
@tab Used in Bink and Smacker files in many games.
|
||||
@item CELT @tab @tab E
|
||||
@ -1074,10 +1148,10 @@ following image formats are supported:
|
||||
@item DPCM Sol @tab @tab X
|
||||
@item DPCM Xan @tab @tab X
|
||||
@tab Used in Origin's Wing Commander IV AVI files.
|
||||
@item DSD (Direct Stream Digitial), least significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), most significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), least significant bit first, planar @tab @tab X
|
||||
@item DSD (Direct Stream Digitial), most significant bit first, planar @tab @tab X
|
||||
@item DSD (Direct Stream Digital), least significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digital), most significant bit first @tab @tab X
|
||||
@item DSD (Direct Stream Digital), least significant bit first, planar @tab @tab X
|
||||
@item DSD (Direct Stream Digital), most significant bit first, planar @tab @tab X
|
||||
@item DSP Group TrueSpeech @tab @tab X
|
||||
@item DST (Direct Stream Transfer) @tab @tab X
|
||||
@item DV audio @tab @tab X
|
||||
@ -1114,6 +1188,7 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@item PCM mu-law @tab X @tab X
|
||||
@item PCM Archimedes VIDC @tab X @tab X
|
||||
@item PCM signed 8-bit planar @tab X @tab X
|
||||
@item PCM signed 16-bit big-endian planar @tab X @tab X
|
||||
@item PCM signed 16-bit little-endian planar @tab X @tab X
|
||||
@ -1186,7 +1261,7 @@ following image formats are supported:
|
||||
@item Xbox Media Audio 2 @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@code{X} means that the feature in that column (encoding / decoding) is supported.
|
||||
|
||||
@code{E} means that support is provided through an external library.
|
||||
|
||||
|
231
doc/indevs.texi
@ -178,6 +178,9 @@ Capture the mouse pointer. Default is 0.
|
||||
@item -capture_mouse_clicks
|
||||
Capture the screen mouse clicks. Default is 0.
|
||||
|
||||
@item -capture_raw_data
|
||||
Capture the raw device data. Default is 0.
|
||||
Using this option may result in receiving the underlying data delivered to the AVFoundation framework. E.g. for muxed devices that send raw DV data to the framework (like tape-based camcorders), setting this option to false results in extracted video frames captured in the designated pixel format only. Setting this option to true results in receiving the raw DV stream untouched.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@ -208,6 +211,13 @@ Record video from the system default video device using the pixel format bgr0 an
|
||||
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record raw DV data from a suitable input device and write the output into out.dv:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -capture_raw_data true -i "zr100:none" out.dv
|
||||
@end example
|
||||
|
||||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
@ -267,7 +277,8 @@ audio track.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sources}
|
||||
option of ffmpeg to list the available input devices.
|
||||
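For example, to list DeckLink inputs this way:
@example
ffmpeg -sources decklink
@end example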
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@ -326,6 +337,12 @@ Defaults to @samp{2}.
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item timecode_format
|
||||
Timecode type to include in the frame and video stream metadata. Must be
|
||||
@samp{none}, @samp{rp188vitc}, @samp{rp188vitc2}, @samp{rp188ltc},
|
||||
@samp{rp188any}, @samp{vitc}, @samp{vitc2}, or @samp{serial}. Defaults to
|
||||
@samp{none} (not included).
|
||||
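An illustrative capture sketch (the device name and output file are placeholders for your setup):
@example
# 'DeckLink Mini Recorder' and out.mov are placeholders
ffmpeg -timecode_format rp188any -f decklink -i 'DeckLink Mini Recorder' out.mov
@end example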
|
||||
@item video_input
|
||||
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
|
||||
@samp{optical_sdi}, @samp{component}, @samp{composite} or @samp{s_video}.
|
||||
@ -364,6 +381,20 @@ If set to @option{true}, timestamps are forwarded as they are without removing
|
||||
the initial offset.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item timestamp_align
|
||||
Capture start time alignment in seconds. If set to nonzero, input frames are
|
||||
dropped till the system timestamp aligns with configured value.
|
||||
Alignment difference of up to one frame duration is tolerated.
|
||||
This is useful for maintaining input synchronization across N different
|
||||
hardware devices deployed for 'N-way' redundancy. The system time of different
|
||||
hardware devices should be synchronized with protocols such as NTP or PTP,
|
||||
before using this option.
|
||||
Note that this method is not foolproof. In some border cases input
|
||||
synchronization may not happen due to thread scheduling jitters in the OS.
|
||||
Either sync could go wrong by 1 frame or in a rarer case
|
||||
@option{timestamp_align} seconds.
|
||||
Defaults to @samp{0}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@ -402,116 +433,6 @@ ffmpeg -channels 16 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder'
|
||||
|
||||
@end itemize
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. Defaults to @option{bgr0}.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs, but can't be autodetected. See the libdrm documentation for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Capture from the first active plane, download the result to normal frames and encode.
|
||||
This will only work if the framebuffer is both linear and mappable - if not, the result
|
||||
may be scrambled or fail to download.
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
|
||||
@example
|
||||
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libndi_newtek
|
||||
|
||||
The libndi_newtek input device provides capture capabilities for using NDI (Network
|
||||
Device Interface, standard created by NewTek).
|
||||
|
||||
Input filename is a NDI source name that could be found by sending -find_sources 1
|
||||
to command line - it has no specific syntax but human-readable formatted.
|
||||
|
||||
To enable this input device, you need the NDI SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item find_sources
|
||||
If set to @option{true}, print a list of found/available NDI sources and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item wait_sources
|
||||
Override time to wait until the number of online sources have changed.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item allow_video_fields
|
||||
When this flag is @option{false}, all video that you receive will be progressive.
|
||||
Defaults to @option{true}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -find_sources 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
Restream to NDI:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -i "DEV-5.INTERNAL.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@ -876,7 +797,7 @@ ffplay -f iec61883 -i auto
|
||||
Grab and record the input of a FireWire DV/HDV device,
|
||||
using a packet buffer of 100000 packets if the source is HDV.
|
||||
@example
|
||||
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
|
||||
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
@ -939,6 +860,76 @@ Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. Defaults to @option{bgr0}.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs, but can't be autodetected. See the libdrm documentation for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Capture from the first active plane, download the result to normal frames and encode.
|
||||
This will only work if the framebuffer is both linear and mappable - if not, the result
|
||||
may be scrambled or fail to download.
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
|
||||
@example
|
||||
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
To capture only part of a plane the output can be cropped - this can be used to capture
|
||||
a single window, as long as it has a known absolute position and size. For example, to
|
||||
capture and encode the middle quarter of a 1920x1080 plane:
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,crop=960:540:480:270,scale_vaapi=960:540:nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section lavfi
|
||||
|
||||
Libavfilter input virtual device.
|
||||
@ -1077,6 +1068,21 @@ IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
|
||||
Requires the configure option @code{--enable-libdc1394}.
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate. Default is @code{ntsc}, corresponding to a frame
|
||||
rate of @code{30000/1001}.
|
||||
|
||||
@item pixel_format
|
||||
Select the pixel format. Default is @code{uyvy422}.
|
||||
|
||||
@item video_size
|
||||
Set the video size given as a string such as @code{640x480} or @code{hd720}.
|
||||
Default is @code{qvga}.
|
||||
@end table
|
||||
|
||||
@section openal
|
||||
|
||||
The OpenAL input device provides audio capture on all systems with a
|
||||
@ -1195,7 +1201,6 @@ Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
|
||||
@section pulse
|
||||
|
||||
PulseAudio input device.
|
||||
|
@ -95,17 +95,16 @@ Stuff that didn't reach the codebase:
|
||||
- 0cef06df0 checkasm: add HEVC MC tests
|
||||
- e7078e842 hevcdsp: add x86 SIMD for MC
|
||||
- 7993ec19a hevc: Add hevc_get_pixel_4/8/12/16/24/32/48/64
|
||||
- new bitstream reader (see http://ffmpeg.org/pipermail/ffmpeg-devel/2017-April/209609.html)
|
||||
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
|
||||
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
|
||||
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
|
||||
- Support recovery from an already present HLS playlist (see 16cb06bb30)
|
||||
- Remove all output devices (see 8e7e042d41, 8d3db95f20, 6ce13070bd, d46cd24986 and https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/216904.html)
|
||||
- avcodec/libaomenc: export the Sequence Header OBU as extradata (See a024c3ce9a)
|
||||
|
||||
Collateral damage that needs work locally:
|
||||
------------------------------------------
|
||||
|
||||
- Merge proresdec2.c and proresdec_lgpl.c
|
||||
- Merge proresenc_anatoliy.c and proresenc_kostya.c
|
||||
- Fix MIPS AC3 downmix
|
||||
|
||||
|
@ -47,7 +47,8 @@ We cannot provide help for scripts and/or third-party tools.
|
||||
@anchor{How do I ask a question or send a message to a mailing list?}
|
||||
@section How do I ask a question or send a message to a mailing list?
|
||||
|
||||
All you have to do is send an email:
|
||||
First you must @ref{How do I subscribe?, subscribe}. Then all you have to do is
|
||||
send an email:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@ -57,49 +58,14 @@ ffmpeg-user mailing list.
|
||||
@item
|
||||
Email @email{libav-user@@ffmpeg.org} to send a message to the
|
||||
libav-user mailing list.
|
||||
|
||||
@item
|
||||
Email @email{ffmpeg-devel@@ffmpeg.org} to send a message to the
|
||||
ffmpeg-devel mailing list.
|
||||
@end itemize
|
||||
|
||||
If you are not subscribed to the mailing list then your question must be
|
||||
manually approved. Approval may take several days, but the wait is
|
||||
usually less. If you want the message to be sent with no delay then you
|
||||
must subscribe first. See @ref{How do I subscribe?}
|
||||
|
||||
Please do not send a message, subscribe, and re-send the message: this
|
||||
results in duplicates, causes more work for the admins, and may lower
|
||||
your chance at getting an answer. However, you may do so if you first
|
||||
@ref{How do I delete my message in the moderation queue?, delete your original message from the moderation queue}.
|
||||
|
||||
@chapter Subscribing / Unsubscribing
|
||||
|
||||
@section What does subscribing do?
|
||||
|
||||
Subscribing allows two things:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Your messages will show up in the mailing list without waiting in the
|
||||
moderation queue and needing to be manually approved by a mailing list
|
||||
admin.
|
||||
|
||||
@item
|
||||
You will receive all messages to the mailing list including replies to
|
||||
your messages. Non-subscribed users do not receive any messages.
|
||||
@end itemize
|
||||
|
||||
@section Do I need to subscribe?
|
||||
|
||||
No. You can still send a message to the mailing list without
|
||||
subscribing. See @ref{How do I ask a question or send a message to a mailing list?}
|
||||
|
||||
However, your message will need to be manually approved by a mailing
|
||||
list admin, and you will not receive any mailing list messages or
|
||||
replies.
|
||||
|
||||
You can ask to be CCd in your message, but replying users will
|
||||
sometimes forget to do so.
|
||||
|
||||
You may also view and reply to messages via the @ref{Where are the archives?, archives}.
|
||||
|
||||
@anchor{How do I subscribe?}
|
||||
@section How do I subscribe?
|
||||
|
||||
@ -124,6 +90,9 @@ The process is the same for the other mailing lists.
|
||||
Please avoid asking a mailing list admin to unsubscribe you unless you
|
||||
are absolutely unable to do so by yourself. See @ref{Who do I contact if I have a problem with the mailing list?}
|
||||
|
||||
Note that it is possible to temporarily halt message delivery (vacation mode).
|
||||
See @ref{How do I disable mail delivery without unsubscribing?}
|
||||
|
||||
@chapter Moderation Queue
|
||||
@anchor{Why is my message awaiting moderator approval?}
|
||||
@section Why is my message awaiting moderator approval?
|
||||
@ -134,8 +103,6 @@ must be manually approved by a mailing list admin:
|
||||
These are:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Messages from users who are @strong{not} subscribed.
|
||||
|
||||
@item
|
||||
Messages that exceed the @ref{What is the message size limit?, message size limit}.
|
||||
@ -148,13 +115,13 @@ or is abusive towards others).
|
||||
|
||||
@section How long does it take for my message in the moderation queue to be approved?
|
||||
|
||||
The queue is usually checked once or twice a day, but on occasion
|
||||
several days may pass before someone checks the queue.
|
||||
The queue is not checked on a regular basis. You can ask on the
|
||||
@t{#ffmpeg-devel} IRC channel on Freenode for someone to approve your message.
|
||||
|
||||
@anchor{How do I delete my message in the moderation queue?}
|
||||
@section How do I delete my message in the moderation queue?
|
||||
|
||||
You should have received an email with the subject @emph{Your message to ffmpeg-user awaits moderator approval}.
|
||||
You should have received an email with the subject @emph{Your message to <mailing list name> awaits moderator approval}.
|
||||
A link is in the message that will allow you to delete your message
|
||||
unless a mailing list admin already approved or rejected it.
|
||||
|
||||
@ -175,6 +142,9 @@ Click the email link at the top of the message just under the subject
|
||||
title. The link will provide the proper headers to keep the message
|
||||
within the thread.
|
||||
|
||||
Note that you must be subscribed to send a message to the ffmpeg-user or
|
||||
libav-user mailing lists.
|
||||
|
||||
@section How do I search the archives?
|
||||
|
||||
Perform a site search using your favorite search engine. Example:
|
||||
@ -187,11 +157,12 @@ Perform a site search using your favorite search engine. Example:
|
||||
|
||||
You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.
|
||||
|
||||
Some users prefer the third-party Nabble interface which presents the
|
||||
mailing lists in a typical forum layout.
|
||||
Some users prefer the third-party @url{http://www.ffmpeg-archive.org/, Nabble}
|
||||
interface which presents the mailing lists in a typical forum layout.
|
||||
|
||||
There are also numerous third-party help sites such as Super User and
|
||||
r/ffmpeg on reddit.
|
||||
There are also numerous third-party help sites such as
|
||||
@url{https://superuser.com/tags/ffmpeg, Super User} and
|
||||
@url{https://www.reddit.com/r/ffmpeg/, r/ffmpeg on reddit}.
|
||||
|
||||
@anchor{What is top-posting?}
|
||||
@section What is top-posting?
|
||||
@ -203,16 +174,15 @@ Instead, use trimmed interleaved/inline replies (@url{https://lists.ffmpeg.org/p
|
||||
@anchor{What is the message size limit?}
|
||||
@section What is the message size limit?
|
||||
|
||||
The message size limit is 500 kilobytes for the user lists and 1000
|
||||
kilobytes for ffmpeg-devel. Please provide links to larger files instead
|
||||
of attaching them.
|
||||
The message size limit is 1000 kilobytes. Please provide links to larger files
|
||||
instead of attaching them.
|
||||
|
||||
@section Where can I upload sample files?
|
||||
|
||||
Anywhere that is not too annoying for us to use.
|
||||
|
||||
Google Drive and Dropbox are acceptable if you need a file host, and
|
||||
0x0.st is good for files under 256 MiB.
|
||||
@url{https://0x0.st/, 0x0.st} is good for files under 256 MiB.
|
||||
|
||||
Small, short samples are preferred if possible.
|
||||
|
||||
@ -259,6 +229,54 @@ or headers.
|
||||
|
||||
You can then filter the mailing list messages to their own folder.
|
||||
|
||||
@anchor{How do I disable mail delivery without unsubscribing?}
|
||||
@section How do I disable mail delivery without unsubscribing?
|
||||
|
||||
Sometimes you may want to temporarily stop receiving all mailing list
|
||||
messages. This "vacation mode" is simple to do:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Go to the @url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user mailing list info page}
|
||||
|
||||
@item
|
||||
Enter your email address in the box at the very bottom of the page and click the
|
||||
@emph{Unsubscribe or edit options} box.
|
||||
|
||||
@item
|
||||
Enter your password and click the @emph{Log in} button.
|
||||
|
||||
@item
|
||||
Look for the @emph{Mail delivery} option. Here you can disable/enable mail
|
||||
delivery. If you check @emph{Set globally} it will apply your choice to all
|
||||
other FFmpeg mailing lists you are subscribed to.
|
||||
@end enumerate
|
||||
|
||||
Alternatively, from your subscribed address, send a message to @email{ffmpeg-user-request@@ffmpeg.org}
|
||||
with the subject @emph{set delivery off}. To re-enable mail delivery send a
|
||||
message to @email{ffmpeg-user-request@@ffmpeg.org} with the subject
|
||||
@emph{set delivery on}.
|
||||
|
||||
@anchor{Why is the mailing list munging my address?}
|
||||
@section Why is the mailing list munging my address?
|
||||
|
||||
This is due to subscribers that use an email service with a DMARC reject policy
|
||||
which adds difficulties to mailing list operators.
|
||||
|
||||
The mailing list must re-write (munge) the @emph{From:} header for such users;
|
||||
otherwise their email service will reject and bounce the message resulting in
|
||||
automatic unsubscribing from the mailing list.
|
||||
|
||||
When sending a message these users will see @emph{via <mailing list name>}
|
||||
added to their name and the @emph{From:} address munged to the address of
|
||||
the particular mailing list.
|
||||
|
||||
If you want to avoid this then please use a different email service.
|
||||
|
||||
Note that ffmpeg-devel does not apply any munging as it causes issues with
|
||||
patch authorship. As a result users with an email service with a DMARC reject
|
||||
policy may be automatically unsubscribed due to rejected and bounced messages.
|
||||
|
||||
@chapter Rules and Etiquette
|
||||
|
||||
@section What are the rules and the proper etiquette?
|
||||
@ -357,6 +375,15 @@ form a multi-part message is recommended by email standards.
|
||||
Check your spam folder.
|
||||
@end itemize
|
||||
|
||||
@anchor{Why do I keep getting unsubscribed from ffmpeg-devel?}
|
||||
@section Why do I keep getting unsubscribed from ffmpeg-devel?
|
||||
|
||||
Users with an email service that has a DMARC reject or quarantine policy may be
|
||||
automatically unsubscribed from the ffmpeg-devel mailing list due to the mailing
|
||||
list messages being continuously rejected and bounced back.
|
||||
|
||||
Consider using a different email service.
|
||||
|
||||
@anchor{Who do I contact if I have a problem with the mailing list?}
|
||||
@section Who do I contact if I have a problem with the mailing list?
|
||||
|
||||
|
@ -33,7 +33,7 @@ At the beginning of a chapter section there may be an optional timebase to be
|
||||
used for start/end values. It must be in form
|
||||
@samp{TIMEBASE=@var{num}/@var{den}}, where @var{num} and @var{den} are
|
||||
integers. If the timebase is missing then start/end times are assumed to
|
||||
be in milliseconds.
|
||||
be in nanoseconds.
|
||||
|
||||
Next a chapter section must contain chapter start and end times in form
|
||||
@samp{START=@var{num}}, @samp{END=@var{num}}, where @var{num} is a positive
|
||||
|
332
doc/muxers.texi
@ -94,21 +94,23 @@ compatibility with software that only supports a single audio stream in AVI
|
||||
@anchor{chromaprint}
|
||||
@section chromaprint
|
||||
|
||||
Chromaprint fingerprinter
|
||||
Chromaprint fingerprinter.
|
||||
|
||||
This muxer feeds audio data to the Chromaprint library, which generates
|
||||
a fingerprint for the provided audio data. It takes a single signed
|
||||
native-endian 16-bit raw audio stream.
|
||||
This muxer feeds audio data to the Chromaprint library,
|
||||
which generates a fingerprint for the provided audio data. See @url{https://acoustid.org/chromaprint}.
|
||||
|
||||
It takes a single signed native-endian 16-bit raw audio stream of at most 2 channels.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item silence_threshold
|
||||
Threshold for detecting silence, ranges from 0 to 32767. -1 for default
|
||||
(required for use with the AcoustID service).
|
||||
Threshold for detecting silence, ranges from -1 to 32767. -1 disables silence detection and
|
||||
is required for use with the AcoustID service. Default is -1.
|
||||
|
||||
@item algorithm
|
||||
Algorithm index to fingerprint with.
|
||||
Version of algorithm to fingerprint with. Range is 0 to 4. Version 2 requires that silence
|
||||
detection be enabled. Default is 1.
|
||||
|
||||
@item fp_format
|
||||
Format to output the fingerprint as. Accepts the following options:
|
||||
@ -120,7 +122,7 @@ Binary raw fingerprint
|
||||
Binary compressed fingerprint
|
||||
|
||||
@item base64
|
||||
Base64 compressed fingerprint
|
||||
Base64 compressed fingerprint @emph{(default)}
|
||||
|
||||
@end table
|
||||
|
||||
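
The following is an illustrative sketch only (the input and output file names are placeholders, and the muxer is only available when FFmpeg is built with Chromaprint support); it writes a base64 fingerprint of an audio file using the options described above:
@example
ffmpeg -i input.wav -f chromaprint -fp_format base64 -algorithm 1 fingerprint.txt
@end example
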
@ -214,58 +216,113 @@ It creates a MPD manifest file and segment files for each stream.
|
||||
The segment filename might contain pre-defined identifiers used with SegmentTemplate
|
||||
as defined in section 5.3.9.4.4 of the standard. Available identifiers are "$RepresentationID$",
|
||||
"$Number$", "$Bandwidth$" and "$Time$".
|
||||
In addition to the standard identifiers, an ffmpeg-specific "$ext$" identifier is also supported.
|
||||
When specified ffmpeg will replace $ext$ in the file name with muxing format's extensions such as mp4, webm etc.,
|
||||
|
||||
@example
|
||||
ffmpeg -re -i <input> -map 0 -map 0 -c:a libfdk_aac -c:v libx264
|
||||
-b:v:0 800k -b:v:1 300k -s:v:1 320x170 -profile:v:1 baseline
|
||||
-profile:v:0 main -bf 1 -keyint_min 120 -g 120 -sc_threshold 0
|
||||
-b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1
|
||||
-window_size 5 -adaptation_sets "id=0,streams=v id=1,streams=a"
|
||||
ffmpeg -re -i <input> -map 0 -map 0 -c:a libfdk_aac -c:v libx264 \
|
||||
-b:v:0 800k -b:v:1 300k -s:v:1 320x170 -profile:v:1 baseline \
|
||||
-profile:v:0 main -bf 1 -keyint_min 120 -g 120 -sc_threshold 0 \
|
||||
-b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1 \
|
||||
-window_size 5 -adaptation_sets "id=0,streams=v id=1,streams=a" \
|
||||
-f dash /path/to/out.mpd
|
||||
@end example
|
||||
|
||||
@table @option
|
||||
@item -min_seg_duration @var{microseconds}
|
||||
Set the segment length in microseconds.
|
||||
@item -window_size @var{size}
|
||||
@item min_seg_duration @var{microseconds}
|
||||
This is a deprecated option to set the segment length in microseconds, use @var{seg_duration} instead.
|
||||
@item seg_duration @var{duration}
|
||||
Set the segment length in seconds (fractional value can be set). The value is
|
||||
treated as average segment duration when @var{use_template} is enabled and
|
||||
@var{use_timeline} is disabled and as minimum segment duration for all the other
|
||||
use cases.
|
||||
@item window_size @var{size}
|
||||
Set the maximum number of segments kept in the manifest.
|
||||
@item -extra_window_size @var{size}
|
||||
@item extra_window_size @var{size}
|
||||
Set the maximum number of segments kept outside of the manifest before removing from disk.
|
||||
@item -remove_at_exit @var{remove}
|
||||
@item remove_at_exit @var{remove}
|
||||
Enable (1) or disable (0) removal of all segments when finished.
|
||||
@item -use_template @var{template}
|
||||
@item use_template @var{template}
|
||||
Enable (1) or disable (0) use of SegmentTemplate instead of SegmentList.
|
||||
@item -use_timeline @var{timeline}
|
||||
@item use_timeline @var{timeline}
|
||||
Enable (1) or disable (0) use of SegmentTimeline in SegmentTemplate.
|
||||
@item -single_file @var{single_file}
|
||||
@item single_file @var{single_file}
|
||||
Enable (1) or disable (0) storing all segments in one file, accessed using byte ranges.
|
||||
@item -single_file_name @var{file_name}
|
||||
DASH-templated name to be used for baseURL. Implies @var{single_file} set to "1".
|
||||
@item -init_seg_name @var{init_name}
|
||||
DASH-templated name to used for the initialization segment. Default is "init-stream$RepresentationID$.m4s"
|
||||
@item -media_seg_name @var{segment_name}
|
||||
DASH-templated name to used for the media segments. Default is "chunk-stream$RepresentationID$-$Number%05d$.m4s"
|
||||
@item -utc_timing_url @var{utc_url}
|
||||
@item single_file_name @var{file_name}
|
||||
DASH-templated name to be used for baseURL. Implies @var{single_file} set to "1". In the template, "$ext$" is replaced with the file name extension specific for the segment format.
|
||||
@item init_seg_name @var{init_name}
|
||||
DASH-templated name to be used for the initialization segment. Default is "init-stream$RepresentationID$.$ext$". "$ext$" is replaced with the file name extension specific for the segment format.
|
||||
@item media_seg_name @var{segment_name}
|
||||
DASH-templated name to be used for the media segments. Default is "chunk-stream$RepresentationID$-$Number%05d$.$ext$". "$ext$" is replaced with the file name extension specific for the segment format.
|
||||
@item utc_timing_url @var{utc_url}
|
||||
URL of the page that will return the UTC timestamp in ISO format. Example: "https://time.akamai.com/?iso"
|
||||
@item -http_user_agent @var{user_agent}
|
||||
@item method @var{method}
|
||||
Use the given HTTP method to create output files. Generally set to PUT or POST.
|
||||
@item http_user_agent @var{user_agent}
|
||||
Override User-Agent field in HTTP header. Applicable only for HTTP output.
|
||||
@item -http_persistent @var{http_persistent}
|
||||
@item http_persistent @var{http_persistent}
|
||||
Use persistent HTTP connections. Applicable only for HTTP output.
|
||||
@item -hls_playlist @var{hls_playlist}
|
||||
@item hls_playlist @var{hls_playlist}
|
||||
Generate HLS playlist files as well. The master playlist is generated with the filename master.m3u8.
|
||||
One media playlist file is generated for each stream with filenames media_0.m3u8, media_1.m3u8, etc.
|
||||
@item -streaming @var{streaming}
|
||||
@item streaming @var{streaming}
|
||||
Enable (1) or disable (0) chunk streaming mode of output. In chunk streaming
|
||||
mode, each frame will be a moof fragment which forms a chunk.
|
||||
@item -adaptation_sets @var{adaptation_sets}
|
||||
@item adaptation_sets @var{adaptation_sets}
|
||||
Assign streams to AdaptationSets. Syntax is "id=x,streams=a,b,c id=y,streams=d,e" with x and y being the IDs
|
||||
of the adaptation sets and a,b,c,d and e are the indices of the mapped streams.
|
||||
|
||||
To map all video (or audio) streams to an AdaptationSet, "v" (or "a") can be used as stream identifier instead of IDs.
|
||||
|
||||
When no assignment is defined, this defaults to an AdaptationSet for each stream.
|
||||
@item -timeout @var{timeout}
|
||||
@item timeout @var{timeout}
|
||||
Set timeout for socket I/O operations. Applicable only for HTTP output.
|
||||
@item index_correction @var{index_correction}
|
||||
Enable (1) or Disable (0) segment index correction logic. Applicable only when
|
||||
@var{use_template} is enabled and @var{use_timeline} is disabled.
|
||||
|
||||
When enabled, the logic monitors the flow of segment indexes. If a streams's
|
||||
segment index value is not at the expected real time position, then the logic
|
||||
corrects that index value.
|
||||
|
||||
Typically this logic is needed in live streaming use cases. The network bandwidth
|
||||
fluctuations are common during long run streaming. Each fluctuation can cause
|
||||
the segment indexes fall behind the expected real time position.
|
||||
@item format_options @var{options_list}
|
||||
Set container format (mp4/webm) options using a @code{:} separated list of
|
||||
key=value parameters. Values containing @code{:} special characters must be
|
||||
escaped.
|
||||
|
||||
@item global_sidx @var{global_sidx}
|
||||
Write global SIDX atom. Applicable only for single file, mp4 output, non-streaming mode.
|
||||
|
||||
@item dash_segment_type @var{dash_segment_type}
|
||||
Possible values:
|
||||
@table @option
|
||||
@item auto
|
||||
If this flag is set, the dash segment files format will be selected based on the stream codec. This is the default mode.
|
||||
|
||||
@item mp4
|
||||
If this flag is set, the dash segment files will be in ISOBMFF format.
|
||||
|
||||
@item webm
|
||||
If this flag is set, the dash segment files will be in WebM format.
|
||||
@end table
|
||||
|
||||
@item ignore_io_errors @var{ignore_io_errors}
|
||||
Ignore IO errors during open and write. Useful for long-duration runs with network output.
|
||||
|
||||
@item lhls @var{lhls}
|
||||
Enable Low-latency HLS (LHLS). Adds the #EXT-X-PREFETCH tag with the current segment's URI.
|
||||
Apple does not have an official spec for LHLS. Meanwhile the hls.js player folks are
|
||||
trying to standardize an open LHLS spec. The draft spec is available at https://github.com/video-dev/hlsjs-rfcs/blob/lhls-spec/proposals/0001-lhls.md
|
||||
This option will also try to comply with the above open spec until Apple's spec officially supports it.
|
||||
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
|
||||
This is an experimental feature.
|
||||
|
||||
@item master_m3u8_publish_rate @var{master_m3u8_publish_rate}
|
||||
Publish the master playlist repeatedly after every specified number of segment intervals.
|
||||
|
||||
@end table
|
||||
|
||||
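
As a hedged sketch only (the input and output paths are placeholders, and exact behaviour depends on the build and the player), several of the options above can be combined for a live-style output:
@example
ffmpeg -re -i input.mkv -map 0 -c:v libx264 -c:a aac \
  -f dash -seg_duration 4 -use_template 1 -use_timeline 0 \
  -window_size 5 -streaming 1 -hls_playlist 1 \
  /path/to/out.mpd
@end example
This keeps only the last five segments in the manifest and additionally writes HLS playlists next to the DASH output.
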
@anchor{framecrc}
|
||||
@ -586,7 +643,7 @@ This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
but only the file name part without any path info will be contained in the m3u8 segment list.
|
||||
Should a relative path be specified, the path of the created segment
|
||||
files will be relative to the current working directory.
|
||||
When use_localtime_mkdir is set, the whole expanded value of @var{filename} will be written into the m3u8 segment list.
|
||||
When strftime_mkdir is set, the whole expanded value of @var{filename} will be written into the m3u8 segment list.
|
||||
|
||||
When @code{var_stream_map} is set with two or more variant streams, the
|
||||
@var{filename} pattern must contain the string "%v", this string specifies
|
||||
@ -601,7 +658,8 @@ This example will produce the playlists segment file sets:
|
||||
@file{file_1_000.ts}, @file{file_1_001.ts}, @file{file_1_002.ts}, etc.
|
||||
|
||||
The string "%v" may be present in the filename or in the last directory name
|
||||
containing the file. If the string is present in the directory name, then
|
||||
containing the file, but only in one of them. (Additionally, %v may appear multiple times in the last
|
||||
sub-directory or filename.) If the string %v is present in the directory name, then
|
||||
sub-directories are created after expanding the directory name pattern. This
|
||||
enables creation of segments corresponding to different variant streams in
|
||||
subdirectories.
|
||||
@ -615,34 +673,40 @@ This example will produce the playlists segment file sets:
|
||||
@file{vs1/file_000.ts}, @file{vs1/file_001.ts}, @file{vs1/file_002.ts}, etc.
|
||||
|
||||
@item use_localtime
|
||||
Same as strftime option, will be deprecated.
|
||||
|
||||
@item strftime
|
||||
Use strftime() on @var{filename} to expand the segment filename with localtime.
|
||||
The segment number is also available in this mode, but to use it, you need to specify second_level_segment_index
|
||||
hls_flag and %%d will be the specifier.
|
||||
@example
|
||||
ffmpeg -i in.nut -use_localtime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8
|
||||
ffmpeg -i in.nut -strftime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{file-20160215-1455569023.ts}, @file{file-20160215-1455569024.ts}, etc.
|
||||
Note: On some systems/environments, the @code{%s} specifier is not available. See
|
||||
@code{strftime()} documentation.
|
||||
@example
|
||||
ffmpeg -i in.nut -use_localtime 1 -hls_flags second_level_segment_index -hls_segment_filename 'file-%Y%m%d-%%04d.ts' out.m3u8
|
||||
ffmpeg -i in.nut -strftime 1 -hls_flags second_level_segment_index -hls_segment_filename 'file-%Y%m%d-%%04d.ts' out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{file-20160215-0001.ts}, @file{file-20160215-0002.ts}, etc.
|
||||
|
||||
@item use_localtime_mkdir
|
||||
Used together with -use_localtime, it will create all subdirectories which
|
||||
Same as strftime_mkdir option, will be deprecated.
|
||||
|
||||
@item strftime_mkdir
|
||||
Used together with -strftime, it will create all subdirectories which
|
||||
are expanded in @var{filename}.
|
||||
@example
|
||||
ffmpeg -i in.nut -use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8
|
||||
ffmpeg -i in.nut -strftime 1 -strftime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8
|
||||
@end example
|
||||
This example will create a directory 20160215 (if it does not exist), and then
|
||||
produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{20160215/file-20160215-1455569023.ts}, @file{20160215/file-20160215-1455569024.ts}, etc.
|
||||
|
||||
@example
|
||||
ffmpeg -i in.nut -use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename '%Y/%m/%d/file-%Y%m%d-%s.ts' out.m3u8
|
||||
ffmpeg -i in.nut -strftime 1 -strftime_mkdir 1 -hls_segment_filename '%Y/%m/%d/file-%Y%m%d-%s.ts' out.m3u8
|
||||
@end example
|
||||
This example will create a directory hierarchy 2016/02/15 (if any of them do not exist), and then
|
||||
produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@ -727,17 +791,17 @@ Possible values:
|
||||
|
||||
@table @samp
|
||||
@item mpegts
|
||||
If this flag is set, the hls segment files will format to mpegts.
|
||||
the mpegts files is used in all hls versions.
|
||||
Output segment files in MPEG-2 Transport Stream format. This is
|
||||
compatible with all HLS versions.
|
||||
|
||||
@item fmp4
|
||||
If this flag is set, the hls segment files will format to fragment mp4 looks like dash.
|
||||
the fmp4 files is used in hls after version 7.
|
||||
Output segment files in fragmented MP4 format, similar to MPEG-DASH.
|
||||
fmp4 files may be used in HLS version 7 and above.
|
||||
|
||||
@end table
|
||||
|
||||
@item hls_fmp4_init_filename @var{filename}
|
||||
set filename to the fragment files header file, default filename is @file{init.mp4}.
|
||||
Set filename to the fragment files header file, default filename is @file{init.mp4}.
|
||||
|
||||
When @code{var_stream_map} is set with two or more variant streams, the
|
||||
@var{filename} pattern must contain the string "%v", this string specifies
|
||||
@ -791,6 +855,10 @@ including the file containing the AES encryption key.
|
||||
Add the @code{#EXT-X-INDEPENDENT-SEGMENTS} tag to playlists that have video segments
|
||||
and when all the segments of that playlist are guaranteed to start with a key frame.
|
||||
|
||||
@item iframes_only
|
||||
Add the @code{#EXT-X-I-FRAMES-ONLY} tag to playlists that have video segments
|
||||
and can play only I-frames in the @code{#EXT-X-BYTERANGE} mode.
|
||||
|
||||
@item split_by_time
|
||||
Allow segments to start on frames other than keyframes. This improves
|
||||
behavior on some players when the time between keyframes is inconsistent,
|
||||
@ -802,24 +870,24 @@ Generate @code{EXT-X-PROGRAM-DATE-TIME} tags.
|
||||
|
||||
@item second_level_segment_index
|
||||
Makes it possible to use segment indexes as %%d in hls_segment_filename expression
|
||||
besides date/time values when use_localtime is on.
|
||||
besides date/time values when strftime is on.
|
||||
To get fixed width numbers with trailing zeroes, %%0xd format is available where x is the required width.
|
||||
|
||||
@item second_level_segment_size
|
||||
Makes it possible to use segment sizes (counted in bytes) as %%s in hls_segment_filename
|
||||
expression besides date/time values when use_localtime is on.
|
||||
expression besides date/time values when strftime is on.
|
||||
To get fixed width numbers with trailing zeroes, %%0xs format is available where x is the required width.
|
||||
|
||||
@item second_level_segment_duration
|
||||
Makes it possible to use segment duration (calculated in microseconds) as %%t in hls_segment_filename
|
||||
expression besides date/time values when use_localtime is on.
|
||||
expression besides date/time values when strftime is on.
|
||||
To get fixed width numbers with trailing zeroes, %%0xt format is available where x is the required width.
|
||||
|
||||
@example
|
||||
ffmpeg -i sample.mpeg \
|
||||
-f hls -hls_time 3 -hls_list_size 5 \
|
||||
-hls_flags second_level_segment_index+second_level_segment_size+second_level_segment_duration \
|
||||
-use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename "segment_%Y%m%d%H%M%S_%%04d_%%08s_%%013t.ts" stream.m3u8
|
||||
-strftime 1 -strftime_mkdir 1 -hls_segment_filename "segment_%Y%m%d%H%M%S_%%04d_%%08s_%%013t.ts" stream.m3u8
|
||||
@end example
|
||||
This will produce segments like this:
|
||||
@file{segment_20170102194334_0003_00122200_0000003000000.ts}, @file{segment_20170102194334_0004_00120072_0000003000000.ts} etc.
|
||||
@ -827,7 +895,11 @@ This will produce segments like this:
|
||||
@item temp_file
|
||||
Write segment data to filename.tmp and rename to filename only once the segment is complete. A webserver
|
||||
serving up segments can be configured to reject requests to *.tmp to prevent access to in-progress segments
|
||||
before they have been added to the m3u8 playlist.
|
||||
before they have been added to the m3u8 playlist. This flag also affects how m3u8 playlist files are created.
|
||||
If this flag is set, all playlist files will be written into a temporary file and renamed after they are complete, similarly to how segments are handled.
|
||||
But playlists using the @code{file} protocol and with a type (@code{hls_playlist_type}) other than @code{vod}
|
||||
are always written into a temporary file regardless of this flag. Master playlist files (@code{master_pl_name}), if any, using the @code{file} protocol,
|
||||
are always written into a temporary file regardless of this flag if the @code{master_pl_publish_rate} value is other than zero.
|
||||
|
||||
@end table
|
||||
|
||||
@ -878,7 +950,21 @@ This example creates two hls variant streams. The first variant stream will
|
||||
contain video stream of bitrate 1000k and audio stream of bitrate 64k and the
|
||||
second variant stream will contain video stream of bitrate 256k and audio
|
||||
stream of bitrate 32k. Here, two media playlist with file names out_0.m3u8 and
|
||||
out_1.m3u8 will be created.
|
||||
out_1.m3u8 will be created. If you want meaningful text instead of indexes
|
||||
in the resulting names, you may specify names for each or some of the variants,
|
||||
as in the following example.
|
||||
|
||||
|
||||
@example
|
||||
ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \
|
||||
-map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0,name:my_hd v:1,a:1,name:my_sd" \
|
||||
http://example.com/live/out_%v.m3u8
|
||||
@end example
|
||||
|
||||
This example creates two hls variant streams as in the previous one.
|
||||
But here, the two media playlist with file names out_my_hd.m3u8 and
|
||||
out_my_sd.m3u8 will be created.
|
||||
|
||||
@example
|
||||
ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k \
|
||||
-map 0:v -map 0:a -map 0:v -f hls -var_stream_map "v:0 a:0 v:1" \
|
||||
@ -912,6 +998,37 @@ and they are mapped to the two video only variant streams with audio group names
|
||||
|
||||
By default, a single hls variant containing all the encoded streams is created.
|
||||
|
||||
@example
|
||||
ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \
|
||||
-map 0:a -map 0:a -map 0:v -f hls \
|
||||
-var_stream_map "a:0,agroup:aud_low,default:yes a:1,agroup:aud_low v:0,agroup:aud_low" \
|
||||
-master_pl_name master.m3u8 \
|
||||
http://example.com/live/out_%v.m3u8
|
||||
@end example
|
||||
This example creates two audio only and one video only variant streams. In
|
||||
addition to the #EXT-X-STREAM-INF tag for each variant stream in the master
|
||||
playlist, #EXT-X-MEDIA tag is also added for the two audio only variant streams
|
||||
and they are mapped to the one video only variant stream with audio group name
|
||||
'aud_low'. The @code{DEFAULT} attribute of each audio rendition is set to YES or NO according to the @code{default} field in the stream map.
|
||||
|
||||
By default, a single hls variant containing all the encoded streams is created.
|
||||
|
||||
@example
|
||||
ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \
|
||||
-map 0:a -map 0:a -map 0:v -f hls \
|
||||
-var_stream_map "a:0,agroup:aud_low,default:yes,language:ENG a:1,agroup:aud_low,language:CHN v:0,agroup:aud_low" \
|
||||
-master_pl_name master.m3u8 \
|
||||
http://example.com/live/out_%v.m3u8
|
||||
@end example
|
||||
This example creates two audio only and one video only variant streams. In
|
||||
addition to the #EXT-X-STREAM-INF tag for each variant stream in the master
|
||||
playlist, #EXT-X-MEDIA tag is also added for the two audio only variant streams
|
||||
and they are mapped to the one video only variant stream with audio group name
|
||||
'aud_low'. The @code{DEFAULT} attribute of each audio rendition is set to YES or NO according to the @code{default} field,
|
||||
and the language of one audio rendition is set to ENG while the language of the other is set to CHN.
|
||||
|
||||
By default, a single hls variant containing all the encoded streams is created.
|
||||
|
||||
@item cc_stream_map
|
||||
Map string which specifies different closed captions groups and their
|
||||
attributes. The closed captions stream groups are separated by space.
|
||||
@ -932,7 +1049,7 @@ ffmpeg -re -i in.ts -b:v 1000k -b:a 64k -a53cc 1 -f hls \
|
||||
http://example.com/live/out.m3u8
|
||||
@end example
|
||||
This example adds @code{#EXT-X-MEDIA} tag with @code{TYPE=CLOSED-CAPTIONS} in
|
||||
the master playlist with group name 'cc', langauge 'en' (english) and
|
||||
the master playlist with group name 'cc', language 'en' (english) and
|
||||
INSTREAM-ID 'CC1'. Also, it adds @code{CLOSED-CAPTIONS} attribute with group
|
||||
name 'cc' for the output variant stream.
|
||||
@example
|
||||
@ -975,6 +1092,12 @@ Use persistent HTTP connections. Applicable only for HTTP output.
|
||||
@item timeout
|
||||
Set timeout for socket I/O operations. Applicable only for HTTP output.
|
||||
|
||||
@item ignore_io_errors
|
||||
Ignore IO errors during open, write and delete. Useful for long-duration runs with network output.
|
||||
|
||||
@item headers
|
||||
Set custom HTTP headers, can override built in default headers. Applicable only for HTTP output.
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{ico}
|
||||
@ -1282,6 +1405,10 @@ more efficient), but with this option set, the muxer writes one moof/mdat
|
||||
pair for each track, making it easier to separate tracks.
|
||||
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -movflags skip_sidx
|
||||
Skip writing of sidx atom. When bitrate overhead due to sidx atom is high,
|
||||
this option could be used for cases where sidx atom is not mandatory.
|
||||
When global_sidx flag is enabled, this option will be ignored.
|
||||
@item -movflags faststart
|
||||
Run a second pass moving the index (moov atom) to the beginning of the file.
|
||||
This operation can take a while, and will not work in various situations such
|
||||
@ -1313,6 +1440,18 @@ be negative. This enables the initial sample to have DTS/CTS of zero, and
|
||||
reduces the need for edit lists for some cases such as video tracks with
|
||||
B-frames. Additionally, eases conformance with the DASH-IF interoperability
|
||||
guidelines.
|
||||
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -write_prft
|
||||
Write producer time reference box (PRFT) with a specified time source for the
|
||||
NTP field in the PRFT box. Set value as @samp{wallclock} to specify timesource
|
||||
as wallclock time and @samp{pts} to specify timesource as input packets' PTS
|
||||
values.
|
||||
|
||||
Setting value to @samp{pts} is applicable only for a live encoding use case,
|
||||
where PTS values are set as wallclock time at the source. For example, an
|
||||
encoding use case with decklink capture source where @option{video_pts} and
|
||||
@option{audio_pts} are set to @samp{abs_wallclock}.
|
||||
@end table
|
||||
|
||||
@subsection Example
|
||||
@ -1418,7 +1557,7 @@ Set the program @samp{service_type}. Default is @code{digital_tv}.
|
||||
Accepts the following options:
|
||||
@table @samp
|
||||
@item hex_value
|
||||
Any hexdecimal value between @code{0x01} to @code{0xff} as defined in
|
||||
Any hexadecimal value between @code{0x01} and @code{0xff} as defined in
|
||||
ETSI 300 468.
|
||||
@item digital_tv
|
||||
Digital TV service.
|
||||
@ -1521,7 +1660,7 @@ ffmpeg -i file.mpg -c copy \
|
||||
out.ts
|
||||
@end example
|
||||
|
||||
@section mxf, mxf_d10
|
||||
@section mxf, mxf_d10, mxf_opatom
|
||||
|
||||
MXF muxer.
|
||||
|
||||
@ -1533,7 +1672,7 @@ The muxer options are:
|
||||
@item store_user_comments @var{bool}
|
||||
Set if user comments should be stored if available or never.
|
||||
IRT D-10 does not allow user comments. The default is thus to write them for
|
||||
mxf but not for mxf_d10
|
||||
mxf and mxf_opatom but not for mxf_d10
|
||||
@end table
|
||||
|
||||
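
As an illustrative sketch (file names are placeholders; OP-Atom stores a single essence track per file, so only one stream is mapped here), a single 48 kHz PCM audio track could be written without user comments like this:
@example
ffmpeg -i input.mov -map 0:a:0 -c:a pcm_s24le -ar 48000 \
  -store_user_comments 0 -f mxf_opatom audio_track.mxf
@end example
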
@section null
|
||||
@ -2021,20 +2160,35 @@ ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0
|
||||
@anchor{tee}
|
||||
@section tee
|
||||
|
||||
The tee muxer can be used to write the same data to several files or any
|
||||
other kind of muxer. It can be used, for example, to both stream a video to
|
||||
the network and save it to disk at the same time.
|
||||
The tee muxer can be used to write the same data to several outputs, such as files or streams.
|
||||
It can be used, for example, to stream a video over a network and save it to disk at the same time.
|
||||
|
||||
It is different from specifying several outputs to the @command{ffmpeg}
|
||||
command-line tool because the audio and video data will be encoded only once
|
||||
with the tee muxer; encoding can be a very expensive process. It is not
|
||||
useful when using the libavformat API directly because it is then possible
|
||||
to feed the same packets to several muxers directly.
|
||||
command-line tool. With the tee muxer, the audio and video data will be encoded only once.
|
||||
With conventional multiple outputs, multiple encoding operations in parallel are initiated,
|
||||
which can be a very expensive process. The tee muxer is not useful when using the libavformat API
|
||||
directly because it is then possible to feed the same packets to several muxers directly.
|
||||
|
||||
Since the tee muxer does not represent any particular output format, ffmpeg cannot auto-select
|
||||
output streams. So all streams intended for output must be specified using @code{-map}. See
|
||||
the examples below.
|
||||
|
||||
Some encoders may need different options depending on the output format;
|
||||
the auto-detection of this can not work with the tee muxer, so they need to be explicitly specified.
|
||||
The main example is the @option{global_header} flag.
|
||||
|
||||
The slave outputs are specified in the file name given to the muxer,
|
||||
separated by '|'. If any of the slave name contains the '|' separator,
|
||||
leading or trailing spaces or any special character, those must be
|
||||
escaped (see @ref{quoting_and_escaping,,the "Quoting and escaping"
|
||||
section in the ffmpeg-utils(1) manual,ffmpeg-utils}).
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item use_fifo @var{bool}
|
||||
If set to 1, slave outputs will be processed in separate thread using @ref{fifo}
|
||||
If set to 1, slave outputs will be processed in separate threads using the @ref{fifo}
|
||||
muxer. This allows compensating for different speed/latency/reliability of
|
||||
outputs and setting up transparent recovery. By default this feature is turned off.
|
||||
|
||||
@ -2043,12 +2197,6 @@ Options to pass to fifo pseudo-muxer instances. See @ref{fifo}.
|
||||
|
||||
@end table
|
||||
|
||||
The slave outputs are specified in the file name given to the muxer,
|
||||
separated by '|'. If any of the slave name contains the '|' separator,
|
||||
leading or trailing spaces or any special character, it must be
|
||||
escaped (see @ref{quoting_and_escaping,,the "Quoting and escaping"
|
||||
section in the ffmpeg-utils(1) manual,ffmpeg-utils}).
|
||||
|
||||
Muxer options can be specified for each slave by prepending them as a list of
|
||||
@var{key}=@var{value} pairs separated by ':', between square brackets. If
|
||||
the options values contain a special character or the ':' separator, they
|
||||
@ -2057,13 +2205,27 @@ must be escaped; note that this is a second level escaping.
|
||||
The following special options are also recognized:
|
||||
@table @option
|
||||
@item f
|
||||
Specify the format name. Useful if it cannot be guessed from the
|
||||
output name suffix.
|
||||
Specify the format name. Required if it cannot be guessed from the
|
||||
output URL.
|
||||
|
||||
@item bsfs[/@var{spec}]
|
||||
Specify a list of bitstream filters to apply to the specified
|
||||
output.
|
||||
|
||||
It is possible to specify to which streams a given bitstream filter
|
||||
applies, by appending a stream specifier to the option separated by
|
||||
@code{/}. @var{spec} must be a stream specifier (see @ref{Format
|
||||
stream specifiers}).
|
||||
|
||||
If the stream specifier is not specified, the bitstream filters will be
|
||||
applied to all streams in the output. This will cause that output operation
|
||||
to fail if the output contains streams to which the bitstream filter cannot
|
||||
be applied e.g. @code{h264_mp4toannexb} being applied to an output containing an audio stream.
|
||||
|
||||
Options for a bitstream filter must be specified in the form of @code{opt=value}.
|
||||
|
||||
Several bitstream filters can be specified, separated by ",".
|
||||
|
||||
@item use_fifo @var{bool}
|
||||
This allows overriding the tee muxer use_fifo option for an individual slave muxer.
|
||||
|
||||
@ -2071,19 +2233,13 @@ This allows to override tee muxer use_fifo option for individual slave muxer.
|
||||
This allows overriding the tee muxer fifo_options for an individual slave muxer.
|
||||
See @ref{fifo}.
|
||||
|
||||
It is possible to specify to which streams a given bitstream filter
|
||||
applies, by appending a stream specifier to the option separated by
|
||||
@code{/}. @var{spec} must be a stream specifier (see @ref{Format
|
||||
stream specifiers}). If the stream specifier is not specified, the
|
||||
bitstream filters will be applied to all streams in the output.
|
||||
|
||||
Several bitstream filters can be specified, separated by ",".
|
||||
|
||||
@item select
|
||||
Select the streams that should be mapped to the slave output,
|
||||
specified by a stream specifier. If not specified, this defaults to
|
||||
all the input streams. You may use multiple stream specifiers
|
||||
separated by commas (@code{,}) e.g.: @code{a:0,v}
|
||||
all the mapped streams. This will cause that output operation to fail
|
||||
if the output format does not accept all mapped streams.
|
||||
|
||||
You may use multiple stream specifiers separated by commas (@code{,}) e.g.: @code{a:0,v}
|
||||
|
||||
@item onfail
|
||||
Specify behaviour on output failure. This can be set to either @code{abort} (which is
|
||||
@ -2097,7 +2253,7 @@ will continue without being affected.
|
||||
@itemize
|
||||
@item
|
||||
Encode something and both archive it in a WebM file and stream it
|
||||
as MPEG-TS over UDP (the streams need to be explicitly mapped):
|
||||
as MPEG-TS over UDP:
|
||||
@example
|
||||
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
|
||||
"archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
|
||||
@ -2120,23 +2276,19 @@ option is applied to @file{out.aac} in order to make it contain only
|
||||
audio packets.
|
||||
@example
|
||||
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac
|
||||
-f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
|
||||
-f tee "[bsfs/v=dump_extra=freq=keyframe]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
|
||||
@end example
|
||||
|
||||
@item
|
||||
As below, but select only stream @code{a:1} for the audio output. Note
|
||||
As above, but select only stream @code{a:1} for the audio output. Note
|
||||
that a second level escaping must be performed, as ":" is a special
|
||||
character used to separate options.
|
||||
@example
|
||||
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac
|
||||
-f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
|
||||
-f tee "[bsfs/v=dump_extra=freq=keyframe]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
Note: some codecs may need different options depending on the output format;
|
||||
the auto-detection of this can not work with the tee muxer. The main example
|
||||
is the @option{global_header} flag.
|
||||
|
||||
@section webm_dash_manifest
|
||||
|
||||
WebM DASH Manifest muxer.
|
||||
|
@ -140,7 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sinks}
|
||||
option of ffmpeg to list the available output devices.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@ -150,6 +151,14 @@ Defaults to @option{false}.
|
||||
Amount of time to preroll video in seconds.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item timing_offset
|
||||
Sets the genlock timing pixel offset on the used output.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@ -182,51 +191,6 @@ ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLi
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libndi_newtek
|
||||
|
||||
The libndi_newtek output device provides playback capabilities for using NDI (Network
|
||||
Device Interface, standard created by NewTek).
|
||||
|
||||
Output filename is a NDI name.
|
||||
|
||||
To enable this output device, you need the NDI SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
|
||||
NDI uses uyvy422 pixel format natively, but also supports bgra, bgr0, rgba and
|
||||
rgb0.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item reference_level
|
||||
The audio reference level in dB. This specifies how many dB above the
|
||||
reference level (+4dBU) is the full range of 16 bit audio.
|
||||
Defaults to @option{0}.
|
||||
|
||||
@item clock_video
|
||||
These specify whether video "clock" themselves.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item clock_audio
|
||||
These specify whether audio "clock" themselves.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Play video clip:
|
||||
@example
|
||||
ffmpeg -i "udp://@@239.1.1.1:10480?fifo_size=1000000&overrun_nonfatal=1" -vf "scale=720:576,fps=fps=25,setdar=dar=16/9,format=pix_fmts=uyvy422" -f libndi_newtek NEW_NDI1
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section fbdev
|
||||
|
||||
Linux framebuffer output device.
|
||||
@ -393,9 +357,18 @@ Set the SDL window size, can be a string of the form
|
||||
If not specified it defaults to the size of the input video,
|
||||
downscaled according to the aspect ratio.
|
||||
|
||||
@item window_x
|
||||
@item window_y
|
||||
Set the position of the window on the screen.
|
||||
|
||||
@item window_fullscreen
|
||||
Set fullscreen mode when non-zero value is provided.
|
||||
Default value is zero.
|
||||
|
||||
@item window_enable_quit
|
||||
Enable quit action (using window button or keyboard key)
|
||||
when non-zero value is provided.
|
||||
Default value is 1 (enable quit action).
|
||||
@end table
|
||||
|
||||
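
A small, hedged example (the input name is a placeholder) of the window placement options described above:
@example
ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size 640x480 \
  -window_x 100 -window_y 100 -f sdl "SDL output"
@end example
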
@subsection Interactive commands
|
||||
@ -420,6 +393,10 @@ ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL out
|
||||
|
||||
sndio audio output device.
|
||||
|
||||
@section v4l2
|
||||
|
||||
Video4Linux2 output device.
|
||||
|
||||
@section xv
|
||||
|
||||
XV (XVideo) output device.
|
||||
|
@ -148,16 +148,11 @@ To target 32 bits replace @code{x86_64} with @code{i686} in the command above.
|
||||
|
||||
@section Microsoft Visual C++ or Intel C++ Compiler for Windows
|
||||
|
||||
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
|
||||
and wrapper, or with MSVC 2013 and ICL natively.
|
||||
FFmpeg can be built with MSVC 2013 or later.
|
||||
|
||||
You will need the following prerequisites:
|
||||
|
||||
@itemize
|
||||
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://msys2.github.io/, MSYS2}
|
||||
@item @uref{http://www.nasm.us/, NASM}
|
||||
(Also available via MSYS2's package manager.)
|
||||
@ -166,16 +161,13 @@ You will need the following prerequisites:
|
||||
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
|
||||
earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your
|
||||
@code{PATH} as well.
|
||||
Place @code{nasm.exe} somewhere in your @code{PATH}.
|
||||
|
||||
Next, make sure any other headers and libs you want to use, such as zlib, are
|
||||
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
|
||||
and @code{INCLUDE} environment variables to include the @strong{Windows-style}
|
||||
paths to these directories. Alternatively, you can try to use the
|
||||
@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC
|
||||
2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too.
|
||||
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
|
||||
|
||||
Finally, run:
|
||||
|
||||
@ -217,8 +209,6 @@ can see.
|
||||
|
||||
@item FFmpeg has been tested with the following on i686 and x86_64:
|
||||
@itemize
|
||||
@item Visual Studio 2010 Pro and Express
|
||||
@item Visual Studio 2012 Pro and Express
|
||||
@item Visual Studio 2013 Pro and Express
|
||||
@item Intel Composer XE 2013
|
||||
@item Intel Composer XE 2013 SP1
|
||||
|
@ -193,6 +193,20 @@ Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable for files on slow medium.
|
||||
|
||||
@item follow
|
||||
If set to 1, the protocol will retry reading at the end of the file, allowing
|
||||
reading files that still are being written. In order for this to terminate,
|
||||
you either need to use the rw_timeout option, or use the interrupt callback
|
||||
(for API users).
|
||||
|
||||
@item seekable
|
||||
Controls if seekability is advertised on the file. 0 means non-seekable, -1
|
||||
means auto (seekable for normal files, non-seekable for named pipes).
|
||||
|
||||
Many demuxers handle seekable and non-seekable resources differently,
|
||||
overriding this might speed up opening certain files at the cost of losing some
|
||||
features (e.g. accurate seeking).
|
||||
@end table
|
||||
|
||||
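
As a non-authoritative sketch (the file name is a placeholder; the protocol options are passed here as per-input options on the ffmpeg command line), a file that is still being written could be read like this:
@example
ffmpeg -follow 1 -rw_timeout 10000000 -i live.ts -c copy output.mkv
@end example
Here reading continues while @file{live.ts} keeps growing, and @option{rw_timeout} (in microseconds) eventually ends the run once no new data arrives.
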
@section ftp
|
||||
@ -229,17 +243,6 @@ it, unless special care is taken (tests, customized server configuration
|
||||
etc.). Different FTP servers behave in different way during seek
|
||||
operation. ff* tools may produce incomplete content due to server limitations.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item follow
|
||||
If set to 1, the protocol will retry reading at the end of the file, allowing
|
||||
reading files that still are being written. In order for this to terminate,
|
||||
you either need to use the rw_timeout option, or use the interrupt callback
|
||||
(for API users).
|
||||
|
||||
@end table
|
||||
|
||||
@section gopher
|
||||
|
||||
Gopher protocol.
|
||||
@ -390,6 +393,11 @@ ffmpeg -i somefile.ogg -chunked_post 0 -c copy -f ogg http://@var{server}:@var{p
|
||||
wget --post-file=somefile.ogg http://@var{server}:@var{port}
|
||||
@end example
|
||||
|
||||
@item send_expect_100
|
||||
Send an Expect: 100-continue header for POST. If set to 1 it will send, if set
|
||||
to 0 it won't, if set to -1 it will try to send if it is applicable. Default
|
||||
value is -1.
|
||||
|
||||
@end table
|
||||
|
||||
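
As a hedged variant of the streaming example above (server and port are placeholders), the header can be requested explicitly for a POST upload:
@example
ffmpeg -i somefile.ogg -send_expect_100 1 -c copy -f ogg http://@var{server}:@var{port}
@end example
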
@subsection HTTP Cookies
|
||||
@ -1210,6 +1218,17 @@ IP Type of Service. Applies to sender only. Default value is 0xB8.
|
||||
@item ipttl=@var{ttl}
|
||||
IP Time To Live. Applies to sender only. Default value is 64.
|
||||
|
||||
@item latency
|
||||
Timestamp-based Packet Delivery Delay.
|
||||
Used to absorb bursts of missed packet retransmissions.
|
||||
This flag sets both @option{rcvlatency} and @option{peerlatency}
|
||||
to the same value. Note that prior to version 1.3.0
|
||||
this is the only flag to set the latency, however
|
||||
this is effectively equivalent to setting @option{peerlatency},
|
||||
when side is sender and @option{rcvlatency}
|
||||
when side is receiver, and the bidirectional stream
|
||||
sending is not supported.
|
||||
|
||||
@item listen_timeout
|
||||
Set socket listen timeout.
|
||||
|
||||
@ -1255,6 +1274,25 @@ only if @option{pbkeylen} is non-zero. It is used on
|
||||
the receiver only if the received data is encrypted.
|
||||
The configured passphrase cannot be recovered (write-only).
|
||||
|
||||
@item payload_size=@var{bytes}
|
||||
Sets the maximum declared size of a packet transferred
|
||||
during the single call to the sending function in Live
|
||||
mode. Use 0 if this value isn't used (which is default in
|
||||
file mode).
|
||||
Default is -1 (automatic), which typically means MPEG-TS;
|
||||
if you are going to use SRT
|
||||
to send any different kind of payload, such as, for example,
|
||||
wrapping a live stream in very small frames, then you can
|
||||
use a bigger maximum frame size, though not greater than
|
||||
1456 bytes.
|
||||
|
||||
@item pkt_size=@var{bytes}
|
||||
Alias for @samp{payload_size}.
|
||||
|
||||
@item peerlatency
|
||||
The latency value (as described in @option{rcvlatency}) that is
|
||||
set by the sender side as a minimum value for the receiver.
|
||||
|
||||
@item pbkeylen=@var{bytes}
|
||||
Sender encryption key length, in bytes.
|
||||
Only can be set to 0, 16, 24 and 32.
|
||||
@ -1263,11 +1301,23 @@ Not required on receiver (set to 0),
|
||||
key size obtained from sender in HaiCrypt handshake.
|
||||
Default value is 0.
|
||||
|
||||
@item rcvlatency
|
||||
The time that should elapse since the moment when the
|
||||
packet was sent and the moment when it's delivered to
|
||||
the receiver application in the receiving function.
|
||||
This time should be a buffer time large enough to cover
|
||||
the time spent for sending, unexpectedly extended RTT
|
||||
time, and the time needed to retransmit the lost UDP
|
||||
packet. The effective latency value will be the maximum
|
||||
of this options' value and the value of @option{peerlatency}
|
||||
set by the peer side. Before version 1.3.0 this option
|
||||
is only available as @option{latency}.
|
||||
|
||||
@item recv_buffer_size=@var{bytes}
|
||||
Set receive buffer size, expressed in bytes.
|
||||
Set UDP receive buffer size, expressed in bytes.
|
||||
|
||||
@item send_buffer_size=@var{bytes}
|
||||
Set send buffer size, expressed in bytes.
|
||||
Set UDP send buffer size, expressed in bytes.
|
||||
|
||||
@item rw_timeout
|
||||
Set raise error timeout for read/write operations.
|
||||
@ -1287,9 +1337,86 @@ have no chance of being delivered in time. It was
|
||||
automatically enabled in the sender if the receiver
|
||||
supports it.
|
||||
|
||||
@item tsbpddelay
|
||||
Timestamp-based Packet Delivery Delay.
|
||||
Used to absorb burst of missed packet retransmission.
|
||||
@item sndbuf=@var{bytes}
|
||||
Set send buffer size, expressed in bytes.
|
||||
|
||||
@item rcvbuf=@var{bytes}
|
||||
Set receive buffer size, expressed in bytes.
|
||||
|
||||
Receive buffer must not be greater than @option{ffs}.
|
||||
|
||||
@item lossmaxttl=@var{packets}
|
||||
The value up to which the Reorder Tolerance may grow. When
|
||||
Reorder Tolerance is > 0, then packet loss report is delayed
|
||||
until that number of packets come in. Reorder Tolerance
|
||||
increases every time a "belated" packet has come, but it
|
||||
wasn't due to retransmission (that is, when UDP packets tend
|
||||
to come out of order), with the difference between the latest
|
||||
sequence and this packet's sequence, and not more than the
|
||||
value of this option. By default it's 0, which means that this
|
||||
mechanism is turned off, and the loss report is always sent
|
||||
immediately upon experiencing a "gap" in sequences.
|
||||
|
||||
@item minversion
|
||||
The minimum SRT version that is required from the peer. A connection
|
||||
to a peer that does not satisfy the minimum version requirement
|
||||
will be rejected.
|
||||
|
||||
The version format in hex is 0xXXYYZZ for x.y.z in human readable
|
||||
form.
|
||||
|
||||
@item streamid=@var{string}
|
||||
A string limited to 512 characters that can be set on the socket prior
|
||||
to connecting. This stream ID will be able to be retrieved by the
|
||||
listener side from the socket that is returned from srt_accept and
|
||||
was connected by a socket with that set stream ID. SRT does not enforce
|
||||
any special interpretation of the contents of this string.
|
||||
This option doesn’t make sense in Rendezvous connection; the result
|
||||
might be that simply one side will override the value from the other
|
||||
side, and it is a matter of luck which one wins.
|
||||
|
||||
@item smoother=@var{live|file}
|
||||
The type of Smoother used for the transmission for that socket, which
|
||||
is responsible for the transmission and congestion control. The Smoother
|
||||
type must be exactly the same on both connecting parties, otherwise
|
||||
the connection is rejected.
|
||||
|
||||
@item messageapi=@var{1|0}
|
||||
When set, this socket uses the Message API, otherwise it uses Buffer
|
||||
API. Note that in live mode (see @option{transtype}) there’s only
|
||||
message API available. In File mode you can choose to use one of two modes:
|
||||
|
||||
Stream API (default, when this option is false). In this mode you may
|
||||
send as much data as you wish with one sending instruction, or even use
|
||||
dedicated functions that read directly from a file. The internal facility
|
||||
will take care of any speed and congestion control. When receiving, you
|
||||
can also receive as much data as desired; the data not extracted will be
|
||||
waiting for the next call. There is no boundary between data portions in
|
||||
the Stream mode.
|
||||
|
||||
Message API. In this mode your single sending instruction passes exactly
|
||||
one piece of data that has boundaries (a message). Contrary to Live mode,
|
||||
this message may span across multiple UDP packets and the only size
|
||||
limitation is that it shall fit as a whole in the sending buffer. The
|
||||
receiver shall use as large buffer as necessary to receive the message,
|
||||
otherwise the message will not be given up. When the message is not
|
||||
complete (not all packets received or there was a packet loss) it will
|
||||
not be given up.
|
||||
|
||||
@item transtype=@var{live|file}
|
||||
Sets the transmission type for the socket, in particular, setting this
|
||||
option sets multiple other parameters to their default values as required
|
||||
for a particular transmission type.
|
||||
|
||||
live: Set options as for live transmission. In this mode, you should
|
||||
send by one sending instruction only so many data that fit in one UDP packet,
|
||||
and limited to the value defined first in @option{payload_size} (1316 is
|
||||
default in this mode). There is no speed control in this mode, only the
|
||||
bandwidth control, if configured, in order to not exceed the bandwidth with
|
||||
the overhead transmission (retransmitted and control packets).
|
||||
|
||||
file: Set options as for non-live transmission. See @option{messageapi}
|
||||
for further explanations
|
||||
|
||||
@end table
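
As an illustration only (the address, port and stream ID below are
placeholder values, and the exact set of accepted options depends on the
libsrt build), a live MPEG-TS stream with a stream ID could be sent like
this:

@example
ffmpeg -re -i input.ts -c copy -f mpegts "srt://192.0.2.10:4200?transtype=live&streamid=mystream"
@end example

The listener side can then read the stream ID from the accepted socket,
for example to select which resource was requested.
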
@ -1397,6 +1524,9 @@ Set send buffer size, expressed bytes.

@item tcp_nodelay=@var{1|0}
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.

@item tcp_mss=@var{bytes}
Set maximum segment size for outgoing TCP packets, expressed in bytes.
@end table
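
For illustration (the host, port and MSS value below are placeholders,
not recommendations), both options can be appended to a TCP output URL
as query parameters:

@example
ffmpeg -i input.mkv -c copy -f mpegts "tcp://192.0.2.10:5555?tcp_nodelay=1&tcp_mss=1460"
@end example
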
The following example shows how to set up a listening TCP connection
|
||||
@ -1503,9 +1633,8 @@ packet bursts.
|
||||
Override the local UDP port to bind with.
|
||||
|
||||
@item localaddr=@var{addr}
|
||||
Choose the local IP address. This is useful e.g. if sending multicast
|
||||
and the host has multiple interfaces, where the user can choose
|
||||
which interface to send on by specifying the IP address of that interface.
|
||||
Local IP address of a network interface used for sending packets or joining
|
||||
multicast groups.
|
||||
|
||||
@item pkt_size=@var{size}
|
||||
Set the size in bytes of UDP packets.
|
||||
@ -1528,12 +1657,12 @@ For receiving, this gives the benefit of only receiving packets from
|
||||
the specified peer address/port.
|
||||
|
||||
@item sources=@var{address}[,@var{address}]
|
||||
Only receive packets sent to the multicast group from one of the
|
||||
specified sender IP addresses.
|
||||
Only receive packets sent from the specified addresses. In case of multicast,
|
||||
also subscribe to multicast traffic coming from these addresses only.
|
||||
|
||||
@item block=@var{address}[,@var{address}]
|
||||
Ignore packets sent to the multicast group from the specified
|
||||
sender IP addresses.
|
||||
Ignore packets sent from the specified addresses. In case of multicast, also
|
||||
exclude the source addresses in the multicast subscription.
|
||||
|
||||
@item fifo_size=@var{units}
|
||||
Set the UDP receiving circular buffer size, expressed as a number of
|
||||
|
@ -5,7 +5,8 @@
|
||||
The video scaler supports the following named options.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools. For programmatic use, they can be set explicitly in the
|
||||
FFmpeg tools, with a few API-only exceptions noted below.
|
||||
For programmatic use, they can be set explicitly in the
|
||||
@code{SwsContext} options or through the @file{libavutil/opt.h} API.
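
As a minimal sketch (file names and target size are arbitrary), several
of the flag options listed below can be combined in the FFmpeg tools via
@option{sws_flags}:

@example
ffmpeg -i input.mp4 -s 1280x720 -sws_flags lanczos+bitexact output.mp4
@end example
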
@table @option
|
||||
@ -47,7 +48,8 @@ Select Gaussian rescaling algorithm.
|
||||
Select sinc rescaling algorithm.
|
||||
|
||||
@item lanczos
|
||||
Select Lanczos rescaling algorithm.
|
||||
Select Lanczos rescaling algorithm. The default width (alpha) is 3 and can be
|
||||
changed by setting @code{param0}.
|
||||
|
||||
@item spline
|
||||
Select natural bicubic spline rescaling algorithm.
|
||||
@ -68,29 +70,31 @@ Select full chroma input.
|
||||
Enable bitexact output.
|
||||
@end table
|
||||
|
||||
@item srcw
|
||||
@item srcw @var{(API only)}
|
||||
Set source width.
|
||||
|
||||
@item srch
|
||||
@item srch @var{(API only)}
|
||||
Set source height.
|
||||
|
||||
@item dstw
|
||||
@item dstw @var{(API only)}
|
||||
Set destination width.
|
||||
|
||||
@item dsth
|
||||
@item dsth @var{(API only)}
|
||||
Set destination height.
|
||||
|
||||
@item src_format
|
||||
@item src_format @var{(API only)}
|
||||
Set source pixel format (must be expressed as an integer).
|
||||
|
||||
@item dst_format
|
||||
@item dst_format @var{(API only)}
|
||||
Set destination pixel format (must be expressed as an integer).
|
||||
|
||||
@item src_range
|
||||
Select source range.
|
||||
@item src_range @var{(boolean)}
|
||||
If value is set to @code{1}, indicates source is full range. Default value is
|
||||
@code{0}, which indicates source is limited range.
|
||||
|
||||
@item dst_range
|
||||
Select destination range.
|
||||
@item dst_range @var{(boolean)}
|
||||
If value is set to @code{1}, enable full range for destination. Default value
|
||||
is @code{0}, which enables limited range.
|
||||
|
||||
@anchor{sws_params}
|
||||
@item param0, param1
|
||||
|
doc/snow.txt
@ -172,7 +172,7 @@ spatial_decomposition_count
|
||||
FIXME
|
||||
|
||||
colorspace_type
|
||||
0 unspecified YcbCr
|
||||
0 unspecified YCbCr
|
||||
1 Gray
|
||||
2 Gray + Alpha
|
||||
3 GBR
|
||||
@ -235,7 +235,7 @@ spatial_decomposition_type
|
||||
stored as delta from last, last is reset to 0 if always_reset || keyframe
|
||||
|
||||
qlog
|
||||
quality (logarthmic quantizer scale)
|
||||
quality (logarithmic quantizer scale)
|
||||
stored as delta from last, last is reset to 0 if always_reset || keyframe
|
||||
|
||||
mv_scale
|
||||
@ -251,11 +251,11 @@ block_max_depth
|
||||
stored as delta from last, last is reset to 0 if always_reset || keyframe
|
||||
|
||||
quant_table
|
||||
quantiztation table
|
||||
quantization table
|
||||
|
||||
|
||||
Highlevel bitstream structure:
|
||||
=============================
|
||||
==============================
|
||||
--------------------------------------------
|
||||
| Header |
|
||||
--------------------------------------------
|
||||
@ -303,7 +303,7 @@ Decoding process:
|
||||
| Intra DC | |
|
||||
| | LL0 subband prediction
|
||||
------------ |
|
||||
\ Dequantizaton
|
||||
\ Dequantization
|
||||
------------------- \ |
|
||||
| Reference frames | \ IDWT
|
||||
| ------- ------- | Motion \ |
|
||||
@ -390,8 +390,8 @@ motion vector prediction
|
||||
(mvx_diff, mvy_diff)*mv_scale
|
||||
|
||||
|
||||
Intra DC Predicton:
|
||||
======================
|
||||
Intra DC Prediction:
|
||||
====================
|
||||
the luma and chroma values of the left block are used as predictors
|
||||
|
||||
the used luma and chroma is the sum of the predictor and y_diff, cb_diff, cr_diff
|
||||
@ -407,7 +407,7 @@ Motion Compensation:
|
||||
|
||||
Halfpel interpolation:
|
||||
----------------------
|
||||
halfpel interpolation is done by convolution with the halfpel filter stored
|
||||
Halfpel interpolation is done by convolution with the halfpel filter stored
|
||||
in the header:
|
||||
|
||||
horizontal halfpel samples are found by
|
||||
@ -463,8 +463,8 @@ to the closest available fullpel sample
|
||||
Smaller pel interpolation:
|
||||
--------------------------
|
||||
if diag_mc is set then points which lie on a line between 2 vertically,
|
||||
horiziontally or diagonally adjacent halfpel points shall be interpolated
|
||||
linearls with rounding to nearest and halfway values rounded up.
|
||||
horizontally or diagonally adjacent halfpel points shall be interpolated
|
||||
linearly with rounding to nearest and halfway values rounded up.
|
||||
points which lie on 2 diagonals at the same time should only use the one
|
||||
diagonal not containing the fullpel point
|
||||
|
||||
@ -519,8 +519,8 @@ width,height here are the width and height of the LL0 subband not of the final
|
||||
video
|
||||
|
||||
|
||||
Dequantizaton:
|
||||
==============
|
||||
Dequantization:
|
||||
===============
|
||||
FIXME
|
||||
|
||||
Wavelet Transform:
|
||||
|
@ -389,7 +389,7 @@ distributor with something like this:
|
||||
|
||||
td.in = in;
|
||||
td.out = out;
|
||||
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));
|
||||
ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
|
||||
|
||||
// ...
|
||||
|
||||
|
@ -38,7 +38,6 @@ OBJCCFLAGS = $(CPPFLAGS) $(CFLAGS) $(OBJCFLAGS)
|
||||
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
|
||||
CXXFLAGS := $(CPPFLAGS) $(CFLAGS) $(CXXFLAGS)
|
||||
X86ASMFLAGS += $(IFLAGS:%=%/) -I$(<D)/ -Pconfig.asm
|
||||
NVCCFLAGS += -ptx
|
||||
|
||||
HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
|
||||
@ -91,7 +90,7 @@ COMPILE_NVCC = $(call COMPILE,NVCC)
|
||||
%.h.c:
|
||||
$(Q)echo '#include "$*.h"' >$@
|
||||
|
||||
%.ptx: %.cu
|
||||
%.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h
|
||||
$(COMPILE_NVCC)
|
||||
|
||||
%.ptx.c: %.ptx
|
||||
@ -161,7 +160,7 @@ $(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
|
||||
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
|
||||
$(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
|
||||
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
@ -32,7 +32,7 @@ $(foreach P,$(AVPROGS-yes),$(eval $(call DOFFTOOL,$(P))))
|
||||
all: $(AVPROGS)
|
||||
|
||||
fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools
|
||||
OBJDIRS += fftools
|
||||
OUTDIRS += fftools
|
||||
|
||||
ifdef AVPROGS
|
||||
install: install-progs install-data
|
||||
|
@ -1018,7 +1018,7 @@ static int init_report(const char *env)
|
||||
av_free(key);
|
||||
}
|
||||
|
||||
av_bprint_init(&filename, 0, 1);
|
||||
av_bprint_init(&filename, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
expand_filename_template(&filename,
|
||||
av_x_if_null(filename_template, "%p-%t.log"), tm);
|
||||
av_free(filename_template);
|
||||
@ -1414,6 +1414,16 @@ static void print_codec(const AVCodec *c)
|
||||
AV_CODEC_CAP_SLICE_THREADS |
|
||||
AV_CODEC_CAP_AUTO_THREADS))
|
||||
printf("threads ");
|
||||
if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
|
||||
printf("avoidprobe ");
|
||||
if (c->capabilities & AV_CODEC_CAP_INTRA_ONLY)
|
||||
printf("intraonly ");
|
||||
if (c->capabilities & AV_CODEC_CAP_LOSSLESS)
|
||||
printf("lossless ");
|
||||
if (c->capabilities & AV_CODEC_CAP_HARDWARE)
|
||||
printf("hardware ");
|
||||
if (c->capabilities & AV_CODEC_CAP_HYBRID)
|
||||
printf("hybrid ");
|
||||
if (!c->capabilities)
|
||||
printf("none");
|
||||
printf("\n");
|
||||
@ -1434,6 +1444,17 @@ static void print_codec(const AVCodec *c)
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
if (avcodec_get_hw_config(c, 0)) {
|
||||
printf(" Supported hardware devices: ");
|
||||
for (int i = 0;; i++) {
|
||||
const AVCodecHWConfig *config = avcodec_get_hw_config(c, i);
|
||||
if (!config)
|
||||
break;
|
||||
printf("%s ", av_hwdevice_get_type_name(config->device_type));
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
if (c->supported_framerates) {
|
||||
const AVRational *fps = c->supported_framerates;
|
||||
|
||||
@ -1935,7 +1956,10 @@ static void show_help_bsf(const char *name)
|
||||
{
|
||||
const AVBitStreamFilter *bsf = av_bsf_get_by_name(name);
|
||||
|
||||
if (!bsf) {
|
||||
if (!name) {
|
||||
av_log(NULL, AV_LOG_ERROR, "No bitstream filter name specified.\n");
|
||||
return;
|
||||
} else if (!bsf) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown bit stream filter '%s'.\n", name);
|
||||
return;
|
||||
}
|
||||
|
fftools/ffmpeg.c
@ -120,8 +120,14 @@ const char *const forced_keyframes_const_names[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
typedef struct BenchmarkTimeStamps {
|
||||
int64_t real_usec;
|
||||
int64_t user_usec;
|
||||
int64_t sys_usec;
|
||||
} BenchmarkTimeStamps;
|
||||
|
||||
static void do_video_stats(OutputStream *ost, int frame_size);
|
||||
static int64_t getutime(void);
|
||||
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
|
||||
static int64_t getmaxrss(void);
|
||||
static int ifilter_has_all_input_formats(FilterGraph *fg);
|
||||
|
||||
@ -133,7 +139,7 @@ static int64_t decode_error_stat[2];
|
||||
|
||||
static int want_sdp = 1;
|
||||
|
||||
static int current_time;
|
||||
static BenchmarkTimeStamps current_time;
|
||||
AVIOContext *progress_avio = NULL;
|
||||
|
||||
static uint8_t *subtitle_out;
|
||||
@ -653,7 +659,7 @@ static void abort_codec_experimental(AVCodec *c, int encoder)
|
||||
static void update_benchmark(const char *fmt, ...)
|
||||
{
|
||||
if (do_benchmark_all) {
|
||||
int64_t t = getutime();
|
||||
BenchmarkTimeStamps t = get_benchmark_time_stamps();
|
||||
va_list va;
|
||||
char buf[1024];
|
||||
|
||||
@ -661,7 +667,11 @@ static void update_benchmark(const char *fmt, ...)
|
||||
va_start(va, fmt);
|
||||
vsnprintf(buf, sizeof(buf), fmt, va);
|
||||
va_end(va);
|
||||
av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
|
||||
av_log(NULL, AV_LOG_INFO,
|
||||
"bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
|
||||
t.user_usec - current_time.user_usec,
|
||||
t.sys_usec - current_time.sys_usec,
|
||||
t.real_usec - current_time.real_usec, buf);
|
||||
}
|
||||
current_time = t;
|
||||
}
|
||||
@ -714,11 +724,11 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
|
||||
if (ret < 0)
|
||||
exit_program(1);
|
||||
}
|
||||
ret = av_packet_ref(&tmp_pkt, pkt);
|
||||
ret = av_packet_make_refcounted(pkt);
|
||||
if (ret < 0)
|
||||
exit_program(1);
|
||||
av_packet_move_ref(&tmp_pkt, pkt);
|
||||
av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
|
||||
av_packet_unref(pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -762,7 +772,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
|
||||
- FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
|
||||
- FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
|
||||
}
|
||||
if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
|
||||
if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
|
||||
pkt->dts != AV_NOPTS_VALUE &&
|
||||
!(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
|
||||
ost->last_mux_dts != AV_NOPTS_VALUE) {
|
||||
@ -1069,6 +1079,7 @@ static void do_video_out(OutputFile *of,
|
||||
|
||||
if (!ost->filters_script &&
|
||||
!ost->filters &&
|
||||
(nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
|
||||
next_picture &&
|
||||
ist &&
|
||||
lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
|
||||
@ -1111,7 +1122,7 @@ static void do_video_out(OutputFile *of,
|
||||
format_video_sync != VSYNC_PASSTHROUGH &&
|
||||
format_video_sync != VSYNC_DROP) {
|
||||
if (delta0 < -0.6) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
|
||||
} else
|
||||
av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
|
||||
sync_ipts = ost->sync_opts;
|
||||
@ -1183,33 +1194,27 @@ static void do_video_out(OutputFile *of,
|
||||
}
|
||||
ost->last_dropped = nb_frames == nb0_frames && next_picture;
|
||||
|
||||
/* duplicates frame if needed */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
AVFrame *in_picture;
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (i < nb0_frames && ost->last_frame) {
|
||||
in_picture = ost->last_frame;
|
||||
} else
|
||||
in_picture = next_picture;
|
||||
|
||||
if (!in_picture)
|
||||
return;
|
||||
|
||||
in_picture->pts = ost->sync_opts;
|
||||
|
||||
#if 1
|
||||
if (!check_recording_time(ost))
|
||||
#else
|
||||
if (ost->frame_number >= ost->max_frames)
|
||||
#endif
|
||||
return;
|
||||
|
||||
{
|
||||
/* duplicates frame if needed */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
AVFrame *in_picture;
|
||||
int forced_keyframe = 0;
|
||||
double pts_time;
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (i < nb0_frames && ost->last_frame) {
|
||||
in_picture = ost->last_frame;
|
||||
} else
|
||||
in_picture = next_picture;
|
||||
|
||||
if (!in_picture)
|
||||
return;
|
||||
|
||||
in_picture->pts = ost->sync_opts;
|
||||
|
||||
if (!check_recording_time(ost))
|
||||
return;
|
||||
|
||||
if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
|
||||
ost->top_field_first >= 0)
|
||||
@ -1226,8 +1231,12 @@ static void do_video_out(OutputFile *of,
|
||||
in_picture->quality = enc->global_quality;
|
||||
in_picture->pict_type = 0;
|
||||
|
||||
if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
|
||||
in_picture->pts != AV_NOPTS_VALUE)
|
||||
ost->forced_kf_ref_pts = in_picture->pts;
|
||||
|
||||
pts_time = in_picture->pts != AV_NOPTS_VALUE ?
|
||||
in_picture->pts * av_q2d(enc->time_base) : NAN;
|
||||
(in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
|
||||
if (ost->forced_kf_index < ost->forced_kf_count &&
|
||||
in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
|
||||
ost->forced_kf_index++;
|
||||
@ -1278,6 +1287,8 @@ static void do_video_out(OutputFile *of,
|
||||
ret = avcodec_send_frame(enc, in_picture);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
// Make sure Closed Captions will not be duplicated
|
||||
av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
|
||||
|
||||
while (1) {
|
||||
ret = avcodec_receive_packet(enc, &pkt);
|
||||
@ -1314,18 +1325,17 @@ static void do_video_out(OutputFile *of,
|
||||
fprintf(ost->logfile, "%s", enc->stats_out);
|
||||
}
|
||||
}
|
||||
}
|
||||
ost->sync_opts++;
|
||||
/*
|
||||
* For video, number of frames in == number of packets out.
|
||||
* But there may be reordering, so we can't throw away frames on encoder
|
||||
* flush, we need to limit them here, before they go into encoder.
|
||||
*/
|
||||
ost->frame_number++;
|
||||
ost->sync_opts++;
|
||||
/*
|
||||
* For video, number of frames in == number of packets out.
|
||||
* But there may be reordering, so we can't throw away frames on encoder
|
||||
* flush, we need to limit them here, before they go into encoder.
|
||||
*/
|
||||
ost->frame_number++;
|
||||
|
||||
if (vstats_filename && frame_size)
|
||||
do_video_stats(ost, frame_size);
|
||||
}
|
||||
if (vstats_filename && frame_size)
|
||||
do_video_stats(ost, frame_size);
|
||||
}
|
||||
|
||||
if (!ost->last_frame)
|
||||
ost->last_frame = av_frame_alloc();
|
||||
@ -1478,8 +1488,6 @@ static int reap_filters(int flush)
|
||||
av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
|
||||
av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
|
||||
}
|
||||
//if (ost->source_index >= 0)
|
||||
// *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
|
||||
|
||||
switch (av_buffersink_get_type(filter)) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
@ -1675,7 +1683,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
|
||||
vid = 0;
|
||||
av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprint_init(&buf_script, 0, 1);
|
||||
av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
float q = -1;
|
||||
ost = output_streams[i];
|
||||
@ -1696,7 +1704,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
|
||||
frame_number, fps < 9.95, fps, q);
|
||||
av_bprintf(&buf_script, "frame=%d\n", frame_number);
|
||||
av_bprintf(&buf_script, "fps=%.1f\n", fps);
|
||||
av_bprintf(&buf_script, "fps=%.2f\n", fps);
|
||||
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
|
||||
ost->file_index, ost->index, q);
|
||||
if (is_last_report)
|
||||
@ -1780,9 +1788,11 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
|
||||
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
|
||||
if (pts == AV_NOPTS_VALUE) {
|
||||
av_bprintf(&buf_script, "out_time_us=N/A\n");
|
||||
av_bprintf(&buf_script, "out_time_ms=N/A\n");
|
||||
av_bprintf(&buf_script, "out_time=N/A\n");
|
||||
} else {
|
||||
av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
|
||||
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
|
||||
av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
|
||||
hours_sign, hours, mins, secs, us);
|
||||
@ -1808,7 +1818,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
} else
|
||||
av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
|
||||
|
||||
fflush(stderr);
|
||||
fflush(stderr);
|
||||
}
|
||||
av_bprint_finalize(&buf, NULL);
|
||||
|
||||
@ -1915,46 +1925,46 @@ static void flush_encoders(void)
|
||||
av_assert0(0);
|
||||
}
|
||||
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
update_benchmark(NULL);
|
||||
update_benchmark(NULL);
|
||||
|
||||
while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
|
||||
ret = avcodec_send_frame(enc, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
|
||||
desc,
|
||||
av_err2str(ret));
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
|
||||
ret = avcodec_send_frame(enc, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
|
||||
desc,
|
||||
av_err2str(ret));
|
||||
exit_program(1);
|
||||
}
|
||||
if (ost->logfile && enc->stats_out) {
|
||||
fprintf(ost->logfile, "%s", enc->stats_out);
|
||||
}
|
||||
if (ret == AVERROR_EOF) {
|
||||
output_packet(of, &pkt, ost, 1);
|
||||
break;
|
||||
}
|
||||
if (ost->finished & MUXER_FINISHED) {
|
||||
av_packet_unref(&pkt);
|
||||
continue;
|
||||
}
|
||||
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
|
||||
pkt_size = pkt.size;
|
||||
output_packet(of, &pkt, ost, 0);
|
||||
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
|
||||
do_video_stats(ost, pkt_size);
|
||||
}
|
||||
}
|
||||
|
||||
update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
|
||||
desc,
|
||||
av_err2str(ret));
|
||||
exit_program(1);
|
||||
}
|
||||
if (ost->logfile && enc->stats_out) {
|
||||
fprintf(ost->logfile, "%s", enc->stats_out);
|
||||
}
|
||||
if (ret == AVERROR_EOF) {
|
||||
output_packet(of, &pkt, ost, 1);
|
||||
break;
|
||||
}
|
||||
if (ost->finished & MUXER_FINISHED) {
|
||||
av_packet_unref(&pkt);
|
||||
continue;
|
||||
}
|
||||
av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
|
||||
pkt_size = pkt.size;
|
||||
output_packet(of, &pkt, ost, 0);
|
||||
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
|
||||
do_video_stats(ost, pkt_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2094,10 +2104,12 @@ static void check_decode_result(InputStream *ist, int *got_output, int ret)
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (exit_on_error && *got_output && ist) {
|
||||
if (*got_output && ist) {
|
||||
if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
|
||||
exit_program(1);
|
||||
av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
|
||||
"%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
|
||||
if (exit_on_error)
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2301,14 +2313,12 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
|
||||
ist->samples_decoded += decoded_frame->nb_samples;
|
||||
ist->frames_decoded++;
|
||||
|
||||
#if 1
|
||||
/* increment next_dts to use for the case where the input stream does not
|
||||
have timestamps or there are multiple frames in the packet */
|
||||
ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
|
||||
avctx->sample_rate;
|
||||
ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
|
||||
avctx->sample_rate;
|
||||
#endif
|
||||
|
||||
if (decoded_frame->pts != AV_NOPTS_VALUE) {
|
||||
decoded_frame_tb = ist->st->time_base;
|
||||
@ -2700,6 +2710,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
|
||||
ist->dts = ist->next_dts;
|
||||
switch (ist->dec_ctx->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
av_assert1(pkt->duration >= 0);
|
||||
if (ist->dec_ctx->sample_rate) {
|
||||
ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
|
||||
ist->dec_ctx->sample_rate;
|
||||
@ -3059,7 +3070,13 @@ static int init_output_stream_streamcopy(OutputStream *ost)
|
||||
"Error setting up codec context options.\n");
|
||||
return ret;
|
||||
}
|
||||
avcodec_parameters_from_context(par_src, ost->enc_ctx);
|
||||
|
||||
ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Error getting reference codec parameters.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!codec_tag) {
|
||||
unsigned int codec_tag_tmp;
|
||||
@ -3324,7 +3341,7 @@ static int init_output_stream_encode(OutputStream *ost)
|
||||
"if you want a different framerate.\n",
|
||||
ost->file_index, ost->index);
|
||||
}
|
||||
// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
|
||||
|
||||
if (ost->enc->supported_framerates && !ost->force_fps) {
|
||||
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
|
||||
ost->frame_rate = ost->enc->supported_framerates[idx];
|
||||
@ -3387,6 +3404,12 @@ static int init_output_stream_encode(OutputStream *ost)
|
||||
enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
|
||||
}
|
||||
|
||||
if (ost->top_field_first == 0) {
|
||||
enc_ctx->field_order = AV_FIELD_BB;
|
||||
} else if (ost->top_field_first == 1) {
|
||||
enc_ctx->field_order = AV_FIELD_TT;
|
||||
}
|
||||
|
||||
if (ost->forced_keyframes) {
|
||||
if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
|
||||
ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
|
||||
@ -3401,8 +3424,8 @@ static int init_output_stream_encode(OutputStream *ost)
|
||||
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
|
||||
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
|
||||
|
||||
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
|
||||
// parse it only for static kf timings
|
||||
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
|
||||
// parse it only for static kf timings
|
||||
} else if(strncmp(ost->forced_keyframes, "source", 6)) {
|
||||
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
|
||||
}
|
||||
@ -3473,6 +3496,23 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
int input_props = 0, output_props = 0;
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
avcodec_descriptor_get(dec->codec_id);
|
||||
AVCodecDescriptor const *output_descriptor =
|
||||
avcodec_descriptor_get(ost->enc_ctx->codec_id);
|
||||
if (input_descriptor)
|
||||
input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (output_descriptor)
|
||||
output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (input_props && output_props && input_props != output_props) {
|
||||
snprintf(error, error_len,
|
||||
"Subtitle encoding currently only possible from text to text "
|
||||
"or bitmap to bitmap");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
|
||||
if (ret == AVERROR_EXPERIMENTAL)
|
||||
@ -3835,7 +3875,9 @@ static OutputStream *choose_output(void)
|
||||
av_rescale_q(ost->st->cur_dts, ost->st->time_base,
|
||||
AV_TIME_BASE_Q);
|
||||
if (ost->st->cur_dts == AV_NOPTS_VALUE)
|
||||
av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
|
||||
ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
|
||||
|
||||
if (!ost->initialized && !ost->inputs_done)
|
||||
return ost;
|
||||
@ -4128,7 +4170,7 @@ static void reset_eagain(void)
|
||||
|
||||
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
|
||||
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
|
||||
AVRational time_base)
|
||||
AVRational time_base)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -4308,9 +4350,11 @@ static int process_input(int file_index)
|
||||
if (ist->discard)
|
||||
goto discard_packet;
|
||||
|
||||
if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
|
||||
av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
|
||||
exit_program(1);
|
||||
if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
|
||||
av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
|
||||
"%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
|
||||
if (exit_on_error)
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (debug_ts) {
|
||||
@ -4430,7 +4474,10 @@ static int process_input(int file_index)
|
||||
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
|
||||
ifile->ts_offset -= delta;
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
|
||||
"timestamp discontinuity for stream #%d:%d "
|
||||
"(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
|
||||
ist->file_index, ist->st->index, ist->st->id,
|
||||
av_get_media_type_string(ist->dec_ctx->codec_type),
|
||||
delta, ifile->ts_offset);
|
||||
pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
|
||||
if (pkt.pts != AV_NOPTS_VALUE)
|
||||
@ -4745,23 +4792,30 @@ static int transcode(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int64_t getutime(void)
|
||||
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
|
||||
{
|
||||
BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
|
||||
#if HAVE_GETRUSAGE
|
||||
struct rusage rusage;
|
||||
|
||||
getrusage(RUSAGE_SELF, &rusage);
|
||||
return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
|
||||
time_stamps.user_usec =
|
||||
(rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
|
||||
time_stamps.sys_usec =
|
||||
(rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
|
||||
#elif HAVE_GETPROCESSTIMES
|
||||
HANDLE proc;
|
||||
FILETIME c, e, k, u;
|
||||
proc = GetCurrentProcess();
|
||||
GetProcessTimes(proc, &c, &e, &k, &u);
|
||||
return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
|
||||
time_stamps.user_usec =
|
||||
((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
|
||||
time_stamps.sys_usec =
|
||||
((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
|
||||
#else
|
||||
return av_gettime_relative();
|
||||
time_stamps.user_usec = time_stamps.sys_usec = 0;
|
||||
#endif
|
||||
return time_stamps;
|
||||
}
|
||||
|
||||
static int64_t getmaxrss(void)
|
||||
@ -4789,7 +4843,7 @@ static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int i, ret;
|
||||
int64_t ti;
|
||||
BenchmarkTimeStamps ti;
|
||||
|
||||
init_dynload();
|
||||
|
||||
@ -4831,22 +4885,23 @@ int main(int argc, char **argv)
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
// if (nb_input_files == 0) {
|
||||
// av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
|
||||
// exit_program(1);
|
||||
// }
|
||||
|
||||
for (i = 0; i < nb_output_files; i++) {
|
||||
if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
|
||||
want_sdp = 0;
|
||||
}
|
||||
|
||||
current_time = ti = getutime();
|
||||
current_time = ti = get_benchmark_time_stamps();
|
||||
if (transcode() < 0)
|
||||
exit_program(1);
|
||||
ti = getutime() - ti;
|
||||
if (do_benchmark) {
|
||||
av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
|
||||
int64_t utime, stime, rtime;
|
||||
current_time = get_benchmark_time_stamps();
|
||||
utime = current_time.user_usec - ti.user_usec;
|
||||
stime = current_time.sys_usec - ti.sys_usec;
|
||||
rtime = current_time.real_usec - ti.real_usec;
|
||||
av_log(NULL, AV_LOG_INFO,
|
||||
"bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
|
||||
utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
|
||||
}
|
||||
av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
|
||||
decode_error_stat[0], decode_error_stat[1]);
|
||||
|
@ -72,7 +72,7 @@ typedef struct HWAccel {
|
||||
} HWAccel;
|
||||
|
||||
typedef struct HWDevice {
|
||||
char *name;
|
||||
const char *name;
|
||||
enum AVHWDeviceType type;
|
||||
AVBufferRef *device_ref;
|
||||
} HWDevice;
|
||||
@ -484,6 +484,7 @@ typedef struct OutputStream {
|
||||
AVRational frame_aspect_ratio;
|
||||
|
||||
/* forced key frames */
|
||||
int64_t forced_kf_ref_pts;
|
||||
int64_t *forced_kf_pts;
|
||||
int forced_kf_count;
|
||||
int forced_kf_index;
|
||||
|
@ -65,6 +65,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCod
|
||||
if (codec && codec->pix_fmts) {
|
||||
const enum AVPixelFormat *p = codec->pix_fmts;
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
|
||||
//FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
|
||||
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
|
||||
enum AVPixelFormat best= AV_PIX_FMT_NONE;
|
||||
|
||||
@ -292,10 +293,17 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
exit_program(1);
|
||||
}
|
||||
ist = input_streams[input_files[file_idx]->ist_index + st->index];
|
||||
if (ist->user_set_discard == AVDISCARD_ALL) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
|
||||
"matches a disabled input stream.\n", p, fg->graph_desc);
|
||||
exit_program(1);
|
||||
}
|
||||
} else {
|
||||
/* find the first unused stream of corresponding type */
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
if (ist->user_set_discard == AVDISCARD_ALL)
|
||||
continue;
|
||||
if (ist->dec_ctx->codec_type == type && ist->discard)
|
||||
break;
|
||||
}
|
||||
@ -731,6 +739,7 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
|
||||
if (!ist->sub2video.frame)
|
||||
return AVERROR(ENOMEM);
|
||||
ist->sub2video.last_pts = INT64_MIN;
|
||||
ist->sub2video.end_pts = INT64_MIN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -774,7 +783,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
sar = ifilter->sample_aspect_ratio;
|
||||
if(!sar.den)
|
||||
sar = (AVRational){0,1};
|
||||
av_bprint_init(&args, 0, 1);
|
||||
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprintf(&args,
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
||||
"pixel_aspect=%d/%d:sws_param=flags=%d",
|
||||
|
@ -99,7 +99,7 @@ int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
|
||||
// -> av_hwdevice_ctx_create_derived()
|
||||
|
||||
AVDictionary *options = NULL;
|
||||
char *type_name = NULL, *name = NULL, *device = NULL;
|
||||
const char *type_name = NULL, *name = NULL, *device = NULL;
|
||||
enum AVHWDeviceType type;
|
||||
HWDevice *dev, *src;
|
||||
AVBufferRef *device_ref = NULL;
|
||||
@ -155,10 +155,12 @@ int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
|
||||
++p;
|
||||
q = strchr(p, ',');
|
||||
if (q) {
|
||||
device = av_strndup(p, q - p);
|
||||
if (!device) {
|
||||
err = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
if (q - p > 0) {
|
||||
device = av_strndup(p, q - p);
|
||||
if (!device) {
|
||||
err = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
err = av_dict_parse_string(&options, q + 1, "=", ",", 0);
|
||||
if (err < 0) {
|
||||
@ -168,7 +170,8 @@ int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
|
||||
}
|
||||
|
||||
err = av_hwdevice_ctx_create(&device_ref, type,
|
||||
device ? device : p, options, 0);
|
||||
q ? device : p[0] ? p : NULL,
|
||||
options, 0);
|
||||
if (err < 0)
|
||||
goto fail;
|
||||
|
||||
|
@ -268,7 +268,7 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
OptionsContext *o = optctx;
|
||||
StreamMap *m = NULL;
|
||||
int i, negative = 0, file_idx;
|
||||
int i, negative = 0, file_idx, disabled = 0;
|
||||
int sync_file_idx = -1, sync_stream_idx = 0;
|
||||
char *p, *sync;
|
||||
char *map;
|
||||
@ -303,6 +303,11 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
"match any streams.\n", arg);
|
||||
exit_program(1);
|
||||
}
|
||||
if (input_streams[input_files[sync_file_idx]->ist_index + sync_stream_idx]->user_set_discard == AVDISCARD_ALL) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s matches a disabled input "
|
||||
"stream.\n", arg);
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -339,6 +344,10 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
|
||||
*p == ':' ? p + 1 : p) <= 0)
|
||||
continue;
|
||||
if (input_streams[input_files[file_idx]->ist_index + i]->user_set_discard == AVDISCARD_ALL) {
|
||||
disabled = 1;
|
||||
continue;
|
||||
}
|
||||
GROW_ARRAY(o->stream_maps, o->nb_stream_maps);
|
||||
m = &o->stream_maps[o->nb_stream_maps - 1];
|
||||
|
||||
@ -358,6 +367,10 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
if (!m) {
|
||||
if (allow_unused) {
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Stream map '%s' matches no streams; ignoring.\n", arg);
|
||||
} else if (disabled) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches disabled streams.\n"
|
||||
"To ignore this, add a trailing '?' to the map.\n", arg);
|
||||
exit_program(1);
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n"
|
||||
"To ignore this, add a trailing '?' to the map.\n", arg);
|
||||
@ -437,7 +450,8 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
|
||||
/* allow trailing ? to map_channel */
|
||||
if (allow_unused = strchr(mapchan, '?'))
|
||||
*allow_unused = 0;
|
||||
if (m->channel_idx < 0 || m->channel_idx >= st->codecpar->channels) {
|
||||
if (m->channel_idx < 0 || m->channel_idx >= st->codecpar->channels ||
|
||||
input_streams[input_files[m->file_idx]->ist_index + m->stream_idx]->user_set_discard == AVDISCARD_ALL) {
|
||||
if (allow_unused) {
|
||||
av_log(NULL, AV_LOG_VERBOSE, "mapchan: invalid audio channel #%d.%d.%d\n",
|
||||
m->file_idx, m->stream_idx, m->channel_idx);
|
||||
@ -746,6 +760,13 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
|
||||
MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
|
||||
ist->user_set_discard = AVDISCARD_NONE;
|
||||
|
||||
if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ||
|
||||
(o->audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ||
|
||||
(o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) ||
|
||||
(o->data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA))
|
||||
ist->user_set_discard = AVDISCARD_ALL;
|
||||
|
||||
if (discard_str && av_opt_eval_int(&cc, discard_opt, discard_str, &ist->user_set_discard) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
|
||||
discard_str);
|
||||
@ -900,13 +921,14 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
|
||||
static void assert_file_overwrite(const char *filename)
|
||||
{
|
||||
const char *proto_name = avio_find_protocol_name(filename);
|
||||
|
||||
if (file_overwrite && no_file_overwrite) {
|
||||
fprintf(stderr, "Error, both -y and -n supplied. Exiting.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (!file_overwrite) {
|
||||
const char *proto_name = avio_find_protocol_name(filename);
|
||||
if (proto_name && !strcmp(proto_name, "file") && avio_check(filename, 0) == 0) {
|
||||
if (stdin_interaction && !no_file_overwrite) {
|
||||
fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
|
||||
@ -925,6 +947,19 @@ static void assert_file_overwrite(const char *filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (proto_name && !strcmp(proto_name, "file")) {
|
||||
for (int i = 0; i < nb_input_files; i++) {
|
||||
InputFile *file = input_files[i];
|
||||
if (file->ctx->iformat->flags & AVFMT_NOFILE)
|
||||
continue;
|
||||
if (!strcmp(filename, file->ctx->url)) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Output %s same as Input #%d - exiting\n", filename, i);
|
||||
av_log(NULL, AV_LOG_WARNING, "FFmpeg cannot edit existing files in-place.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void dump_attachment(AVStream *st, const char *filename)
|
||||
@ -1103,9 +1138,22 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
}
|
||||
|
||||
if (o->start_time != AV_NOPTS_VALUE && o->start_time_eof != AV_NOPTS_VALUE) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Cannot use -ss and -sseof both, using -ss for %s\n", filename);
|
||||
o->start_time_eof = AV_NOPTS_VALUE;
|
||||
}
|
||||
|
||||
if (o->start_time_eof != AV_NOPTS_VALUE) {
|
||||
if (ic->duration>0) {
|
||||
if (o->start_time_eof >= 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "-sseof value must be negative; aborting\n");
|
||||
exit_program(1);
|
||||
}
|
||||
if (ic->duration > 0) {
|
||||
o->start_time = o->start_time_eof + ic->duration;
|
||||
if (o->start_time < 0) {
|
||||
av_log(NULL, AV_LOG_WARNING, "-sseof value seeks to before start of file %s; ignored\n", filename);
|
||||
o->start_time = AV_NOPTS_VALUE;
|
||||
}
|
||||
} else
|
||||
av_log(NULL, AV_LOG_WARNING, "Cannot use -sseof, duration of %s not known\n", filename);
|
||||
}
|
||||
@ -1122,8 +1170,10 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
int dts_heuristic = 0;
|
||||
for (i=0; i<ic->nb_streams; i++) {
|
||||
const AVCodecParameters *par = ic->streams[i]->codecpar;
|
||||
if (par->video_delay)
|
||||
if (par->video_delay) {
|
||||
dts_heuristic = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (dts_heuristic) {
|
||||
seek_timestamp -= 3*AV_TIME_BASE / 23;
|
||||
@ -1324,6 +1374,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
ost->file_index = nb_output_files - 1;
|
||||
ost->index = idx;
|
||||
ost->st = st;
|
||||
ost->forced_kf_ref_pts = AV_NOPTS_VALUE;
|
||||
st->codecpar->codec_type = type;
|
||||
|
||||
ret = choose_encoder(o, oc, ost);
|
||||
@ -1630,6 +1681,8 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
|
||||
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
|
||||
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
|
||||
if (o->nb_filters > 1)
|
||||
av_log(NULL, AV_LOG_ERROR, "Only '-vf %s' read, ignoring remaining -vf options: Use ',' to separate filters\n", ost->filters);
|
||||
|
||||
if (!ost->stream_copy) {
|
||||
const char *p = NULL;
|
||||
@ -1811,6 +1864,8 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
|
||||
MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
|
||||
MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);
|
||||
if (o->nb_filters > 1)
|
||||
av_log(NULL, AV_LOG_ERROR, "Only '-af %s' read, ignoring remaining -af options: Use ',' to separate filters\n", ost->filters);
|
||||
|
||||
if (!ost->stream_copy) {
|
||||
char *sample_fmt = NULL;
|
||||
@ -2143,7 +2198,10 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
int new_area;
|
||||
ist = input_streams[i];
|
||||
new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames;
|
||||
new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames
|
||||
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
|
||||
if (ist->user_set_discard == AVDISCARD_ALL)
|
||||
continue;
|
||||
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
|
||||
new_area = 1;
|
||||
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
@ -2164,7 +2222,10 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
int score;
|
||||
ist = input_streams[i];
|
||||
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames;
|
||||
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames
|
||||
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
|
||||
if (ist->user_set_discard == AVDISCARD_ALL)
|
||||
continue;
|
||||
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
|
||||
score > best_score) {
|
||||
best_score = score;
|
||||
@ -2186,6 +2247,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
AVCodec const *output_codec =
|
||||
avcodec_find_encoder(oc->oformat->subtitle_codec);
|
||||
int input_props = 0, output_props = 0;
|
||||
if (input_streams[i]->user_set_discard == AVDISCARD_ALL)
|
||||
continue;
|
||||
if (output_codec)
|
||||
output_descriptor = avcodec_descriptor_get(output_codec->id);
|
||||
if (input_descriptor)
|
||||
@ -2207,6 +2270,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if (!o->data_disable ) {
|
||||
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
|
||||
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
|
||||
if (input_streams[i]->user_set_discard == AVDISCARD_ALL)
|
||||
continue;
|
||||
if (input_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_DATA
|
||||
&& input_streams[i]->st->codecpar->codec_id == codec_id )
|
||||
new_data_stream(o, oc, i);
|
||||
@ -2245,6 +2310,11 @@ loop_end:
|
||||
int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
|
||||
|
||||
ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
|
||||
if (ist->user_set_discard == AVDISCARD_ALL) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream #%d:%d is disabled and cannot be mapped.\n",
|
||||
map->file_index, map->stream_index);
|
||||
exit_program(1);
|
||||
}
|
||||
if(o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
|
||||
continue;
|
||||
if(o-> audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
|
||||
@ -3157,7 +3227,9 @@ void show_help_default(const char *opt, const char *arg)
|
||||
#if CONFIG_SWSCALE
|
||||
show_help_children(sws_get_class(), flags);
|
||||
#endif
|
||||
#if CONFIG_SWRESAMPLE
|
||||
show_help_children(swr_get_class(), AV_OPT_FLAG_AUDIO_PARAM);
|
||||
#endif
|
||||
show_help_children(avfilter_get_class(), AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM);
|
||||
show_help_children(av_bsf_get_class(), AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_BSF_PARAM);
|
||||
}
|
||||
@ -3337,7 +3409,7 @@ const OptionDef options[] = {
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
|
||||
"set the start time offset", "time_off" },
|
||||
{ "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time_eof) },
|
||||
OPT_INPUT, { .off = OFFSET(start_time_eof) },
|
||||
"set the start time offset relative to EOF", "time_off" },
|
||||
{ "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
|
||||
OPT_INPUT, { .off = OFFSET(seek_timestamp) },
|
||||
|
@ -93,7 +93,7 @@ int qsv_init(AVCodecContext *s)
|
||||
frames_ctx->height = FFALIGN(s->coded_height, 32);
|
||||
frames_ctx->format = AV_PIX_FMT_QSV;
|
||||
frames_ctx->sw_format = s->sw_pix_fmt;
|
||||
frames_ctx->initial_pool_size = 64;
|
||||
frames_ctx->initial_pool_size = 64 + s->extra_hw_frames;
|
||||
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
|
||||
|
||||
ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
|
||||
|
@ -52,6 +52,9 @@ static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
case kCVPixelFormatType_32BGRA: vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
|
||||
#ifdef kCFCoreFoundationVersionNumber10_7
|
||||
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
|
||||
#endif
|
||||
#if HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
|
||||
case kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_P010; break;
|
||||
#endif
|
||||
default:
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
|
fftools/ffplay.c
@ -314,13 +314,17 @@ static int default_width = 640;
|
||||
static int default_height = 480;
|
||||
static int screen_width = 0;
|
||||
static int screen_height = 0;
|
||||
static int screen_left = SDL_WINDOWPOS_CENTERED;
|
||||
static int screen_top = SDL_WINDOWPOS_CENTERED;
|
||||
static int audio_disable;
|
||||
static int video_disable;
|
||||
static int subtitle_disable;
|
||||
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
|
||||
static int seek_by_bytes = -1;
|
||||
static float seek_interval = 10;
|
||||
static int display_disable;
|
||||
static int borderless;
|
||||
static int alwaysontop;
|
||||
static int startup_volume = 100;
|
||||
static int show_status = 1;
|
||||
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
|
||||
@ -350,6 +354,7 @@ static char *afilters = NULL;
|
||||
#endif
|
||||
static int autorotate = 1;
|
||||
static int find_stream_info = 1;
|
||||
static int filter_nbthreads = 0;
|
||||
|
||||
/* current context */
|
||||
static int is_full_screen;
|
||||
@ -858,31 +863,27 @@ static void calculate_display_rect(SDL_Rect *rect,
|
||||
int scr_xleft, int scr_ytop, int scr_width, int scr_height,
|
||||
int pic_width, int pic_height, AVRational pic_sar)
|
||||
{
|
||||
float aspect_ratio;
|
||||
int width, height, x, y;
|
||||
AVRational aspect_ratio = pic_sar;
|
||||
int64_t width, height, x, y;
|
||||
|
||||
if (pic_sar.num == 0)
|
||||
aspect_ratio = 0;
|
||||
else
|
||||
aspect_ratio = av_q2d(pic_sar);
|
||||
if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
|
||||
aspect_ratio = av_make_q(1, 1);
|
||||
|
||||
if (aspect_ratio <= 0.0)
|
||||
aspect_ratio = 1.0;
|
||||
aspect_ratio *= (float)pic_width / (float)pic_height;
|
||||
aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
|
||||
|
||||
/* XXX: we suppose the screen has a 1.0 pixel ratio */
|
||||
height = scr_height;
|
||||
width = lrint(height * aspect_ratio) & ~1;
|
||||
width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
|
||||
if (width > scr_width) {
|
||||
width = scr_width;
|
||||
height = lrint(width / aspect_ratio) & ~1;
|
||||
height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
|
||||
}
|
||||
x = (scr_width - width) / 2;
|
||||
y = (scr_height - height) / 2;
|
||||
rect->x = scr_xleft + x;
|
||||
rect->y = scr_ytop + y;
|
||||
rect->w = FFMAX(width, 1);
|
||||
rect->h = FFMAX(height, 1);
|
||||
rect->w = FFMAX((int)width, 1);
|
||||
rect->h = FFMAX((int)height, 1);
|
||||
}
|
||||
|
||||
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
|
||||
@ -954,6 +955,22 @@ static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext *
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
|
||||
{
|
||||
#if SDL_VERSION_ATLEAST(2,0,8)
|
||||
SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
|
||||
if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
|
||||
if (frame->color_range == AVCOL_RANGE_JPEG)
|
||||
mode = SDL_YUV_CONVERSION_JPEG;
|
||||
else if (frame->colorspace == AVCOL_SPC_BT709)
|
||||
mode = SDL_YUV_CONVERSION_BT709;
|
||||
else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
|
||||
mode = SDL_YUV_CONVERSION_BT601;
|
||||
}
|
||||
SDL_SetYUVConversionMode(mode);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void video_image_display(VideoState *is)
|
||||
{
|
||||
Frame *vp;
|
||||
@ -1015,7 +1032,9 @@ static void video_image_display(VideoState *is)
|
||||
vp->flip_v = vp->frame->linesize[0] < 0;
|
||||
}
|
||||
|
||||
set_sdl_yuv_conversion_mode(vp->frame);
|
||||
SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
|
||||
set_sdl_yuv_conversion_mode(NULL);
|
||||
if (sp) {
|
||||
#if USE_ONEPASS_SUBTITLE_RENDER
|
||||
SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
|
||||
@ -1305,7 +1324,11 @@ static void sigterm_handler(int sig)
|
||||
static void set_default_window_size(int width, int height, AVRational sar)
|
||||
{
|
||||
SDL_Rect rect;
|
||||
calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
|
||||
int max_width = screen_width ? screen_width : INT_MAX;
|
||||
int max_height = screen_height ? screen_height : INT_MAX;
|
||||
if (max_width == INT_MAX && max_height == INT_MAX)
|
||||
max_height = height;
|
||||
calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
|
||||
default_width = rect.w;
|
||||
default_height = rect.h;
|
||||
}
|
||||
@ -1314,20 +1337,15 @@ static int video_open(VideoState *is)
|
||||
{
|
||||
int w,h;
|
||||
|
||||
if (screen_width) {
|
||||
w = screen_width;
|
||||
h = screen_height;
|
||||
} else {
|
||||
w = default_width;
|
||||
h = default_height;
|
||||
}
|
||||
w = screen_width ? screen_width : default_width;
|
||||
h = screen_height ? screen_height : default_height;
|
||||
|
||||
if (!window_title)
|
||||
window_title = input_filename;
|
||||
SDL_SetWindowTitle(window, window_title);
|
||||
|
||||
SDL_SetWindowSize(window, w, h);
|
||||
SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
|
||||
SDL_SetWindowPosition(window, screen_left, screen_top);
|
||||
if (is_full_screen)
|
||||
SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
|
||||
SDL_ShowWindow(window);
|
||||
@ -1938,6 +1956,7 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
|
||||
avfilter_graph_free(&is->agraph);
|
||||
if (!(is->agraph = avfilter_graph_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
is->agraph->nb_threads = filter_nbthreads;
|
||||
|
||||
while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
|
||||
av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
|
||||
@ -2087,10 +2106,10 @@ static int audio_thread(void *arg)
return ret;
}

static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
{
packet_queue_start(d->queue);
d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
if (!d->decoder_tid) {
av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
return AVERROR(ENOMEM);
@ -2109,26 +2128,17 @@ static int video_thread(void *arg)
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

#if CONFIG_AVFILTER
AVFilterGraph *graph = avfilter_graph_alloc();
AVFilterGraph *graph = NULL;
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = 0;
int last_h = 0;
enum AVPixelFormat last_format = -2;
int last_serial = -1;
int last_vfilter_idx = 0;
if (!graph) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}

#endif

if (!frame) {
#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
#endif
if (!frame)
return AVERROR(ENOMEM);
}

for (;;) {
ret = get_video_frame(is, frame);
@ -2151,6 +2161,11 @@ static int video_thread(void *arg)
(const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if (!graph) {
ret = AVERROR(ENOMEM);
goto the_end;
}
graph->nb_threads = filter_nbthreads;
if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
SDL_Event event;
event.type = FF_QUIT_EVENT;
@ -2193,6 +2208,8 @@ static int video_thread(void *arg)
ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
av_frame_unref(frame);
#if CONFIG_AVFILTER
if (is->videoq.serial != is->viddec.pkt_serial)
break;
}
#endif

@ -2578,7 +2595,7 @@ static int stream_component_open(VideoState *is, int stream_index)
if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
"No codec could be found with name '%s'\n", forced_codec_name);
else av_log(NULL, AV_LOG_WARNING,
"No codec could be found with id %d\n", avctx->codec_id);
"No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
ret = AVERROR(EINVAL);
goto fail;
}
@ -2658,7 +2675,7 @@ static int stream_component_open(VideoState *is, int stream_index)
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
goto out;
SDL_PauseAudioDevice(audio_dev, 0);
break;
@ -2667,7 +2684,7 @@ static int stream_component_open(VideoState *is, int stream_index)
is->video_st = ic->streams[stream_index];

decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
goto out;
is->queue_attachments_req = 1;
break;
@ -2676,7 +2693,7 @@ static int stream_component_open(VideoState *is, int stream_index)
is->subtitle_st = ic->streams[stream_index];

decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
goto out;
break;
default:
@ -3251,15 +3268,14 @@ static void event_loop(VideoState *cur_stream)
refresh_loop_wait_event(cur_stream, &event);
switch (event.type) {
case SDL_KEYDOWN:
if (exit_on_keydown) {
if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
do_exit(cur_stream);
break;
}
// If we don't yet have a window, skip all key events, because read_thread might still be initializing...
if (!cur_stream->width)
continue;
switch (event.key.keysym.sym) {
case SDLK_ESCAPE:
case SDLK_q:
do_exit(cur_stream);
break;
case SDLK_f:
toggle_full_screen(cur_stream);
cur_stream->force_refresh = 1;
@ -3324,10 +3340,10 @@ static void event_loop(VideoState *cur_stream)
seek_chapter(cur_stream, -1);
break;
case SDLK_LEFT:
incr = -10.0;
incr = seek_interval ? -seek_interval : -10.0;
goto do_seek;
case SDLK_RIGHT:
incr = 10.0;
incr = seek_interval ? seek_interval : 10.0;
goto do_seek;
case SDLK_UP:
incr = 60.0;
@ -3420,7 +3436,7 @@ static void event_loop(VideoState *cur_stream)
break;
case SDL_WINDOWEVENT:
switch (event.window.event) {
case SDL_WINDOWEVENT_RESIZED:
case SDL_WINDOWEVENT_SIZE_CHANGED:
screen_width = cur_stream->width = event.window.data1;
screen_height = cur_stream->height = event.window.data2;
if (cur_stream->vis_texture) {
@ -3563,8 +3579,10 @@ static const OptionDef options[] = {
{ "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
{ "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
{ "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
{ "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
{ "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
{ "noborder", OPT_BOOL, { &borderless }, "borderless window" },
{ "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
{ "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
{ "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
{ "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
@ -3581,6 +3599,8 @@ static const OptionDef options[] = {
{ "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
{ "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
{ "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
{ "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
{ "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
#if CONFIG_AVFILTER
{ "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
{ "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
@ -3596,6 +3616,7 @@ static const OptionDef options[] = {
{ "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
"read and decode the streams to fill missing information with heuristics" },
{ "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
{ NULL, },
};

@ -3633,7 +3654,7 @@ void show_help_default(const char *opt, const char *arg)
"c cycle program\n"
"w cycle video filters or show modes\n"
"s activate frame-step mode\n"
"left/right seek backward/forward 10 seconds\n"
"left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
"down/up seek backward/forward 1 minute\n"
"page down/page up seek backward/forward 10 minutes\n"
"right mouse click seek to percentage in file corresponding to fraction of width\n"
@ -3703,6 +3724,12 @@ int main(int argc, char **argv)

if (!display_disable) {
int flags = SDL_WINDOW_HIDDEN;
if (alwaysontop)
#if SDL_VERSION_ATLEAST(2,0,5)
flags |= SDL_WINDOW_ALWAYS_ON_TOP;
#else
av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
#endif
if (borderless)
flags |= SDL_WINDOW_BORDERLESS;
else

@ -165,6 +165,8 @@ typedef enum {
SECTION_ID_FRAME_TAGS,
SECTION_ID_FRAME_SIDE_DATA_LIST,
SECTION_ID_FRAME_SIDE_DATA,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE,
SECTION_ID_FRAME_LOG,
SECTION_ID_FRAME_LOGS,
SECTION_ID_LIBRARY_VERSION,
@ -209,7 +211,9 @@ static struct section sections[] = {
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } },
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } },
[SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } },
[SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, },
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
@ -2199,6 +2203,18 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_mpeg_tc_string(tcbuf, *(int64_t *)(sd->data));
print_str("timecode", tcbuf);
} else if (sd->type == AV_FRAME_DATA_S12M_TIMECODE && sd->size == 16) {
uint32_t *tc = (uint32_t*)sd->data;
int m = FFMIN(tc[0],3);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST);
for (int j = 1; j <= m ; j++) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE);
print_str("value", tcbuf);
writer_print_section_footer(w);
}
writer_print_section_footer(w);
} else if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) {
AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data;

@ -2413,9 +2429,7 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
}
av_packet_unref(&pkt);
}
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
av_packet_unref(&pkt);
//Flush remaining frames that are cached in the decoder
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
@ -2646,20 +2660,20 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
} while (0)

if (do_show_stream_disposition) {
writer_print_section_header(w, in_program ? SECTION_ID_PROGRAM_STREAM_DISPOSITION : SECTION_ID_STREAM_DISPOSITION);
PRINT_DISPOSITION(DEFAULT, "default");
PRINT_DISPOSITION(DUB, "dub");
PRINT_DISPOSITION(ORIGINAL, "original");
PRINT_DISPOSITION(COMMENT, "comment");
PRINT_DISPOSITION(LYRICS, "lyrics");
PRINT_DISPOSITION(KARAOKE, "karaoke");
PRINT_DISPOSITION(FORCED, "forced");
PRINT_DISPOSITION(HEARING_IMPAIRED, "hearing_impaired");
PRINT_DISPOSITION(VISUAL_IMPAIRED, "visual_impaired");
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
writer_print_section_footer(w);
writer_print_section_header(w, in_program ? SECTION_ID_PROGRAM_STREAM_DISPOSITION : SECTION_ID_STREAM_DISPOSITION);
PRINT_DISPOSITION(DEFAULT, "default");
PRINT_DISPOSITION(DUB, "dub");
PRINT_DISPOSITION(ORIGINAL, "original");
PRINT_DISPOSITION(COMMENT, "comment");
PRINT_DISPOSITION(LYRICS, "lyrics");
PRINT_DISPOSITION(KARAOKE, "karaoke");
PRINT_DISPOSITION(FORCED, "forced");
PRINT_DISPOSITION(HEARING_IMPAIRED, "hearing_impaired");
PRINT_DISPOSITION(VISUAL_IMPAIRED, "visual_impaired");
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
writer_print_section_footer(w);
}

if (do_show_stream_tags)

@ -145,7 +145,7 @@ typedef struct FourXContext {
int mv[256];
VLC pre_vlc;
int last_dc;
DECLARE_ALIGNED(16, int16_t, block)[6][64];
DECLARE_ALIGNED(32, int16_t, block)[6][64];
void *bitstream_buffer;
unsigned int bitstream_buffer_size;
int version;
@ -158,7 +158,7 @@ typedef struct FourXContext {
#define FIX_1_847759065 121095
#define FIX_2_613125930 171254

#define MULTIPLY(var, const) (((var) * (const)) >> 16)
#define MULTIPLY(var, const) ((int)((var) * (unsigned)(const)) >> 16)

static void idct(int16_t block[64])
{
@ -351,6 +351,8 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, const uint16_t *src,
index = size2index[log2h][log2w];
av_assert0(index >= 0);

if (get_bits_left(&f->gb) < 1)
return AVERROR_INVALIDDATA;
h = 1 << log2h;
code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table,
BLOCK_TYPE_VLC_BITS, 1);
@ -697,6 +699,7 @@ static const uint8_t *read_huffman_tables(FourXContext *f,
len_tab[j] = len;
}

ff_free_vlc(&f->pre_vlc);
if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257, len_tab, 1, 1,
bits_tab, 4, 4, 0))
return NULL;

@ -44,6 +44,7 @@ OBJS = ac3_parser.o \
|
||||
options.o \
|
||||
mjpegenc_huffman.o \
|
||||
parser.o \
|
||||
parsers.o \
|
||||
profiles.o \
|
||||
qsv_api.o \
|
||||
raw.o \
|
||||
@ -62,9 +63,12 @@ OBJS-$(CONFIG_BLOCKDSP) += blockdsp.o
|
||||
OBJS-$(CONFIG_BSWAPDSP) += bswapdsp.o
|
||||
OBJS-$(CONFIG_CABAC) += cabac.o
|
||||
OBJS-$(CONFIG_CBS) += cbs.o
|
||||
OBJS-$(CONFIG_CBS_AV1) += cbs_av1.o
|
||||
OBJS-$(CONFIG_CBS_H264) += cbs_h2645.o h2645_parse.o
|
||||
OBJS-$(CONFIG_CBS_H265) += cbs_h2645.o h2645_parse.o
|
||||
OBJS-$(CONFIG_CBS_JPEG) += cbs_jpeg.o
|
||||
OBJS-$(CONFIG_CBS_MPEG2) += cbs_mpeg2.o
|
||||
OBJS-$(CONFIG_CBS_VP9) += cbs_vp9.o
|
||||
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
|
||||
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
|
||||
OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
|
||||
@ -169,6 +173,7 @@ OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbd
|
||||
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
|
||||
ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o
|
||||
OBJS-$(CONFIG_AGM_DECODER) += agm.o
|
||||
OBJS-$(CONFIG_AIC_DECODER) += aic.o
|
||||
OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o alacdsp.o
|
||||
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
|
||||
@ -194,6 +199,7 @@ OBJS-$(CONFIG_APTX_HD_DECODER) += aptx.o
|
||||
OBJS-$(CONFIG_APTX_HD_ENCODER) += aptx.o
|
||||
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_APNG_ENCODER) += png.o pngenc.o
|
||||
OBJS-$(CONFIG_ARBC_DECODER) += arbc.o
|
||||
OBJS-$(CONFIG_SSA_DECODER) += assdec.o ass.o
|
||||
OBJS-$(CONFIG_SSA_ENCODER) += assenc.o ass.o
|
||||
OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o
|
||||
@ -209,6 +215,7 @@ OBJS-$(CONFIG_ATRAC3P_DECODER) += atrac3plusdec.o atrac3plus.o \
|
||||
atrac3plusdsp.o atrac.o
|
||||
OBJS-$(CONFIG_ATRAC3PAL_DECODER) += atrac3plusdec.o atrac3plus.o \
|
||||
atrac3plusdsp.o atrac.o
|
||||
OBJS-$(CONFIG_ATRAC9_DECODER) += atrac9dec.o
|
||||
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
|
||||
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
|
||||
OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o mjpegdec.o
|
||||
@ -351,12 +358,13 @@ OBJS-$(CONFIG_H264_OMX_ENCODER) += omx.o
|
||||
OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec_h2645.o
|
||||
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
|
||||
OBJS-$(CONFIG_H264_RKMPP_DECODER) += rkmppdec.o
|
||||
OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_encode_h264.o
|
||||
OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_encode_h264.o h264_levels.o
|
||||
OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o
|
||||
OBJS-$(CONFIG_H264_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||
OBJS-$(CONFIG_H264_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
|
||||
OBJS-$(CONFIG_HAP_DECODER) += hapdec.o hap.o
|
||||
OBJS-$(CONFIG_HAP_ENCODER) += hapenc.o hap.o
|
||||
OBJS-$(CONFIG_HCOM_DECODER) += hcom.o
|
||||
OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
|
||||
hevc_cabac.o hevc_refs.o hevcpred.o \
|
||||
hevcdsp.o hevc_filter.o hevc_data.o
|
||||
@ -369,7 +377,7 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec_h2645.o
|
||||
OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \
|
||||
hevc_data.o
|
||||
OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o
|
||||
OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o
|
||||
OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o
|
||||
OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||
OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
|
||||
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
|
||||
@ -378,10 +386,13 @@ OBJS-$(CONFIG_HQ_HQA_DECODER) += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
|
||||
OBJS-$(CONFIG_HQX_DECODER) += hqx.o hqxvlc.o hqxdsp.o canopus.o
|
||||
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
|
||||
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
|
||||
OBJS-$(CONFIG_HYMT_DECODER) += huffyuv.o huffyuvdec.o
|
||||
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
|
||||
OBJS-$(CONFIG_IDF_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_IFF_ILBM_DECODER) += iff.o
|
||||
OBJS-$(CONFIG_ILBC_DECODER) += ilbcdec.o
|
||||
OBJS-$(CONFIG_IMC_DECODER) += imc.o
|
||||
OBJS-$(CONFIG_IMM4_DECODER) += imm4.o
|
||||
OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
|
||||
OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
|
||||
OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi.o
|
||||
@ -402,6 +413,7 @@ OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
|
||||
OBJS-$(CONFIG_LAGARITH_DECODER) += lagarith.o lagarithrac.o
|
||||
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_LOCO_DECODER) += loco.o
|
||||
OBJS-$(CONFIG_LSCR_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_M101_DECODER) += m101.o
|
||||
OBJS-$(CONFIG_MACE3_DECODER) += mace.o
|
||||
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
|
||||
@ -478,13 +490,15 @@ OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
|
||||
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o
|
||||
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
|
||||
OBJS-$(CONFIG_MWSC_DECODER) += mwsc.o
|
||||
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
|
||||
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
|
||||
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
|
||||
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
|
||||
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o opus_rc.o \
|
||||
opus_pvq.o opus_silk.o opustab.o vorbis_data.o
|
||||
opus_pvq.o opus_silk.o opustab.o vorbis_data.o \
|
||||
opusdsp.o
|
||||
OBJS-$(CONFIG_OPUS_ENCODER) += opusenc.o opus.o opus_rc.o opustab.o opus_pvq.o \
|
||||
opusenc_psy.o
|
||||
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
|
||||
@ -508,10 +522,10 @@ OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
|
||||
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
|
||||
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o
|
||||
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o proresdata.o
|
||||
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
|
||||
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
|
||||
OBJS-$(CONFIG_PRORES_AW_ENCODER) += proresenc_anatoliy.o
|
||||
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o proresdata.o
|
||||
OBJS-$(CONFIG_PRORES_AW_ENCODER) += proresenc_anatoliy.o proresdata.o
|
||||
OBJS-$(CONFIG_PRORES_KS_ENCODER) += proresenc_kostya.o proresdata.o
|
||||
OBJS-$(CONFIG_PROSUMER_DECODER) += prosumer.o
|
||||
OBJS-$(CONFIG_PSD_DECODER) += psd.o
|
||||
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
|
||||
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o \
|
||||
@ -531,6 +545,7 @@ OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
|
||||
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_filters.o
|
||||
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
|
||||
OBJS-$(CONFIG_RASC_DECODER) += rasc.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
|
||||
OBJS-$(CONFIG_RAWVIDEO_ENCODER) += rawenc.o
|
||||
OBJS-$(CONFIG_REALTEXT_DECODER) += realtextdec.o ass.o
|
||||
@ -574,7 +589,7 @@ OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
|
||||
OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
|
||||
OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
|
||||
OBJS-$(CONFIG_SPEEDHQ_DECODER) += speedhq.o simple_idct.o
|
||||
OBJS-$(CONFIG_SPEEDHQ_DECODER) += speedhq.o mpeg12.o mpeg12data.o simple_idct.o
|
||||
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
|
||||
OBJS-$(CONFIG_SRGC_DECODER) += mscc.o
|
||||
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o htmlsubtitles.o
|
||||
@ -672,6 +687,7 @@ OBJS-$(CONFIG_VP9_V4L2M2M_DECODER) += v4l2_m2m_dec.o
|
||||
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
|
||||
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
|
||||
OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
|
||||
OBJS-$(CONFIG_WCMV_DECODER) += wcmv.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += webp.o
|
||||
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o ass.o
|
||||
OBJS-$(CONFIG_WEBVTT_ENCODER) += webvttenc.o ass_split.o
|
||||
@ -727,6 +743,7 @@ OBJS-$(CONFIG_PCM_ALAW_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_ALAW_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_BLURAY_DECODER) += pcm-bluray.o
|
||||
OBJS-$(CONFIG_PCM_DVD_DECODER) += pcm-dvd.o
|
||||
OBJS-$(CONFIG_PCM_DVD_ENCODER) += pcm-dvdenc.o
|
||||
OBJS-$(CONFIG_PCM_F16LE_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_F24LE_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_F32BE_DECODER) += pcm.o
|
||||
@ -784,12 +801,15 @@ OBJS-$(CONFIG_PCM_U32BE_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_U32BE_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_U32LE_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_VIDC_DECODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_VIDC_ENCODER) += pcm.o
|
||||
OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
|
||||
|
||||
OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o adx.o
|
||||
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o adx.o
|
||||
OBJS-$(CONFIG_ADPCM_AFC_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_AGM_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_AICA_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
|
||||
OBJS-$(CONFIG_ADPCM_DTK_DECODER) += adpcm.o adpcm_data.o
|
||||
@ -941,9 +961,12 @@ OBJS-$(CONFIG_PCM_ALAW_AT_ENCODER) += audiotoolboxenc.o
|
||||
OBJS-$(CONFIG_PCM_MULAW_AT_ENCODER) += audiotoolboxenc.o
|
||||
OBJS-$(CONFIG_LIBAOM_AV1_DECODER) += libaomdec.o
|
||||
OBJS-$(CONFIG_LIBAOM_AV1_ENCODER) += libaomenc.o
|
||||
OBJS-$(CONFIG_LIBARIBB24_DECODER) += libaribb24.o ass.o
|
||||
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||
OBJS-$(CONFIG_LIBCODEC2_DECODER) += libcodec2.o codec2utils.o
|
||||
OBJS-$(CONFIG_LIBCODEC2_ENCODER) += libcodec2.o codec2utils.o
|
||||
OBJS-$(CONFIG_LIBDAV1D_DECODER) += libdav1d.o
|
||||
OBJS-$(CONFIG_LIBDAVS2_DECODER) += libdavs2.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsmdec.o
|
||||
@ -985,7 +1008,8 @@ OBJS-$(CONFIG_LIBX262_ENCODER) += libx264.o
|
||||
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
|
||||
OBJS-$(CONFIG_LIBX265_ENCODER) += libx265.o
|
||||
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
|
||||
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o libxvid_rc.o
|
||||
OBJS-$(CONFIG_LIBXAVS2_ENCODER) += libxavs2.o
|
||||
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
|
||||
OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER) += libzvbi-teletextdec.o ass.o
|
||||
|
||||
# parsers
|
||||
@ -994,6 +1018,8 @@ OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
|
||||
mpeg4audio.o
|
||||
OBJS-$(CONFIG_AC3_PARSER) += ac3tab.o aac_ac3_parser.o
|
||||
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
|
||||
OBJS-$(CONFIG_AV1_PARSER) += av1_parser.o av1_parse.o
|
||||
OBJS-$(CONFIG_AVS2_PARSER) += avs2_parser.o
|
||||
OBJS-$(CONFIG_BMP_PARSER) += bmp_parser.o
|
||||
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
|
||||
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
|
||||
@ -1007,14 +1033,16 @@ OBJS-$(CONFIG_DVD_NAV_PARSER) += dvd_nav_parser.o
|
||||
OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsub_parser.o
|
||||
OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_G723_1_PARSER) += g723_1_parser.o
|
||||
OBJS-$(CONFIG_G729_PARSER) += g729_parser.o
|
||||
OBJS-$(CONFIG_GIF_PARSER) += gif_parser.o
|
||||
OBJS-$(CONFIG_GSM_PARSER) += gsm_parser.o
|
||||
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
|
||||
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
|
||||
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264_sei.o h264data.o
|
||||
OBJS-$(CONFIG_HEVC_PARSER) += hevc_parser.o hevc_data.o
|
||||
OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
|
||||
OBJS-$(CONFIG_MLP_PARSER) += mlp_parser.o mlp.o
|
||||
OBJS-$(CONFIG_MLP_PARSER) += mlp_parse.o mlp_parser.o mlp.o
|
||||
OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
|
||||
mpeg4videodec.o mpeg4video.o \
|
||||
ituh263dec.o h263dec.o h263data.o
|
||||
@ -1040,18 +1068,20 @@ OBJS-$(CONFIG_XMA_PARSER) += xma_parser.o
|
||||
|
||||
# bitstream filters
|
||||
OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_AV1_METADATA_BSF) += av1_metadata_bsf.o
|
||||
OBJS-$(CONFIG_AV1_FRAME_SPLIT_BSF) += av1_frame_split_bsf.o
|
||||
OBJS-$(CONFIG_CHOMP_BSF) += chomp_bsf.o
|
||||
OBJS-$(CONFIG_DUMP_EXTRADATA_BSF) += dump_extradata_bsf.o
|
||||
OBJS-$(CONFIG_DCA_CORE_BSF) += dca_core_bsf.o
|
||||
OBJS-$(CONFIG_EAC3_CORE_BSF) += eac3_core_bsf.o
|
||||
OBJS-$(CONFIG_EXTRACT_EXTRADATA_BSF) += extract_extradata_bsf.o \
|
||||
h2645_parse.o
|
||||
av1_parse.o h2645_parse.o
|
||||
OBJS-$(CONFIG_FILTER_UNITS_BSF) += filter_units_bsf.o
|
||||
OBJS-$(CONFIG_H264_METADATA_BSF) += h264_metadata_bsf.o
|
||||
OBJS-$(CONFIG_H264_METADATA_BSF) += h264_metadata_bsf.o h264_levels.o
|
||||
OBJS-$(CONFIG_H264_MP4TOANNEXB_BSF) += h264_mp4toannexb_bsf.o
|
||||
OBJS-$(CONFIG_H264_REDUNDANT_PPS_BSF) += h264_redundant_pps_bsf.o
|
||||
OBJS-$(CONFIG_HAPQA_EXTRACT_BSF) += hapqa_extract_bsf.o hap.o
|
||||
OBJS-$(CONFIG_HEVC_METADATA_BSF) += h265_metadata_bsf.o
|
||||
OBJS-$(CONFIG_HEVC_METADATA_BSF) += h265_metadata_bsf.o h265_profile_level.o
|
||||
OBJS-$(CONFIG_HEVC_MP4TOANNEXB_BSF) += hevc_mp4toannexb_bsf.o
|
||||
OBJS-$(CONFIG_IMX_DUMP_HEADER_BSF) += imx_dump_header_bsf.o
|
||||
OBJS-$(CONFIG_MJPEG2JPEG_BSF) += mjpeg2jpeg_bsf.o
|
||||
@ -1063,9 +1093,12 @@ OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
|
||||
OBJS-$(CONFIG_MPEG2_METADATA_BSF) += mpeg2_metadata_bsf.o
|
||||
OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
|
||||
OBJS-$(CONFIG_NULL_BSF) += null_bsf.o
|
||||
OBJS-$(CONFIG_PRORES_METADATA_BSF) += prores_metadata_bsf.o
|
||||
OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
|
||||
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
|
||||
OBJS-$(CONFIG_TRACE_HEADERS_BSF) += trace_headers_bsf.o
|
||||
OBJS-$(CONFIG_TRUEHD_CORE_BSF) += truehd_core_bsf.o mlp_parse.o mlp.o
|
||||
OBJS-$(CONFIG_VP9_METADATA_BSF) += vp9_metadata_bsf.o
|
||||
OBJS-$(CONFIG_VP9_RAW_REORDER_BSF) += vp9_raw_reorder_bsf.o
|
||||
OBJS-$(CONFIG_VP9_SUPERFRAME_BSF) += vp9_superframe_bsf.o
|
||||
OBJS-$(CONFIG_VP9_SUPERFRAME_SPLIT_BSF) += vp9_superframe_split_bsf.o
|
||||
@ -1127,6 +1160,8 @@ TESTPROGS-$(CONFIG_IDCTDSP) += dct
|
||||
TESTPROGS-$(CONFIG_IIRFILTER) += iirfilter
|
||||
TESTPROGS-$(HAVE_MMX) += motion
|
||||
TESTPROGS-$(CONFIG_MPEGVIDEO) += mpeg12framerate
|
||||
TESTPROGS-$(CONFIG_H264_METADATA_BSF) += h264_levels
|
||||
TESTPROGS-$(CONFIG_HEVC_METADATA_BSF) += h265_levels
|
||||
TESTPROGS-$(CONFIG_RANGECODER) += rangecoder
|
||||
TESTPROGS-$(CONFIG_SNOW_ENCODER) += snowenc
|
||||
|
||||
|
@ -60,11 +60,11 @@ typedef struct A64Context {
} A64Context;

/* gray gradient */
static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
static const uint8_t mc_colors[5]={0x0,0xb,0xc,0xf,0x1};

/* other possible gradients - to be tested */
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};
//static const uint8_t mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const uint8_t mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx,
const AVFrame *p, int *dest)

@ -368,7 +368,7 @@ struct AACContext {
INTFLOAT *in, IndividualChannelStream *ics);
void (*update_ltp)(AACContext *ac, SingleChannelElement *sce);
void (*vector_pow43)(int *coefs, int len);
void (*subband_scale)(int *dst, int *src, int scale, int offset, int len);
void (*subband_scale)(int *dst, int *src, int scale, int offset, int len, void *log_context);

};
|
||||
|
@ -247,14 +247,12 @@ static void apply_independent_coupling(AACContext *ac,
|
||||
SingleChannelElement *target,
|
||||
ChannelElement *cce, int index)
|
||||
{
|
||||
int i;
|
||||
const float gain = cce->coup.gain[index][0];
|
||||
const float *src = cce->ch[0].ret;
|
||||
float *dest = target->ret;
|
||||
const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
dest[i] += gain * src[i];
|
||||
ac->fdsp->vector_fmac_scalar(dest, src, gain, len);
|
||||
}
|
||||
|
||||
#include "aacdec_template.c"
|
||||
@ -561,7 +559,7 @@ AVCodec ff_aac_decoder = {
|
||||
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
|
||||
},
|
||||
.capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
|
||||
.channel_layouts = aac_channel_layout,
|
||||
.flush = flush,
|
||||
.priv_class = &aac_decoder_class,
|
||||
@ -586,7 +584,7 @@ AVCodec ff_aac_latm_decoder = {
|
||||
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
|
||||
},
|
||||
.capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
|
||||
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
|
||||
.channel_layouts = aac_channel_layout,
|
||||
.flush = flush,
|
||||
.profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
|
||||
|
@ -162,7 +162,7 @@ static void vector_pow43(int *coefs, int len)
}
}

static void subband_scale(int *dst, int *src, int scale, int offset, int len)
static void subband_scale(int *dst, int *src, int scale, int offset, int len, void *log_context)
{
int ssign = scale < 0 ? -1 : 1;
int s = FFABS(scale);
@ -189,18 +189,18 @@ static void subband_scale(int *dst, int *src, int scale, int offset, int len)
dst[i] = out * (unsigned)ssign;
}
} else {
av_log(NULL, AV_LOG_ERROR, "Overflow in subband_scale()\n");
av_log(log_context, AV_LOG_ERROR, "Overflow in subband_scale()\n");
}
}

static void noise_scale(int *coefs, int scale, int band_energy, int len)
{
int ssign = scale < 0 ? -1 : 1;
int s = FFABS(scale);
int s = -scale;
unsigned int round;
int i, out, c = exp2tab[s & 3];
int nlz = 0;

av_assert0(s >= 0);
while (band_energy > 0x7fff) {
band_energy >>= 1;
nlz++;
@ -216,15 +216,20 @@ static void noise_scale(int *coefs, int scale, int band_energy, int len)
round = s ? 1 << (s-1) : 0;
for (i=0; i<len; i++) {
out = (int)(((int64_t)coefs[i] * c) >> 32);
coefs[i] = ((int)(out+round) >> s) * ssign;
coefs[i] = -((int)(out+round) >> s);
}
}
else {
s = s + 32;
round = 1 << (s-1);
for (i=0; i<len; i++) {
out = (int)((int64_t)((int64_t)coefs[i] * c + round) >> s);
coefs[i] = out * ssign;
if (s > 0) {
round = 1 << (s-1);
for (i=0; i<len; i++) {
out = (int)((int64_t)((int64_t)coefs[i] * c + round) >> s);
coefs[i] = -out;
}
} else {
for (i=0; i<len; i++)
coefs[i] = -(int64_t)coefs[i] * c * (1 << -s);
}
}
}
|
@ -1673,25 +1673,24 @@ static int decode_spectrum_and_dequant(AACContext *ac, INTFLOAT coef[1024],
|
||||
}
|
||||
} else if (cbt_m1 == NOISE_BT - 1) {
|
||||
for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
|
||||
#if !USE_FIXED
|
||||
float scale;
|
||||
#endif /* !USE_FIXED */
|
||||
INTFLOAT band_energy;
|
||||
|
||||
#if USE_FIXED
|
||||
for (k = 0; k < off_len; k++) {
|
||||
ac->random_state = lcg_random(ac->random_state);
|
||||
#if USE_FIXED
|
||||
cfo[k] = ac->random_state >> 3;
|
||||
#else
|
||||
cfo[k] = ac->random_state;
|
||||
#endif /* USE_FIXED */
|
||||
}
|
||||
|
||||
#if USE_FIXED
|
||||
band_energy = ac->fdsp->scalarproduct_fixed(cfo, cfo, off_len);
|
||||
band_energy = fixed_sqrt(band_energy, 31);
|
||||
noise_scale(cfo, sf[idx], band_energy, off_len);
|
||||
#else
|
||||
float scale;
|
||||
|
||||
for (k = 0; k < off_len; k++) {
|
||||
ac->random_state = lcg_random(ac->random_state);
|
||||
cfo[k] = ac->random_state;
|
||||
}
|
||||
|
||||
band_energy = ac->fdsp->scalarproduct_float(cfo, cfo, off_len);
|
||||
scale = sf[idx] / sqrtf(band_energy);
|
||||
ac->fdsp->vector_fmul_scalar(cfo, cfo, scale, off_len);
|
||||
@ -1927,7 +1926,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, INTFLOAT coef[1024],
|
||||
if (cbt_m1 < NOISE_BT - 1) {
|
||||
for (group = 0; group < (int)g_len; group++, cfo+=128) {
|
||||
ac->vector_pow43(cfo, off_len);
|
||||
ac->subband_scale(cfo, cfo, sf[idx], 34, off_len);
|
||||
ac->subband_scale(cfo, cfo, sf[idx], 34, off_len, ac->avctx);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2158,7 +2157,7 @@ static void apply_intensity_stereo(AACContext *ac,
|
||||
coef0 + group * 128 + offsets[i],
|
||||
scale,
|
||||
23,
|
||||
offsets[i + 1] - offsets[i]);
|
||||
offsets[i + 1] - offsets[i] ,ac->avctx);
|
||||
#else
|
||||
ac->fdsp->vector_fmul_scalar(coef1 + group * 128 + offsets[i],
|
||||
coef0 + group * 128 + offsets[i],
|
||||
@ -2493,6 +2492,9 @@ static void apply_tns(INTFLOAT coef_param[1024], TemporalNoiseShaping *tns,
|
||||
INTFLOAT tmp[TNS_MAX_ORDER+1];
|
||||
UINTFLOAT *coef = coef_param;
|
||||
|
||||
if(!mmm)
|
||||
return;
|
||||
|
||||
for (w = 0; w < ics->num_windows; w++) {
|
||||
bottom = ics->num_swb;
|
||||
for (filt = 0; filt < tns->n_filt[w]; filt++) {
|
||||
@ -2657,7 +2659,7 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
|
||||
ac->mdct.imdct_half(&ac->mdct, buf, in);
|
||||
#if USE_FIXED
|
||||
for (i=0; i<1024; i++)
|
||||
buf[i] = (buf[i] + 4) >> 3;
|
||||
buf[i] = (buf[i] + 4LL) >> 3;
|
||||
#endif /* USE_FIXED */
|
||||
}
|
||||
|
||||
@ -3122,6 +3124,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
int samples = 0, multiplier, audio_found = 0, pce_found = 0;
|
||||
int is_dmono, sce_count = 0;
|
||||
int payload_alignment;
|
||||
uint8_t che_presence[4][MAX_ELEM_ID] = {{0}};
|
||||
|
||||
ac->frame = data;
|
||||
|
||||
@ -3159,6 +3162,17 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
|
||||
if (elem_type < TYPE_DSE) {
|
||||
if (che_presence[elem_type][elem_id]) {
|
||||
int error = che_presence[elem_type][elem_id] > 1;
|
||||
av_log(ac->avctx, error ? AV_LOG_ERROR : AV_LOG_DEBUG, "channel element %d.%d duplicate\n",
|
||||
elem_type, elem_id);
|
||||
if (error) {
|
||||
err = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
che_presence[elem_type][elem_id]++;
|
||||
|
||||
if (!(che=get_che(ac, elem_type, elem_id))) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
|
||||
elem_type, elem_id);
|
||||
@ -3324,20 +3338,14 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
|
||||
AV_PKT_DATA_JP_DUALMONO,
|
||||
&jp_dualmono_size);
|
||||
|
||||
if (new_extradata && 0) {
|
||||
av_free(avctx->extradata);
|
||||
avctx->extradata = av_mallocz(new_extradata_size +
|
||||
AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!avctx->extradata)
|
||||
return AVERROR(ENOMEM);
|
||||
avctx->extradata_size = new_extradata_size;
|
||||
memcpy(avctx->extradata, new_extradata, new_extradata_size);
|
||||
push_output_configuration(ac);
|
||||
if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
|
||||
avctx->extradata,
|
||||
avctx->extradata_size*8LL, 1) < 0) {
|
||||
pop_output_configuration(ac);
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (new_extradata) {
|
||||
/* discard previous configuration */
|
||||
ac->oc[1].status = OC_NONE;
|
||||
err = decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
|
||||
new_extradata,
|
||||
new_extradata_size * 8LL, 1);
|
||||
if (err < 0) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -982,11 +982,13 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
|
||||
}
|
||||
|
||||
if (s->needs_pce) {
|
||||
char buf[64];
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(aac_pce_configs); i++)
|
||||
if (avctx->channel_layout == aac_pce_configs[i].layout)
|
||||
break;
|
||||
ERROR_IF(i == FF_ARRAY_ELEMS(aac_pce_configs), "Unsupported channel layout\n");
|
||||
av_log(avctx, AV_LOG_INFO, "Using a PCE to encode channel layout\n");
|
||||
av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
|
||||
ERROR_IF(i == FF_ARRAY_ELEMS(aac_pce_configs), "Unsupported channel layout \"%s\"\n", buf);
|
||||
av_log(avctx, AV_LOG_INFO, "Using a PCE to encode channel layout \"%s\"\n", buf);
|
||||
s->pce = aac_pce_configs[i];
|
||||
s->reorder_map = s->pce.reorder_map;
|
||||
s->chan_map = s->pce.config_map;
|
||||
|
@ -144,7 +144,7 @@ void ff_aac_adjust_common_ltp(AACEncContext *s, ChannelElement *cpe)
|
||||
int sum = sce0->ics.ltp.used[sfb] + sce1->ics.ltp.used[sfb];
|
||||
if (sum != 2) {
|
||||
sce0->ics.ltp.used[sfb] = 0;
|
||||
} else if (sum == 2) {
|
||||
} else {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ static int read_ ## PAR ## _data(AVCodecContext *avctx, GetBitContext *gb, PSCon
return 0; \
err: \
av_log(avctx, AV_LOG_ERROR, "illegal "#PAR"\n"); \
return -1; \
return AVERROR_INVALIDDATA; \
}

READ_PAR_DATA(iid, huff_offset[table_idx], 0, FFABS(ps->iid_par[e][b]) > 7 + 8 * ps->iid_quant)
|
@ -54,10 +54,10 @@ static void ps_hybrid_analysis_c(INTFLOAT (*out)[2], INTFLOAT (*in)[2],
INT64FLOAT sum_im = (INT64FLOAT)filter[i][6][0] * in[6][1];

for (j = 0; j < 6; j++) {
INTFLOAT in0_re = in[j][0];
INTFLOAT in0_im = in[j][1];
INTFLOAT in1_re = in[12-j][0];
INTFLOAT in1_im = in[12-j][1];
INT64FLOAT in0_re = in[j][0];
INT64FLOAT in0_im = in[j][1];
INT64FLOAT in1_re = in[12-j][0];
INT64FLOAT in1_im = in[12-j][1];
sum_re += (INT64FLOAT)filter[i][j][0] * (in0_re + in1_re) -
(INT64FLOAT)filter[i][j][1] * (in0_im - in1_im);
sum_im += (INT64FLOAT)filter[i][j][0] * (in0_im + in1_im) +
|
@ -111,16 +111,4 @@ static DECLARE_ALIGNED(32, INTFLOAT, sbr_qmf_window_us)[640] = {
|
||||
Q31( 0.8537385600f),
|
||||
};
|
||||
|
||||
static av_cold void aacsbr_tableinit(void)
|
||||
{
|
||||
int n;
|
||||
for (n = 1; n < 320; n++)
|
||||
sbr_qmf_window_us[320 + n] = sbr_qmf_window_us[320 - n];
|
||||
sbr_qmf_window_us[384] = -sbr_qmf_window_us[384];
|
||||
sbr_qmf_window_us[512] = -sbr_qmf_window_us[512];
|
||||
|
||||
for (n = 0; n < 320; n++)
|
||||
sbr_qmf_window_ds[n] = sbr_qmf_window_us[2*n];
|
||||
}
|
||||
|
||||
#endif /* AVCODEC_AACSBR_TABLEGEN_COMMON_H */
|
||||
|
@ -34,6 +34,18 @@
|
||||
|
||||
#include "libavutil/qsort.h"
|
||||
|
||||
static av_cold void aacsbr_tableinit(void)
|
||||
{
|
||||
int n;
|
||||
for (n = 1; n < 320; n++)
|
||||
sbr_qmf_window_us[320 + n] = sbr_qmf_window_us[320 - n];
|
||||
sbr_qmf_window_us[384] = -sbr_qmf_window_us[384];
|
||||
sbr_qmf_window_us[512] = -sbr_qmf_window_us[512];
|
||||
|
||||
for (n = 0; n < 320; n++)
|
||||
sbr_qmf_window_ds[n] = sbr_qmf_window_us[2*n];
|
||||
}
|
||||
|
||||
av_cold void AAC_RENAME(ff_aac_sbr_init)(void)
|
||||
{
|
||||
static const struct {
|
||||
|
@ -9,11 +9,13 @@ OBJS-$(CONFIG_HPELDSP) += aarch64/hpeldsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_init.o
|
||||
OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
|
||||
OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp_init.o
|
||||
OBJS-$(CONFIG_VP8DSP) += aarch64/vp8dsp_init_aarch64.o
|
||||
|
||||
# decoders/encoders
|
||||
OBJS-$(CONFIG_AAC_DECODER) += aarch64/aacpsdsp_init_aarch64.o \
|
||||
aarch64/sbrdsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_DCA_DECODER) += aarch64/synth_filter_init.o
|
||||
OBJS-$(CONFIG_OPUS_DECODER) += aarch64/opusdsp_init.o
|
||||
OBJS-$(CONFIG_RV40_DECODER) += aarch64/rv40dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VC1DSP) += aarch64/vc1dsp_init_aarch64.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_init.o
|
||||
@ -43,10 +45,12 @@ NEON-OBJS-$(CONFIG_IDCTDSP) += aarch64/idctdsp_init_aarch64.o \
|
||||
aarch64/simple_idct_neon.o
|
||||
NEON-OBJS-$(CONFIG_MDCT) += aarch64/mdct_neon.o
|
||||
NEON-OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_VP8DSP) += aarch64/vp8dsp_neon.o
|
||||
|
||||
# decoders/encoders
|
||||
NEON-OBJS-$(CONFIG_AAC_DECODER) += aarch64/aacpsdsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_DCA_DECODER) += aarch64/synth_filter_neon.o
|
||||
NEON-OBJS-$(CONFIG_OPUS_DECODER) += aarch64/opusdsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_neon.o
|
||||
NEON-OBJS-$(CONFIG_VP9_DECODER) += aarch64/vp9itxfm_16bpp_neon.o \
|
||||
aarch64/vp9itxfm_neon.o \
|
||||
|
@ -19,14 +19,6 @@
#ifndef AVCODEC_AARCH64_ASM_OFFSETS_H
#define AVCODEC_AARCH64_ASM_OFFSETS_H

/* CeltIMDCTContext */
#define CELT_EXPTAB 0x20
#define CELT_FFT_N 0x00
#define CELT_LEN2 0x04
#define CELT_LEN4 (CELT_LEN2 + 0x4) // loaded as pair
#define CELT_TMP 0x10
#define CELT_TWIDDLE (CELT_TMP + 0x8) // loaded as pair

/* FFTContext */
#define IMDCT_HALF 0x48
|
||||
|
@ -25,29 +25,43 @@
|
||||
#include "libavutil/aarch64/cpu.h"
|
||||
#include "libavcodec/h264dsp.h"
|
||||
|
||||
void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, int stride, int alpha,
|
||||
void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta, int8_t *tc0);
|
||||
void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, int stride, int alpha,
|
||||
void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta, int8_t *tc0);
|
||||
void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, int stride, int alpha,
|
||||
void ff_h264_v_loop_filter_luma_intra_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta);
|
||||
void ff_h264_h_loop_filter_luma_intra_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta);
|
||||
void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta, int8_t *tc0);
|
||||
void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, int stride, int alpha,
|
||||
void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta, int8_t *tc0);
|
||||
void ff_h264_h_loop_filter_chroma422_neon(uint8_t *pix, ptrdiff_t stride, int alpha,
|
||||
int beta, int8_t *tc0);
|
||||
void ff_h264_v_loop_filter_chroma_intra_neon(uint8_t *pix, ptrdiff_t stride,
|
||||
int alpha, int beta);
|
||||
void ff_h264_h_loop_filter_chroma_intra_neon(uint8_t *pix, ptrdiff_t stride,
|
||||
int alpha, int beta);
|
||||
void ff_h264_h_loop_filter_chroma422_intra_neon(uint8_t *pix, ptrdiff_t stride,
|
||||
int alpha, int beta);
|
||||
void ff_h264_h_loop_filter_chroma_mbaff_intra_neon(uint8_t *pix, ptrdiff_t stride,
|
||||
int alpha, int beta);
|
||||
|
||||
void ff_weight_h264_pixels_16_neon(uint8_t *dst, int stride, int height,
|
||||
void ff_weight_h264_pixels_16_neon(uint8_t *dst, ptrdiff_t stride, int height,
|
||||
int log2_den, int weight, int offset);
|
||||
void ff_weight_h264_pixels_8_neon(uint8_t *dst, int stride, int height,
|
||||
void ff_weight_h264_pixels_8_neon(uint8_t *dst, ptrdiff_t stride, int height,
|
||||
int log2_den, int weight, int offset);
|
||||
void ff_weight_h264_pixels_4_neon(uint8_t *dst, int stride, int height,
|
||||
void ff_weight_h264_pixels_4_neon(uint8_t *dst, ptrdiff_t stride, int height,
|
||||
int log2_den, int weight, int offset);
|
||||
|
||||
void ff_biweight_h264_pixels_16_neon(uint8_t *dst, uint8_t *src, int stride,
|
||||
void ff_biweight_h264_pixels_16_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
|
||||
int height, int log2_den, int weightd,
|
||||
int weights, int offset);
|
||||
void ff_biweight_h264_pixels_8_neon(uint8_t *dst, uint8_t *src, int stride,
|
||||
void ff_biweight_h264_pixels_8_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
|
||||
int height, int log2_den, int weightd,
|
||||
int weights, int offset);
|
||||
void ff_biweight_h264_pixels_4_neon(uint8_t *dst, uint8_t *src, int stride,
|
||||
void ff_biweight_h264_pixels_4_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
|
||||
int height, int log2_den, int weightd,
|
||||
int weights, int offset);
|
||||
|
||||
@ -77,9 +91,22 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
|
||||
if (have_neon(cpu_flags) && bit_depth == 8) {
|
||||
c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
|
||||
c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
|
||||
c->h264_v_loop_filter_luma_intra= ff_h264_v_loop_filter_luma_intra_neon;
|
||||
c->h264_h_loop_filter_luma_intra= ff_h264_h_loop_filter_luma_intra_neon;
|
||||
|
||||
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;
|
||||
c->h264_v_loop_filter_chroma_intra = ff_h264_v_loop_filter_chroma_intra_neon;
|
||||
|
||||
if (chroma_format_idc <= 1) {
|
||||
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;
|
||||
c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma_intra_neon;
|
||||
c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_mbaff_intra_neon;
|
||||
} else {
|
||||
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma422_neon;
|
||||
c->h264_h_loop_filter_chroma_mbaff = ff_h264_h_loop_filter_chroma_neon;
|
||||
c->h264_h_loop_filter_chroma_intra = ff_h264_h_loop_filter_chroma422_intra_neon;
|
||||
c->h264_h_loop_filter_chroma_mbaff_intra = ff_h264_h_loop_filter_chroma_intra_neon;
|
||||
}
|
||||
|
||||
c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
|
||||
c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels_8_neon;
|
||||
|
@ -1,6 +1,7 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
|
||||
* Copyright (c) 2013 Janne Grunau <janne-libav@jannau.net>
|
||||
* Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@ -27,9 +28,9 @@
|
||||
ldr w6, [x4]
|
||||
ccmp w3, #0, #0, ne
|
||||
mov v24.S[0], w6
|
||||
and w6, w6, w6, lsl #16
|
||||
and w8, w6, w6, lsl #16
|
||||
b.eq 1f
|
||||
ands w6, w6, w6, lsl #8
|
||||
ands w8, w8, w8, lsl #8
|
||||
b.ge 2f
|
||||
1:
|
||||
ret
|
||||
@ -54,9 +55,12 @@
|
||||
uabd v17.16B, v20.16B, v16.16B // abs(p2 - p0)
|
||||
and v21.16B, v21.16B, v28.16B
|
||||
uabd v19.16B, v4.16B, v0.16B // abs(q2 - q0)
|
||||
and v21.16B, v21.16B, v30.16B // < beta
|
||||
shrn v30.8b, v21.8h, #4
|
||||
mov x7, v30.d[0]
|
||||
cmhi v17.16B, v22.16B, v17.16B // < beta
|
||||
and v21.16B, v21.16B, v30.16B
|
||||
cmhi v19.16B, v22.16B, v19.16B // < beta
|
||||
cbz x7, 9f
|
||||
and v17.16B, v17.16B, v21.16B
|
||||
and v19.16B, v19.16B, v21.16B
|
||||
and v24.16B, v24.16B, v21.16B
|
||||
@ -124,12 +128,13 @@ function ff_h264_v_loop_filter_luma_neon, export=1
|
||||
st1 {v16.16B}, [x0], x1
|
||||
st1 {v0.16B}, [x0], x1
|
||||
st1 {v19.16B}, [x0]
|
||||
|
||||
9:
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_h264_h_loop_filter_luma_neon, export=1
|
||||
h264_loop_filter_start
|
||||
sxtw x1, w1
|
||||
|
||||
sub x0, x0, #4
|
||||
ld1 {v6.8B}, [x0], x1
|
||||
@ -173,32 +178,231 @@ function ff_h264_h_loop_filter_luma_neon, export=1
|
||||
st1 {v16.S}[3], [x0], x1
|
||||
st1 {v0.S}[3], [x0], x1
|
||||
st1 {v19.S}[3], [x0], x1
|
||||
|
||||
9:
|
||||
ret
|
||||
endfunc
|
||||
|
||||
|
||||
.macro h264_loop_filter_start_intra
|
||||
orr w4, w2, w3
|
||||
cbnz w4, 1f
|
||||
ret
|
||||
1:
|
||||
sxtw x1, w1
|
||||
dup v30.16b, w2 // alpha
|
||||
dup v31.16b, w3 // beta
|
||||
.endm
|
||||
|
||||
.macro h264_loop_filter_luma_intra
|
||||
uabd v16.16b, v7.16b, v0.16b // abs(p0 - q0)
|
||||
uabd v17.16b, v6.16b, v7.16b // abs(p1 - p0)
|
||||
uabd v18.16b, v1.16b, v0.16b // abs(q1 - q0)
|
||||
cmhi v19.16b, v30.16b, v16.16b // < alpha
|
||||
cmhi v17.16b, v31.16b, v17.16b // < beta
|
||||
cmhi v18.16b, v31.16b, v18.16b // < beta
|
||||
|
||||
movi v29.16b, #2
|
||||
ushr v30.16b, v30.16b, #2 // alpha >> 2
|
||||
add v30.16b, v30.16b, v29.16b // (alpha >> 2) + 2
|
||||
cmhi v16.16b, v30.16b, v16.16b // < (alpha >> 2) + 2
|
||||
|
||||
and v19.16b, v19.16b, v17.16b
|
||||
and v19.16b, v19.16b, v18.16b
|
||||
shrn v20.8b, v19.8h, #4
|
||||
mov x4, v20.d[0]
|
||||
cbz x4, 9f
|
||||
|
||||
ushll v20.8h, v6.8b, #1
|
||||
ushll v22.8h, v1.8b, #1
|
||||
ushll2 v21.8h, v6.16b, #1
|
||||
ushll2 v23.8h, v1.16b, #1
|
||||
uaddw v20.8h, v20.8h, v7.8b
|
||||
uaddw v22.8h, v22.8h, v0.8b
|
||||
uaddw2 v21.8h, v21.8h, v7.16b
|
||||
uaddw2 v23.8h, v23.8h, v0.16b
|
||||
uaddw v20.8h, v20.8h, v1.8b
|
||||
uaddw v22.8h, v22.8h, v6.8b
|
||||
uaddw2 v21.8h, v21.8h, v1.16b
|
||||
uaddw2 v23.8h, v23.8h, v6.16b
|
||||
|
||||
rshrn v24.8b, v20.8h, #2 // p0'_1
|
||||
rshrn v25.8b, v22.8h, #2 // q0'_1
|
||||
rshrn2 v24.16b, v21.8h, #2 // p0'_1
|
||||
rshrn2 v25.16b, v23.8h, #2 // q0'_1
|
||||
|
||||
uabd v17.16b, v5.16b, v7.16b // abs(p2 - p0)
|
||||
uabd v18.16b, v2.16b, v0.16b // abs(q2 - q0)
|
||||
cmhi v17.16b, v31.16b, v17.16b // < beta
|
||||
cmhi v18.16b, v31.16b, v18.16b // < beta
|
||||
|
||||
and v17.16b, v16.16b, v17.16b // if_2 && if_3
|
||||
and v18.16b, v16.16b, v18.16b // if_2 && if_4
|
||||
|
||||
not v30.16b, v17.16b
|
||||
not v31.16b, v18.16b
|
||||
|
||||
and v30.16b, v30.16b, v19.16b // if_1 && !(if_2 && if_3)
|
||||
and v31.16b, v31.16b, v19.16b // if_1 && !(if_2 && if_4)
|
||||
|
||||
and v17.16b, v19.16b, v17.16b // if_1 && if_2 && if_3
|
||||
and v18.16b, v19.16b, v18.16b // if_1 && if_2 && if_4
|
||||
|
||||
//calc p, v7, v6, v5, v4, v17, v7, v6, v5, v4
|
||||
uaddl v26.8h, v5.8b, v7.8b
|
||||
uaddl2 v27.8h, v5.16b, v7.16b
|
||||
uaddw v26.8h, v26.8h, v0.8b
|
||||
uaddw2 v27.8h, v27.8h, v0.16b
|
||||
add v20.8h, v20.8h, v26.8h
|
||||
add v21.8h, v21.8h, v27.8h
|
||||
uaddw v20.8h, v20.8h, v0.8b
|
||||
uaddw2 v21.8h, v21.8h, v0.16b
|
||||
rshrn v20.8b, v20.8h, #3 // p0'_2
|
||||
rshrn2 v20.16b, v21.8h, #3 // p0'_2
|
||||
uaddw v26.8h, v26.8h, v6.8b
|
||||
uaddw2 v27.8h, v27.8h, v6.16b
|
||||
rshrn v21.8b, v26.8h, #2 // p1'_2
|
||||
rshrn2 v21.16b, v27.8h, #2 // p1'_2
|
||||
uaddl v28.8h, v4.8b, v5.8b
|
||||
uaddl2 v29.8h, v4.16b, v5.16b
|
||||
shl v28.8h, v28.8h, #1
|
||||
shl v29.8h, v29.8h, #1
|
||||
add v28.8h, v28.8h, v26.8h
|
||||
add v29.8h, v29.8h, v27.8h
|
||||
rshrn v19.8b, v28.8h, #3 // p2'_2
|
||||
rshrn2 v19.16b, v29.8h, #3 // p2'_2
|
||||
|
||||
//calc q, v0, v1, v2, v3, v18, v0, v1, v2, v3
|
||||
uaddl v26.8h, v2.8b, v0.8b
|
||||
uaddl2 v27.8h, v2.16b, v0.16b
|
||||
uaddw v26.8h, v26.8h, v7.8b
|
||||
uaddw2 v27.8h, v27.8h, v7.16b
|
||||
add v22.8h, v22.8h, v26.8h
|
||||
add v23.8h, v23.8h, v27.8h
|
||||
uaddw v22.8h, v22.8h, v7.8b
|
||||
uaddw2 v23.8h, v23.8h, v7.16b
|
||||
rshrn v22.8b, v22.8h, #3 // q0'_2
|
||||
rshrn2 v22.16b, v23.8h, #3 // q0'_2
|
||||
uaddw v26.8h, v26.8h, v1.8b
|
||||
uaddw2 v27.8h, v27.8h, v1.16b
|
||||
rshrn v23.8b, v26.8h, #2 // q1'_2
|
||||
rshrn2 v23.16b, v27.8h, #2 // q1'_2
|
||||
uaddl v28.8h, v2.8b, v3.8b
|
||||
uaddl2 v29.8h, v2.16b, v3.16b
|
||||
shl v28.8h, v28.8h, #1
|
||||
shl v29.8h, v29.8h, #1
|
||||
add v28.8h, v28.8h, v26.8h
|
||||
add v29.8h, v29.8h, v27.8h
|
||||
rshrn v26.8b, v28.8h, #3 // q2'_2
|
||||
rshrn2 v26.16b, v29.8h, #3 // q2'_2
|
||||
|
||||
bit v7.16b, v24.16b, v30.16b // p0'_1
|
||||
bit v0.16b, v25.16b, v31.16b // q0'_1
|
||||
bit v7.16b, v20.16b, v17.16b // p0'_2
|
||||
bit v6.16b, v21.16b, v17.16b // p1'_2
|
||||
bit v5.16b, v19.16b, v17.16b // p2'_2
|
||||
bit v0.16b, v22.16b, v18.16b // q0'_2
|
||||
bit v1.16b, v23.16b, v18.16b // q1'_2
|
||||
bit v2.16b, v26.16b, v18.16b // q2'_2
|
||||
.endm
|
||||
|
||||
function ff_h264_v_loop_filter_luma_intra_neon, export=1
|
||||
h264_loop_filter_start_intra
|
||||
|
||||
ld1 {v0.16b}, [x0], x1 // q0
|
||||
ld1 {v1.16b}, [x0], x1 // q1
|
||||
ld1 {v2.16b}, [x0], x1 // q2
|
||||
ld1 {v3.16b}, [x0], x1 // q3
|
||||
sub x0, x0, x1, lsl #3
|
||||
ld1 {v4.16b}, [x0], x1 // p3
|
||||
ld1 {v5.16b}, [x0], x1 // p2
|
||||
ld1 {v6.16b}, [x0], x1 // p1
|
||||
ld1 {v7.16b}, [x0] // p0
|
||||
|
||||
h264_loop_filter_luma_intra
|
||||
|
||||
sub x0, x0, x1, lsl #1
|
||||
st1 {v5.16b}, [x0], x1 // p2
|
||||
st1 {v6.16b}, [x0], x1 // p1
|
||||
st1 {v7.16b}, [x0], x1 // p0
|
||||
st1 {v0.16b}, [x0], x1 // q0
|
||||
st1 {v1.16b}, [x0], x1 // q1
|
||||
st1 {v2.16b}, [x0] // q2
|
||||
9:
|
||||
ret
|
||||
endfunc
|
||||
|
||||
function ff_h264_h_loop_filter_luma_intra_neon, export=1
|
||||
h264_loop_filter_start_intra
|
||||
|
||||
sub x0, x0, #4
|
||||
ld1 {v4.8b}, [x0], x1
|
||||
ld1 {v5.8b}, [x0], x1
|
||||
ld1 {v6.8b}, [x0], x1
|
||||
ld1 {v7.8b}, [x0], x1
|
||||
ld1 {v0.8b}, [x0], x1
|
||||
ld1 {v1.8b}, [x0], x1
|
||||
ld1 {v2.8b}, [x0], x1
|
||||
ld1 {v3.8b}, [x0], x1
|
||||
ld1 {v4.d}[1], [x0], x1
|
||||
ld1 {v5.d}[1], [x0], x1
|
||||
ld1 {v6.d}[1], [x0], x1
|
||||
ld1 {v7.d}[1], [x0], x1
|
||||
ld1 {v0.d}[1], [x0], x1
|
||||
ld1 {v1.d}[1], [x0], x1
|
||||
ld1 {v2.d}[1], [x0], x1
|
||||
ld1 {v3.d}[1], [x0], x1
|
||||
|
||||
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
||||
|
||||
h264_loop_filter_luma_intra
|
||||
|
||||
transpose_8x16B v4, v5, v6, v7, v0, v1, v2, v3, v21, v23
|
||||
|
||||
sub x0, x0, x1, lsl #4
|
||||
st1 {v4.8b}, [x0], x1
|
||||
st1 {v5.8b}, [x0], x1
|
||||
st1 {v6.8b}, [x0], x1
|
||||
st1 {v7.8b}, [x0], x1
|
||||
st1 {v0.8b}, [x0], x1
|
||||
st1 {v1.8b}, [x0], x1
|
||||
st1 {v2.8b}, [x0], x1
|
||||
st1 {v3.8b}, [x0], x1
|
||||
st1 {v4.d}[1], [x0], x1
|
||||
st1 {v5.d}[1], [x0], x1
|
||||
st1 {v6.d}[1], [x0], x1
|
||||
st1 {v7.d}[1], [x0], x1
|
||||
st1 {v0.d}[1], [x0], x1
|
||||
st1 {v1.d}[1], [x0], x1
|
||||
st1 {v2.d}[1], [x0], x1
|
||||
st1 {v3.d}[1], [x0], x1
|
||||
9:
|
||||
ret
|
||||
endfunc

.macro h264_loop_filter_chroma
        dup             v22.8B, w2              // alpha
        dup             v23.8B, w3              // beta
        uxtl            v24.8H, v24.8B
        uabd            v26.8B, v16.8B, v0.8B   // abs(p0 - q0)
        uxtl            v4.8H,  v0.8B
        uabd            v28.8B, v18.8B, v16.8B  // abs(p1 - p0)
        usubw           v4.8H,  v4.8H,  v16.8B
        sli             v24.8H, v24.8H, #8
        shl             v4.8H,  v4.8H,  #2
        uabd            v30.8B, v2.8B,  v0.8B   // abs(q1 - q0)
        uaddw           v4.8H,  v4.8H,  v18.8B
        cmhi            v26.8B, v22.8B, v26.8B  // < alpha
        cmhi            v28.8B, v23.8B, v28.8B  // < beta
        cmhi            v30.8B, v23.8B, v30.8B  // < beta
        uxtl            v4.8H,  v0.8B
        and             v26.8B, v26.8B, v28.8B
        usubw           v4.8H,  v4.8H,  v16.8B
        and             v26.8B, v26.8B, v30.8B
        shl             v4.8H,  v4.8H,  #2
        mov             x8,     v26.d[0]
        sli             v24.8H, v24.8H, #8
        uaddw           v4.8H,  v4.8H,  v18.8B
        cbz             x8,     9f
        usubw           v4.8H,  v4.8H,  v2.8B
        dup             v22.8B, w3              // beta
        rshrn           v4.8B,  v4.8H,  #3
        cmhi            v28.8B, v22.8B, v28.8B  // < beta
        cmhi            v30.8B, v22.8B, v30.8B  // < beta
        smin            v4.8B,  v4.8B,  v24.8B
        neg             v25.8B, v24.8B
        and             v26.8B, v26.8B, v28.8B
        smax            v4.8B,  v4.8B,  v25.8B
        and             v26.8B, v26.8B, v30.8B
        uxtl            v22.8H, v0.8B
        and             v4.8B,  v4.8B,  v26.8B
        uxtl            v28.8H, v16.8B
@ -210,6 +414,7 @@ endfunc

function ff_h264_v_loop_filter_chroma_neon, export=1
        h264_loop_filter_start
        sxtw            x1, w1

        sub             x0, x0, x1, lsl #1
        ld1             {v18.8B}, [x0], x1
@ -222,14 +427,16 @@ function ff_h264_v_loop_filter_chroma_neon, export=1
        sub             x0, x0, x1, lsl #1
        st1             {v16.8B}, [x0], x1
        st1             {v0.8B},  [x0], x1

9:
        ret
endfunc

function ff_h264_h_loop_filter_chroma_neon, export=1
        h264_loop_filter_start
        sxtw            x1, w1

        sub             x0, x0, #2
h_loop_filter_chroma420:
        ld1             {v18.S}[0], [x0], x1
        ld1             {v16.S}[0], [x0], x1
        ld1             {v0.S}[0],  [x0], x1
@ -254,10 +461,134 @@ function ff_h264_h_loop_filter_chroma_neon, export=1
        st1             {v16.S}[1], [x0], x1
        st1             {v0.S}[1],  [x0], x1
        st1             {v2.S}[1],  [x0], x1

9:
        ret
endfunc

function ff_h264_h_loop_filter_chroma422_neon, export=1
        sxtw            x1, w1
        h264_loop_filter_start
        add             x5, x0, x1
        sub             x0, x0, #2
        add             x1, x1, x1
        mov             x7, x30
        bl              h_loop_filter_chroma420
        mov             x30, x7
        sub             x0, x5, #2
        mov             v24.s[0], w6
        b               h_loop_filter_chroma420
endfunc

.macro h264_loop_filter_chroma_intra
        uabd            v26.8b, v16.8b, v17.8b  // abs(p0 - q0)
        uabd            v27.8b, v18.8b, v16.8b  // abs(p1 - p0)
        uabd            v28.8b, v19.8b, v17.8b  // abs(q1 - q0)
        cmhi            v26.8b, v30.8b, v26.8b  // < alpha
        cmhi            v27.8b, v31.8b, v27.8b  // < beta
        cmhi            v28.8b, v31.8b, v28.8b  // < beta
        and             v26.8b, v26.8b, v27.8b
        and             v26.8b, v26.8b, v28.8b
        mov             x2, v26.d[0]

        ushll           v4.8h,  v18.8b, #1
        ushll           v6.8h,  v19.8b, #1
        cbz             x2, 9f
        uaddl           v20.8h, v16.8b, v19.8b
        uaddl           v22.8h, v17.8b, v18.8b
        add             v20.8h, v20.8h, v4.8h
        add             v22.8h, v22.8h, v6.8h
        uqrshrn         v24.8b, v20.8h, #2
        uqrshrn         v25.8b, v22.8h, #2
        bit             v16.8b, v24.8b, v26.8b
        bit             v17.8b, v25.8b, v26.8b
.endm
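// (Annotation, not part of the upstream file: wherever |p0-q0| < alpha,
// |p1-p0| < beta and |q1-q0| < beta, the macro above replaces the chroma
// edge pixels with
//     p0' = (2*p1 + p0 + q1 + 2) >> 2
//     q0' = (2*q1 + q0 + p1 + 2) >> 2
// which is what the ushll/uaddl/uqrshrn #2 sequence computes.)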

function ff_h264_v_loop_filter_chroma_intra_neon, export=1
        h264_loop_filter_start_intra

        sub             x0, x0, x1, lsl #1
        ld1             {v18.8b}, [x0], x1
        ld1             {v16.8b}, [x0], x1
        ld1             {v17.8b}, [x0], x1
        ld1             {v19.8b}, [x0]

        h264_loop_filter_chroma_intra

        sub             x0, x0, x1, lsl #1
        st1             {v16.8b}, [x0], x1
        st1             {v17.8b}, [x0], x1

9:
        ret
endfunc

function ff_h264_h_loop_filter_chroma_mbaff_intra_neon, export=1
        h264_loop_filter_start_intra

        sub             x4, x0, #2
        sub             x0, x0, #1
        ld1             {v18.8b}, [x4], x1
        ld1             {v16.8b}, [x4], x1
        ld1             {v17.8b}, [x4], x1
        ld1             {v19.8b}, [x4], x1

        transpose_4x8B  v18, v16, v17, v19, v26, v27, v28, v29

        h264_loop_filter_chroma_intra

        st2             {v16.b,v17.b}[0], [x0], x1
        st2             {v16.b,v17.b}[1], [x0], x1
        st2             {v16.b,v17.b}[2], [x0], x1
        st2             {v16.b,v17.b}[3], [x0], x1

9:
        ret
endfunc

function ff_h264_h_loop_filter_chroma_intra_neon, export=1
        h264_loop_filter_start_intra

        sub             x4, x0, #2
        sub             x0, x0, #1
h_loop_filter_chroma420_intra:
        ld1             {v18.8b}, [x4], x1
        ld1             {v16.8b}, [x4], x1
        ld1             {v17.8b}, [x4], x1
        ld1             {v19.8b}, [x4], x1
        ld1             {v18.s}[1], [x4], x1
        ld1             {v16.s}[1], [x4], x1
        ld1             {v17.s}[1], [x4], x1
        ld1             {v19.s}[1], [x4], x1

        transpose_4x8B  v18, v16, v17, v19, v26, v27, v28, v29

        h264_loop_filter_chroma_intra

        st2             {v16.b,v17.b}[0], [x0], x1
        st2             {v16.b,v17.b}[1], [x0], x1
        st2             {v16.b,v17.b}[2], [x0], x1
        st2             {v16.b,v17.b}[3], [x0], x1
        st2             {v16.b,v17.b}[4], [x0], x1
        st2             {v16.b,v17.b}[5], [x0], x1
        st2             {v16.b,v17.b}[6], [x0], x1
        st2             {v16.b,v17.b}[7], [x0], x1

9:
        ret
endfunc

function ff_h264_h_loop_filter_chroma422_intra_neon, export=1
        h264_loop_filter_start_intra
        sub             x4, x0, #2
        add             x5, x0, x1, lsl #3
        sub             x0, x0, #1
        mov             x7, x30
        bl              h_loop_filter_chroma420_intra
        sub             x0, x5, #1
        mov             x30, x7
        b               h_loop_filter_chroma420_intra
endfunc

.macro biweight_16 macs, macd
        dup             v0.16B, w5
        dup             v1.16B, w6

@ -23,6 +23,7 @@
#include "neon.S"

function ff_h264_idct_add_neon, export=1
.L_ff_h264_idct_add_neon:
        ld1             {v0.4H, v1.4H, v2.4H, v3.4H}, [x1]
        sxtw            x2, w2
        movi            v30.8H, #0
@ -77,6 +78,7 @@ function ff_h264_idct_add_neon, export=1
endfunc

function ff_h264_idct_dc_add_neon, export=1
.L_ff_h264_idct_dc_add_neon:
        sxtw            x2, w2
        mov             w3, #0
        ld1r            {v2.8H}, [x1]
@ -106,8 +108,8 @@ function ff_h264_idct_add16_neon, export=1
        mov             w9, w3          // stride
        movrel          x7, scan8
        mov             x10, #16
        movrel          x13, X(ff_h264_idct_dc_add_neon)
        movrel          x14, X(ff_h264_idct_add_neon)
        movrel          x13, .L_ff_h264_idct_dc_add_neon
        movrel          x14, .L_ff_h264_idct_add_neon
1:      mov             w2, w9
        ldrb            w3, [x7], #1
        ldrsw           x0, [x5], #4
@ -133,8 +135,8 @@ function ff_h264_idct_add16intra_neon, export=1
        mov             w9, w3          // stride
        movrel          x7, scan8
        mov             x10, #16
        movrel          x13, X(ff_h264_idct_dc_add_neon)
        movrel          x14, X(ff_h264_idct_add_neon)
        movrel          x13, .L_ff_h264_idct_dc_add_neon
        movrel          x14, .L_ff_h264_idct_add_neon
1:      mov             w2, w9
        ldrb            w3, [x7], #1
        ldrsw           x0, [x5], #4
@ -160,8 +162,8 @@ function ff_h264_idct_add8_neon, export=1
        add             x5, x1, #16*4   // block_offset
        add             x9, x2, #16*32  // block
        mov             w19, w3         // stride
        movrel          x13, X(ff_h264_idct_dc_add_neon)
        movrel          x14, X(ff_h264_idct_add_neon)
        movrel          x13, .L_ff_h264_idct_dc_add_neon
        movrel          x14, .L_ff_h264_idct_add_neon
        movrel          x7, scan8, 16
        mov             x10, #0
        mov             x11, #16
@ -263,6 +265,7 @@ endfunc
.endm

function ff_h264_idct8_add_neon, export=1
.L_ff_h264_idct8_add_neon:
        movi            v19.8H, #0
        sxtw            x2, w2
        ld1             {v24.8H, v25.8H}, [x1]
@ -326,6 +329,7 @@ function ff_h264_idct8_add_neon, export=1
endfunc

function ff_h264_idct8_dc_add_neon, export=1
.L_ff_h264_idct8_dc_add_neon:
        mov             w3, #0
        sxtw            x2, w2
        ld1r            {v31.8H}, [x1]
@ -375,8 +379,8 @@ function ff_h264_idct8_add4_neon, export=1
        mov             w2, w3
        movrel          x7, scan8
        mov             w10, #16
        movrel          x13, X(ff_h264_idct8_dc_add_neon)
        movrel          x14, X(ff_h264_idct8_add_neon)
        movrel          x13, .L_ff_h264_idct8_dc_add_neon
        movrel          x14, .L_ff_h264_idct8_add_neon
1:      ldrb            w9, [x7], #4
        ldrsw           x0, [x5], #16
        ldrb            w9, [x4, w9, UXTW]

35
libavcodec/aarch64/opusdsp_init.c
Normal file
@ -0,0 +1,35 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/aarch64/cpu.h"
#include "libavcodec/opusdsp.h"

void ff_opus_postfilter_neon(float *data, int period, float *gains, int len);
float ff_opus_deemphasis_neon(float *out, float *in, float coeff, int len);

av_cold void ff_opus_dsp_init_aarch64(OpusDSP *ctx)
{
    int cpu_flags = av_get_cpu_flags();

    if (have_neon(cpu_flags)) {
        ctx->postfilter = ff_opus_postfilter_neon;
        ctx->deemphasis = ff_opus_deemphasis_neon;
    }
}
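/* (Annotation, not part of the upstream file: the generic ff_opus_dsp_init()
 * in libavcodec/opusdsp.c is expected to call this init on AArch64 builds, so
 * the NEON postfilter/deemphasis only replace the C versions when NEON is
 * actually present at runtime.) */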

113
libavcodec/aarch64/opusdsp_neon.S
Normal file
@ -0,0 +1,113 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/aarch64/asm.S"

// 0.85..^1    0.85..^2    0.85..^3    0.85..^4
const tab_st, align=4
        .word 0x3f599a00, 0x3f38f671, 0x3f1d382a, 0x3f05a32f
endconst
const tab_x0, align=4
        .word 0x0, 0x3f599a00, 0x3f38f671, 0x3f1d382a
endconst
const tab_x1, align=4
        .word 0x0, 0x0, 0x3f599a00, 0x3f38f671
endconst
const tab_x2, align=4
        .word 0x0, 0x0, 0x0, 0x3f599a00
endconst

function ff_opus_deemphasis_neon, export=1
        movrel          x4, tab_st
        ld1             {v4.4s}, [x4]
        movrel          x4, tab_x0
        ld1             {v5.4s}, [x4]
        movrel          x4, tab_x1
        ld1             {v6.4s}, [x4]
        movrel          x4, tab_x2
        ld1             {v7.4s}, [x4]

        fmul            v0.4s, v4.4s, v0.s[0]

1:      ld1             {v1.4s, v2.4s}, [x1], #32

        fmla            v0.4s, v5.4s, v1.s[0]
        fmul            v3.4s, v7.4s, v2.s[2]

        fmla            v0.4s, v6.4s, v1.s[1]
        fmla            v3.4s, v6.4s, v2.s[1]

        fmla            v0.4s, v7.4s, v1.s[2]
        fmla            v3.4s, v5.4s, v2.s[0]

        fadd            v1.4s, v1.4s, v0.4s
        fadd            v2.4s, v2.4s, v3.4s

        fmla            v2.4s, v4.4s, v1.s[3]

        st1             {v1.4s, v2.4s}, [x0], #32
        fmul            v0.4s, v4.4s, v2.s[3]

        subs            w2, w2, #8
        b.gt            1b

        mov             s0, v2.s[3]

        ret
endfunc

function ff_opus_postfilter_neon, export=1
        ld1             {v0.4s}, [x2]
        dup             v1.4s, v0.s[1]
        dup             v2.4s, v0.s[2]
        dup             v0.4s, v0.s[0]

        add             w1, w1, #2
        sub             x1, x0, x1, lsl #2

        ld1             {v3.4s}, [x1]
        fmul            v3.4s, v3.4s, v2.4s

1:      add             x1, x1, #4
        ld1             {v4.4s}, [x1]
        add             x1, x1, #4
        ld1             {v5.4s}, [x1]
        add             x1, x1, #4
        ld1             {v6.4s}, [x1]
        add             x1, x1, #4
        ld1             {v7.4s}, [x1]

        fmla            v3.4s, v7.4s, v2.4s
        fadd            v6.4s, v6.4s, v4.4s

        ld1             {v8.4s}, [x0]
        fmla            v8.4s, v5.4s, v0.4s

        fmul            v6.4s, v6.4s, v1.4s
        fadd            v6.4s, v6.4s, v3.4s

        fadd            v8.4s, v8.4s, v6.4s
        fmul            v3.4s, v7.4s, v2.4s

        st1             {v8.4s}, [x0], #16

        subs            w3, w3, #4
        b.gt            1b

        ret
endfunc
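For orientation only (this note and the C below are not part of the commit): ff_opus_deemphasis_neon above vectorizes the first-order recurrence y[n] = x[n] + c*y[n-1] with c ~ 0.85 (the value encoded by 0x3f599a00), handling eight samples per loop iteration by pre-multiplying the carried state with the c^1..c^4 tables. A scalar sketch of the same recurrence, where "state" plays the role of the coeff argument and holds the previous output sample:

/* Scalar sketch of the recurrence the NEON routine unrolls (illustrative). */
static float deemphasis_ref(float *out, const float *in, float state, int len)
{
    const float c = 0.85000610f;      /* value encoded by 0x3f599a00 */
    for (int i = 0; i < len; i++) {
        state  = in[i] + c * state;   /* y[n] = x[n] + c * y[n-1] */
        out[i] = state;
    }
    return state;                     /* carried into the next call */
}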

75
libavcodec/aarch64/vp8dsp.h
Normal file
@ -0,0 +1,75 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_AARCH64_VP8DSP_H
#define AVCODEC_AARCH64_VP8DSP_H

#include "libavcodec/vp8dsp.h"

#define VP8_LF_Y(hv, inner, opt)                                            \
    void ff_vp8_##hv##_loop_filter16##inner##_##opt(uint8_t *dst,           \
                                                    ptrdiff_t stride,       \
                                                    int flim_E, int flim_I, \
                                                    int hev_thresh)

#define VP8_LF_UV(hv, inner, opt)                                            \
    void ff_vp8_##hv##_loop_filter8uv##inner##_##opt(uint8_t *dstU,          \
                                                     uint8_t *dstV,          \
                                                     ptrdiff_t stride,       \
                                                     int flim_E, int flim_I, \
                                                     int hev_thresh)

#define VP8_LF_SIMPLE(hv, opt)                                    \
    void ff_vp8_##hv##_loop_filter16_simple_##opt(uint8_t *dst,   \
                                                  ptrdiff_t stride, \
                                                  int flim)

#define VP8_LF_HV(inner, opt)  \
    VP8_LF_Y(h, inner, opt);   \
    VP8_LF_Y(v, inner, opt);   \
    VP8_LF_UV(h, inner, opt);  \
    VP8_LF_UV(v, inner, opt)

#define VP8_LF(opt)         \
    VP8_LF_HV(, opt);       \
    VP8_LF_HV(_inner, opt); \
    VP8_LF_SIMPLE(h, opt);  \
    VP8_LF_SIMPLE(v, opt)

#define VP8_MC(n, opt)                                             \
    void ff_put_vp8_##n##_##opt(uint8_t *dst, ptrdiff_t dststride, \
                                uint8_t *src, ptrdiff_t srcstride, \
                                int h, int x, int y)

#define VP8_EPEL(w, opt)             \
    VP8_MC(pixels ## w, opt);        \
    VP8_MC(epel ## w ## _h4, opt);   \
    VP8_MC(epel ## w ## _h6, opt);   \
    VP8_MC(epel ## w ## _v4, opt);   \
    VP8_MC(epel ## w ## _h4v4, opt); \
    VP8_MC(epel ## w ## _h6v4, opt); \
    VP8_MC(epel ## w ## _v6, opt);   \
    VP8_MC(epel ## w ## _h4v6, opt); \
    VP8_MC(epel ## w ## _h6v6, opt)

#define VP8_BILIN(w, opt)          \
    VP8_MC(bilin ## w ## _h, opt); \
    VP8_MC(bilin ## w ## _v, opt); \
    VP8_MC(bilin ## w ## _hv, opt)

#endif /* AVCODEC_AARCH64_VP8DSP_H */
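For orientation only (my expansion, not part of the header): a single VP8_EPEL(16, neon) line in the init file below declares the whole family of 16-pixel-wide motion-compensation prototypes, for example:

void ff_put_vp8_pixels16_neon(uint8_t *dst, ptrdiff_t dststride,
                              uint8_t *src, ptrdiff_t srcstride,
                              int h, int x, int y);
void ff_put_vp8_epel16_h6v6_neon(uint8_t *dst, ptrdiff_t dststride,
                                 uint8_t *src, ptrdiff_t srcstride,
                                 int h, int x, int y);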

124
libavcodec/aarch64/vp8dsp_init_aarch64.c
Normal file
@ -0,0 +1,124 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/vp8dsp.h"
#include "vp8dsp.h"

void ff_vp8_luma_dc_wht_neon(int16_t block[4][4][16], int16_t dc[16]);

void ff_vp8_idct_add_neon(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_dc_add_neon(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_neon(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_neon(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);

VP8_LF(neon);

VP8_EPEL(16, neon);
VP8_EPEL(8, neon);
VP8_EPEL(4, neon);

VP8_BILIN(16, neon);
VP8_BILIN(8, neon);
VP8_BILIN(4, neon);

av_cold void ff_vp78dsp_init_aarch64(VP8DSPContext *dsp)
{
    if (!have_neon(av_get_cpu_flags()))
        return;
    dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
    dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_neon;
    dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_neon;
    dsp->put_vp8_epel_pixels_tab[0][2][2] = ff_put_vp8_epel16_h6v6_neon;

    dsp->put_vp8_epel_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
    dsp->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_neon;
    dsp->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_neon;
    dsp->put_vp8_epel_pixels_tab[1][1][0] = ff_put_vp8_epel8_v4_neon;
    dsp->put_vp8_epel_pixels_tab[1][1][1] = ff_put_vp8_epel8_h4v4_neon;
    dsp->put_vp8_epel_pixels_tab[1][1][2] = ff_put_vp8_epel8_h6v4_neon;
    dsp->put_vp8_epel_pixels_tab[1][2][0] = ff_put_vp8_epel8_v6_neon;
    dsp->put_vp8_epel_pixels_tab[1][2][1] = ff_put_vp8_epel8_h4v6_neon;
    dsp->put_vp8_epel_pixels_tab[1][2][2] = ff_put_vp8_epel8_h6v6_neon;

    dsp->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_neon;
    dsp->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_neon;
    dsp->put_vp8_epel_pixels_tab[2][1][0] = ff_put_vp8_epel4_v4_neon;
    dsp->put_vp8_epel_pixels_tab[2][1][1] = ff_put_vp8_epel4_h4v4_neon;
    dsp->put_vp8_epel_pixels_tab[2][1][2] = ff_put_vp8_epel4_h6v4_neon;
    dsp->put_vp8_epel_pixels_tab[2][2][0] = ff_put_vp8_epel4_v6_neon;
    dsp->put_vp8_epel_pixels_tab[2][2][1] = ff_put_vp8_epel4_h4v6_neon;
    dsp->put_vp8_epel_pixels_tab[2][2][2] = ff_put_vp8_epel4_h6v6_neon;

    dsp->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][1][0] = ff_put_vp8_bilin16_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][1][1] = ff_put_vp8_bilin16_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][1][2] = ff_put_vp8_bilin16_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][2][0] = ff_put_vp8_bilin16_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][2][1] = ff_put_vp8_bilin16_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[0][2][2] = ff_put_vp8_bilin16_hv_neon;

    dsp->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][1][0] = ff_put_vp8_bilin8_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][1][1] = ff_put_vp8_bilin8_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][1][2] = ff_put_vp8_bilin8_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][2][0] = ff_put_vp8_bilin8_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][2][1] = ff_put_vp8_bilin8_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[1][2][2] = ff_put_vp8_bilin8_hv_neon;

    dsp->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][1][0] = ff_put_vp8_bilin4_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][1][1] = ff_put_vp8_bilin4_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_neon;
    dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_neon;
}

av_cold void ff_vp8dsp_init_aarch64(VP8DSPContext *dsp)
{
    if (!have_neon(av_get_cpu_flags()))
        return;
    dsp->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_neon;

    dsp->vp8_idct_add       = ff_vp8_idct_add_neon;
    dsp->vp8_idct_dc_add    = ff_vp8_idct_dc_add_neon;
    dsp->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_neon;
    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon;

    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_neon;
    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_neon;
    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_neon;
    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_neon;

    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon;
    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon;
    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon;
    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon;

    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_neon;
    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_neon;
}
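/* (Annotation, not part of the upstream file: both the 4-tap and 6-tap slots
 * of the bilinear table point at the same h/v/hv kernels, since VP8's
 * bilinear prediction has only one filter per direction; only the epel table
 * distinguishes 4-tap from 6-tap variants.) */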

1790
libavcodec/aarch64/vp8dsp_neon.S
Normal file
File diff suppressed because it is too large

@ -452,7 +452,7 @@ static int decode_exponents(AC3DecodeContext *s,
            prevexp += dexp[i] - 2;
            if (prevexp > 24U) {
                av_log(s->avctx, AV_LOG_ERROR, "exponent %d is out-of-range\n", prevexp);
                return -1;
                return AVERROR_INVALIDDATA;
            }
            switch (group_size) {
            case 4: dexps[j++] = prevexp;
@ -1467,7 +1467,8 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
    int buf_size, full_buf_size = avpkt->size;
    AC3DecodeContext *s = avctx->priv_data;
    int blk, ch, err, offset, ret;
    int got_independent_frame = 0;
    int i;
    int skip = 0, got_independent_frame = 0;
    const uint8_t *channel_map;
    uint8_t extended_channel_map[EAC3_MAX_CHANNELS];
    const SHORTFLOAT *output[AC3_MAX_CHANNELS];
@ -1477,6 +1478,23 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
    s->superframe_size = 0;

    buf_size = full_buf_size;
    for (i = 1; i < buf_size; i += 2) {
        if (buf[i] == 0x77 || buf[i] == 0x0B) {
            if ((buf[i] ^ buf[i-1]) == (0x77 ^ 0x0B)) {
                i--;
                break;
            } else if ((buf[i] ^ buf[i+1]) == (0x77 ^ 0x0B)) {
                break;
            }
        }
    }
    if (i >= buf_size)
        return AVERROR_INVALIDDATA;
    if (i > 10)
        return i;
    buf += i;
    buf_size -= i;

    /* copy input buffer to decoder context to avoid reading past the end
       of the buffer, which can be caused by a damaged input stream. */
    if (buf_size >= 2 && AV_RB16(buf) == 0x770B) {
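For orientation only (this note and the helper below are not part of the commit): the new loop above searches the packet for the first place where the two AC-3 sync bytes 0x0B and 0x77 sit next to each other, in either byte order, so that a few bytes of leading garbage can be skipped or reported as consumed before normal parsing resumes. A plain restatement of the same scan:

#include <stdint.h>

/* Hypothetical helper restating the scan above: return the offset of the
 * first adjacent 0x0B/0x77 byte pair (either order), or -1 if none exists. */
static int find_ac3_sync(const uint8_t *buf, int size)
{
    for (int i = 0; i + 1 < size; i++)
        if ((buf[i] == 0x0B && buf[i + 1] == 0x77) ||
            (buf[i] == 0x77 && buf[i + 1] == 0x0B))
            return i;
    return -1;
}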
@ -1637,6 +1655,11 @@ dependent_frame:
            AC3HeaderInfo hdr;
            int err;

            if (buf_size - s->frame_size <= 16) {
                skip = buf_size - s->frame_size;
                goto skip;
            }

            if ((ret = init_get_bits8(&s->gbc, buf + s->frame_size, buf_size - s->frame_size)) < 0)
                return ret;

@ -1657,6 +1680,7 @@ dependent_frame:
            }
        }
    }
skip:

    frame->decode_error_flags = err ? FF_DECODE_ERROR_INVALID_BITSTREAM : 0;

@ -1796,9 +1820,9 @@ dependent_frame:
    *got_frame_ptr = 1;

    if (!s->superframe_size)
        return FFMIN(full_buf_size, s->frame_size);
        return FFMIN(full_buf_size, s->frame_size + skip);

    return FFMIN(full_buf_size, s->superframe_size);
    return FFMIN(full_buf_size, s->superframe_size + skip);
}

/**
@ -652,7 +652,7 @@ void ff_ac3_process_exponents(AC3EncodeContext *s)
 */
static void count_frame_bits_fixed(AC3EncodeContext *s)
{
    static const int frame_bits_inc[8] = { 0, 0, 2, 2, 2, 4, 2, 4 };
    static const uint8_t frame_bits_inc[8] = { 0, 0, 2, 2, 2, 4, 2, 4 };
    int blk;
    int frame_bits;

@ -1800,7 +1800,7 @@ static int validate_float_option(float v, const float *v_list, int v_list_size)
            break;
    }
    if (i == v_list_size)
        return -1;
        return AVERROR(EINVAL);

    return i;
}

@ -118,7 +118,7 @@ int16_t ff_acelp_decode_gain_code(
              (mr_energy >> 15) - 25
        );
#else
    mr_energy = gain_corr_factor * exp(M_LN10 / (20 << 23) * mr_energy) /
    mr_energy = gain_corr_factor * ff_exp10((double)mr_energy / (20 << 23)) /
                sqrt(adsp->scalarproduct_int16(fc_v, fc_v, subframe_size));
    return mr_energy >> 12;
#endif

@ -57,7 +57,7 @@
 */

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
static const int8_t xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
@ -65,7 +65,7 @@ static const int xa_adpcm_table[5][2] = {
    { 122, -60 }
};

static const int ea_adpcm_table[] = {
static const int16_t ea_adpcm_table[] = {
    0,  240,  460,  392,
    0,    0, -208, -220,
    0,    1,    3,    4,
@ -74,7 +74,7 @@ static const int ea_adpcm_table[] = {
};

// padded to zero where table size is less then 16
static const int swf_index_tables[4][16] = {
static const int8_t swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
@ -177,6 +177,50 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
    return 0;
}

static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
{
    int delta, pred, step, add;

    pred = c->predictor;
    delta = nibble & 7;
    step = c->step;
    add = (delta * 2 + 1) * step;
    if (add < 0)
        add = add + 7;

    if ((nibble & 8) == 0)
        pred = av_clip(pred + (add >> 3), -32767, 32767);
    else
        pred = av_clip(pred - (add >> 3), -32767, 32767);

    switch (delta) {
    case 7:
        step *= 0x99;
        break;
    case 6:
        c->step = av_clip(c->step * 2, 127, 24576);
        c->predictor = pred;
        return pred;
    case 5:
        step *= 0x66;
        break;
    case 4:
        step *= 0x4d;
        break;
    default:
        step *= 0x39;
        break;
    }

    if (step < 0)
        step += 0x3f;

    c->step = step >> 6;
    c->step = av_clip(c->step, 127, 24576);
    c->predictor = pred;
    return pred;
}
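/* (Annotation, not part of the upstream file: each AGM nibble carries a 3-bit
 * magnitude and a sign bit; the magnitude scales the current step size, the
 * prediction is clipped to the 16-bit sample range, and the step itself is
 * re-scaled by the switch above while being kept within [127, 24576].) */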

static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
{
    int step_index;
@ -440,7 +484,7 @@ static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_
{
    ADPCMDecodeContext *c = avctx->priv_data;
    GetBitContext gb;
    const int *table;
    const int8_t *table;
    int k0, signmask, nb_bits, count;
    int size = buf_size*8;
    int i;
@ -549,6 +593,7 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
        header_size = 0;
        switch (avctx->codec->id) {
            case AV_CODEC_ID_ADPCM_4XM:
            case AV_CODEC_ID_ADPCM_AGM:
            case AV_CODEC_ID_ADPCM_IMA_DAT4:
            case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
            case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8;      break;
@ -863,6 +908,18 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_AGM:
        for (i = 0; i < avctx->channels; i++)
            c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
        for (i = 0; i < avctx->channels; i++)
            c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);

        for (n = 0; n < nb_samples >> (1 - st); n++) {
            int v = bytestream2_get_byteu(&gb);
            *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
            *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
        }
        break;
    case AV_CODEC_ID_ADPCM_MS:
    {
        int block_predictor;
@ -1679,7 +1736,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
        break;

    default:
        return -1;
        av_assert0(0); // unsupported codec_id should not happen
    }

    if (avpkt->size && bytestream2_tell(&gb) == 0) {
@ -1729,6 +1786,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM,  sample_fmts_s16p, adpcm_4xm,  "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC,  sample_fmts_s16p, adpcm_afc,  "ADPCM Nintendo Gamecube AFC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM,  sample_fmts_s16,  adpcm_agm,  "ADPCM AmuseGraphics Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT,   sample_fmts_s16,  adpcm_ct,   "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK,  sample_fmts_s16p, adpcm_dtk,  "ADPCM Nintendo Gamecube DTK");

Some files were not shown because too many files have changed in this diff.