Update patches for 7.0.2

Signed-off-by: nyanmisaka <nst799610810@gmail.com>
This commit is contained in:
nyanmisaka 2024-08-04 01:20:23 +08:00
parent 8b6e13f596
commit 69a9c336cf
102 changed files with 7155 additions and 13613 deletions

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavformat/segment.c
Index: FFmpeg/libavformat/segment.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/segment.c
+++ jellyfin-ffmpeg/libavformat/segment.c
--- FFmpeg.orig/libavformat/segment.c
+++ FFmpeg/libavformat/segment.c
@@ -88,6 +88,7 @@ typedef struct SegmentContext {
int64_t last_val; ///< remember last time for wrap around detection
int cut_pending;
@ -10,7 +10,7 @@ Index: jellyfin-ffmpeg/libavformat/segment.c
char *entry_prefix; ///< prefix to add to list entry filenames
int list_type; ///< set the list type
@@ -712,6 +713,7 @@ static int seg_init(AVFormatContext *s)
@@ -707,6 +708,7 @@ static int seg_init(AVFormatContext *s)
if ((ret = parse_frames(s, &seg->frames, &seg->nb_frames, seg->frames_str)) < 0)
return ret;
} else {
@ -18,7 +18,7 @@ Index: jellyfin-ffmpeg/libavformat/segment.c
if (seg->use_clocktime) {
if (seg->time <= 0) {
av_log(s, AV_LOG_ERROR, "Invalid negative segment_time with segment_atclocktime option set\n");
@@ -895,7 +897,15 @@ calc_times:
@@ -890,7 +892,15 @@ calc_times:
seg->cut_pending = 1;
seg->last_val = wrapped_val;
} else {

View File

@ -1,25 +0,0 @@
Index: jellyfin-ffmpeg/libavformat/webvttenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/webvttenc.c
+++ jellyfin-ffmpeg/libavformat/webvttenc.c
@@ -50,8 +50,8 @@ static int webvtt_write_header(AVFormatC
AVCodecParameters *par = ctx->streams[0]->codecpar;
AVIOContext *pb = ctx->pb;
- if (ctx->nb_streams != 1 || par->codec_id != AV_CODEC_ID_WEBVTT) {
- av_log(ctx, AV_LOG_ERROR, "Exactly one WebVTT stream is needed.\n");
+ if (par->codec_id != AV_CODEC_ID_WEBVTT) {
+ av_log(ctx, AV_LOG_ERROR, "First stream must be WebVTT.\n");
return AVERROR(EINVAL);
}
@@ -69,6 +69,9 @@ static int webvtt_write_packet(AVFormatC
int id_size_int, settings_size_int;
uint8_t *id, *settings;
+ if (pkt->stream_index != 0)
+ return 0;
+
avio_printf(pb, "\n");
id = av_packet_get_side_data(pkt, AV_PKT_DATA_WEBVTT_IDENTIFIER,

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/compat/cuda/cuda_runtime.h
Index: FFmpeg/compat/cuda/cuda_runtime.h
===================================================================
--- jellyfin-ffmpeg.orig/compat/cuda/cuda_runtime.h
+++ jellyfin-ffmpeg/compat/cuda/cuda_runtime.h
--- FFmpeg.orig/compat/cuda/cuda_runtime.h
+++ FFmpeg/compat/cuda/cuda_runtime.h
@@ -24,6 +24,7 @@
#define COMPAT_CUDA_CUDA_RUNTIME_H

View File

@ -2,7 +2,7 @@ Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3143,6 +3143,8 @@ scale_cuda_filter_deps="ffnvcodec"
@@ -3291,6 +3291,8 @@ scale_cuda_filter_deps="ffnvcodec"
scale_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
thumbnail_cuda_filter_deps="ffnvcodec"
thumbnail_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
@ -11,7 +11,7 @@ Index: FFmpeg/configure
transpose_npp_filter_deps="ffnvcodec libnpp"
overlay_cuda_filter_deps="ffnvcodec"
overlay_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
@@ -3911,7 +3913,7 @@ enable doc
@@ -4082,7 +4084,7 @@ enable doc
enable faan faandct faanidct
enable large_tests
enable optimizations
@ -20,7 +20,7 @@ Index: FFmpeg/configure
enable runtime_cpudetect
enable safe_bitstream_reader
enable static
@@ -4456,7 +4458,7 @@ if enabled cuda_nvcc; then
@@ -4630,7 +4632,7 @@ if enabled cuda_nvcc; then
nvccflags_default="-gencode arch=compute_30,code=sm_30 -O2"
else
nvcc_default="clang"
@ -29,7 +29,7 @@ Index: FFmpeg/configure
NVCC_C=""
fi
@@ -6458,7 +6460,7 @@ fi
@@ -6711,7 +6713,7 @@ fi
if enabled cuda_nvcc; then
nvccflags="$nvccflags -ptx"
else
@ -54,20 +54,20 @@ Index: FFmpeg/libavfilter/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -509,6 +509,8 @@ OBJS-$(CONFIG_TMIX_FILTER)
@@ -522,6 +522,8 @@ OBJS-$(CONFIG_TMEDIAN_FILTER)
OBJS-$(CONFIG_TMIDEQUALIZER_FILTER) += vf_tmidequalizer.o
OBJS-$(CONFIG_TMIX_FILTER) += vf_mix.o framesync.o
OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o opencl.o \
opencl/tonemap.o opencl/colorspace_common.o
+OBJS-$(CONFIG_TONEMAP_CUDA_FILTER) += vf_tonemap_cuda.o cuda/tonemap.ptx.o \
+ cuda/host_util.o
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o opencl.o \
opencl/tonemap.o opencl/colorspace_common.o
OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER) += vf_tonemap_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -478,6 +478,7 @@ extern const AVFilter ff_vf_tmedian;
@@ -493,6 +493,7 @@ extern const AVFilter ff_vf_tmedian;
extern const AVFilter ff_vf_tmidequalizer;
extern const AVFilter ff_vf_tmix;
extern const AVFilter ff_vf_tonemap;
@ -1578,7 +1578,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_cuda.c
===================================================================
--- /dev/null
+++ FFmpeg/libavfilter/vf_tonemap_cuda.c
@@ -0,0 +1,1101 @@
@@ -0,0 +1,1100 @@
+/*
+ * This file is part of FFmpeg.
+ *
@ -1617,7 +1617,6 @@ Index: FFmpeg/libavfilter/vf_tonemap_cuda.c
+#include "cuda/host_util.h"
+#include "cuda/shared.h"
+#include "cuda/tonemap.h"
+#include "formats.h"
+#include "internal.h"
+#include "scale_eval.h"
+#include "video.h"
@ -2595,44 +2594,44 @@ Index: FFmpeg/libavfilter/vf_tonemap_cuda.c
+#define OFFSET(x) offsetof(TonemapCUDAContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption options[] = {
+ { "tonemap", "Tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_COUNT - 1, FLAGS, "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_BT2390}, 0, 0, FLAGS, "tonemap" },
+ { "tonemap_mode", "Tonemap mode selection", OFFSET(tonemap_mode), AV_OPT_TYPE_INT, {.i64 = TONEMAP_MODE_MAX}, TONEMAP_MODE_MAX, TONEMAP_MODE_COUNT - 1, FLAGS, "tonemap_mode" },
+ { "max", "Brightest channel based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_MAX}, 0, 0, FLAGS, "tonemap_mode" },
+ { "rgb", "Per-channel based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_RGB}, 0, 0, FLAGS, "tonemap_mode" },
+ { "lum", "Relative luminance based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_LUM}, 0, 0, FLAGS, "tonemap_mode" },
+ { "transfer", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
+ { "t", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, "transfer" },
+ { "smpte2084", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE2084}, 0, 0, FLAGS, "transfer" },
+ { "matrix", "Set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, "matrix" },
+ { "m", "Set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, "matrix" },
+ { "primaries", "Set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, "primaries" },
+ { "p", "Set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, "primaries" },
+ { "range", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, "range" },
+ { "r", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "tonemap", "Tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_COUNT - 1, FLAGS, .unit = "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_BT2390}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "tonemap_mode", "Tonemap mode selection", OFFSET(tonemap_mode), AV_OPT_TYPE_INT, {.i64 = TONEMAP_MODE_MAX}, TONEMAP_MODE_MAX, TONEMAP_MODE_COUNT - 1, FLAGS, .unit = "tonemap_mode" },
+ { "max", "Brightest channel based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_MAX}, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "rgb", "Per-channel based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_RGB}, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "lum", "Relative luminance based tonemap", 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MODE_LUM}, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "transfer", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "t", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, .unit = "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, .unit = "transfer" },
+ { "smpte2084", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE2084}, 0, 0, FLAGS, .unit = "transfer" },
+ { "matrix", "Set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "m", "Set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, .unit = "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, .unit = "matrix" },
+ { "primaries", "Set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "p", "Set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, .unit = "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, .unit = "primaries" },
+ { "range", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "r", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "format", "Output format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+ { "apply_dovi", "Apply Dolby Vision metadata if possible", OFFSET(apply_dovi), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ { "tradeoff", "Apply tradeoffs to offload computing", OFFSET(tradeoff), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, FLAGS, "tradeoff" },
+ { "auto", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, "tradeoff" },
+ { "disabled", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, FLAGS, "tradeoff" },
+ { "enabled", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, FLAGS, "tradeoff" },
+ { "tradeoff", "Apply tradeoffs to offload computing", OFFSET(tradeoff), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, FLAGS, .unit = "tradeoff" },
+ { "auto", 0, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, FLAGS, .unit = "tradeoff" },
+ { "disabled", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, FLAGS, .unit = "tradeoff" },
+ { "enabled", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, FLAGS, .unit = "tradeoff" },
+ { "peak", "Signal peak override", OFFSET(peak), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, DBL_MAX, FLAGS },
+ { "param", "Tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
+ { "desat", "Desaturation parameter", OFFSET(desat_param), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0, DBL_MAX, FLAGS },

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/configure
Index: FFmpeg/configure
===================================================================
--- jellyfin-ffmpeg.orig/configure
+++ jellyfin-ffmpeg/configure
@@ -3725,6 +3725,7 @@ rubberband_filter_deps="librubberband"
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3889,6 +3889,7 @@ rubberband_filter_deps="librubberband"
sab_filter_deps="gpl swscale"
scale2ref_filter_deps="swscale"
scale_filter_deps="swscale"
@ -10,34 +10,34 @@ Index: jellyfin-ffmpeg/configure
scale_qsv_filter_deps="libmfx"
scale_qsv_filter_select="qsvvpp"
scdet_filter_select="scene_sad"
Index: jellyfin-ffmpeg/libavfilter/Makefile
Index: FFmpeg/libavfilter/Makefile
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/Makefile
+++ jellyfin-ffmpeg/libavfilter/Makefile
@@ -446,6 +446,7 @@ OBJS-$(CONFIG_SCALE_FILTER)
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -459,6 +459,7 @@ OBJS-$(CONFIG_SCALE_FILTER)
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o scale_eval.o \
vf_scale_cuda.ptx.o cuda/load_helper.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale_eval.o
+OBJS-$(CONFIG_SCALE_OPENCL_FILTER) += vf_scale_opencl.o opencl.o opencl/scale.o scale_eval.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_vpp_qsv.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE_VULKAN_FILTER) += vf_scale_vulkan.o vulkan.o vulkan_filter.o
Index: jellyfin-ffmpeg/libavfilter/allfilters.c
OBJS-$(CONFIG_SCALE_VT_FILTER) += vf_scale_vt.o scale_eval.o
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/allfilters.c
+++ jellyfin-ffmpeg/libavfilter/allfilters.c
@@ -419,6 +419,7 @@ extern const AVFilter ff_vf_sab;
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -432,6 +432,7 @@ extern const AVFilter ff_vf_sab;
extern const AVFilter ff_vf_scale;
extern const AVFilter ff_vf_scale_cuda;
extern const AVFilter ff_vf_scale_npp;
+extern const AVFilter ff_vf_scale_opencl;
extern const AVFilter ff_vf_scale_qsv;
extern const AVFilter ff_vf_scale_vaapi;
extern const AVFilter ff_vf_scale_vulkan;
Index: jellyfin-ffmpeg/libavfilter/opencl/scale.cl
extern const AVFilter ff_vf_scale_vt;
Index: FFmpeg/libavfilter/opencl/scale.cl
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/opencl/scale.cl
+++ FFmpeg/libavfilter/opencl/scale.cl
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2018 Gabriel Machado
@ -315,23 +315,23 @@ Index: jellyfin-ffmpeg/libavfilter/opencl/scale.cl
+#endif
+}
+#endif
Index: jellyfin-ffmpeg/libavfilter/opencl_source.h
Index: FFmpeg/libavfilter/opencl_source.h
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/opencl_source.h
+++ jellyfin-ffmpeg/libavfilter/opencl_source.h
@@ -27,6 +27,7 @@ extern const char *ff_opencl_source_desh
extern const char *ff_opencl_source_neighbor;
extern const char *ff_opencl_source_nlmeans;
extern const char *ff_opencl_source_overlay;
+extern const char *ff_opencl_source_scale;
extern const char *ff_opencl_source_pad;
extern const char *ff_opencl_source_remap;
extern const char *ff_opencl_source_tonemap;
Index: jellyfin-ffmpeg/libavfilter/vf_scale_opencl.c
--- FFmpeg.orig/libavfilter/opencl_source.h
+++ FFmpeg/libavfilter/opencl_source.h
@@ -29,6 +29,7 @@ extern const char *ff_source_nlmeans_cl;
extern const char *ff_source_overlay_cl;
extern const char *ff_source_pad_cl;
extern const char *ff_source_remap_cl;
+extern const char *ff_source_scale_cl;
extern const char *ff_source_tonemap_cl;
extern const char *ff_source_transpose_cl;
extern const char *ff_source_unsharp_cl;
Index: FFmpeg/libavfilter/vf_scale_opencl.c
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/vf_scale_opencl.c
@@ -0,0 +1,776 @@
+++ FFmpeg/libavfilter/vf_scale_opencl.c
@@ -0,0 +1,777 @@
+/*
+ * Copyright (c) 2018 Gabriel Machado
+ * Copyright (c) 2021 NyanMisaka
@ -659,7 +659,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_scale_opencl.c
+
+ av_log(avctx, AV_LOG_DEBUG, "Generated OpenCL header:\n%s\n", header.str);
+ opencl_sources[0] = header.str;
+ opencl_sources[1] = ff_opencl_source_scale;
+ opencl_sources[1] = ff_source_scale_cl;
+ err = ff_opencl_filter_load_program(avctx, opencl_sources, OPENCL_SOURCE_NB);
+
+ av_bprint_finalize(&header, NULL);
@ -1056,22 +1056,22 @@ Index: jellyfin-ffmpeg/libavfilter/vf_scale_opencl.c
+static const AVOption scale_opencl_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
+ { "format", "Output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, FLAGS, "fmt" },
+ { "format", "Output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, FLAGS, .unit = "fmt" },
+ { "passthrough", "Do not process frames at all if parameters match", OFFSET(passthrough), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "algo", "Scaling algorithm", OFFSET(algorithm), AV_OPT_TYPE_INT, { .i64 = F_BILINEAR }, INT_MIN, INT_MAX, FLAGS, "algo" },
+ { "area", "Area averaging", 0, AV_OPT_TYPE_CONST, { .i64 = F_AREA }, 0, 0, FLAGS, "algo" },
+ { "bicubic", "Bicubic", 0, AV_OPT_TYPE_CONST, { .i64 = F_BICUBIC }, 0, 0, FLAGS, "algo" },
+ { "bilinear", "Bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = F_BILINEAR }, 0, 0, FLAGS, "algo" },
+ { "gauss", "Gaussian", 0, AV_OPT_TYPE_CONST, { .i64 = F_GAUSSIAN }, 0, 0, FLAGS, "algo" },
+ { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = F_LANCZOS }, 0, 0, FLAGS, "algo" },
+ { "neighbor", "Nearest Neighbor", 0, AV_OPT_TYPE_CONST, { .i64 = F_NEIGHBOR }, 0, 0, FLAGS, "algo" },
+ { "sinc", "Sinc", 0, AV_OPT_TYPE_CONST, { .i64 = F_SINC }, 0, 0, FLAGS, "algo" },
+ { "spline", "Bicubic Spline", 0, AV_OPT_TYPE_CONST, { .i64 = F_SPLINE }, 0, 0, FLAGS, "algo" },
+ { "experimental", "Experimental", 0, AV_OPT_TYPE_CONST, { .i64 = F_EXPERIMENTAL }, 0, 0, FLAGS, "algo" },
+ { "force_original_aspect_ratio", "Decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, "force_oar" },
+ { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+ { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
+ { "algo", "Scaling algorithm", OFFSET(algorithm), AV_OPT_TYPE_INT, { .i64 = F_BILINEAR }, INT_MIN, INT_MAX, FLAGS, .unit = "algo" },
+ { "area", "Area averaging", 0, AV_OPT_TYPE_CONST, { .i64 = F_AREA }, 0, 0, FLAGS, .unit = "algo" },
+ { "bicubic", "Bicubic", 0, AV_OPT_TYPE_CONST, { .i64 = F_BICUBIC }, 0, 0, FLAGS, .unit = "algo" },
+ { "bilinear", "Bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = F_BILINEAR }, 0, 0, FLAGS, .unit = "algo" },
+ { "gauss", "Gaussian", 0, AV_OPT_TYPE_CONST, { .i64 = F_GAUSSIAN }, 0, 0, FLAGS, .unit = "algo" },
+ { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = F_LANCZOS }, 0, 0, FLAGS, .unit = "algo" },
+ { "neighbor", "Nearest Neighbor", 0, AV_OPT_TYPE_CONST, { .i64 = F_NEIGHBOR }, 0, 0, FLAGS, .unit = "algo" },
+ { "sinc", "Sinc", 0, AV_OPT_TYPE_CONST, { .i64 = F_SINC }, 0, 0, FLAGS, .unit = "algo" },
+ { "spline", "Bicubic Spline", 0, AV_OPT_TYPE_CONST, { .i64 = F_SPLINE }, 0, 0, FLAGS, .unit = "algo" },
+ { "experimental", "Experimental", 0, AV_OPT_TYPE_CONST, { .i64 = F_EXPERIMENTAL }, 0, 0, FLAGS, .unit = "algo" },
+ { "force_original_aspect_ratio", "Decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, .unit = "force_oar" },
+ { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, .unit = "force_oar" },
+ { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, .unit = "force_oar" },
+ { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, .unit = "force_oar" },
+ { "force_divisible_by", "Enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, FLAGS },
+ { NULL }
+};
@ -1107,4 +1107,5 @@ Index: jellyfin-ffmpeg/libavfilter/vf_scale_opencl.c
+ FILTER_OUTPUTS(scale_opencl_outputs),
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_OPENCL),
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+ .flags = AVFILTER_FLAG_HWDEVICE,
+};

View File

@ -2,7 +2,7 @@ Index: FFmpeg/libavfilter/opencl.c
===================================================================
--- FFmpeg.orig/libavfilter/opencl.c
+++ FFmpeg/libavfilter/opencl.c
@@ -170,7 +170,7 @@ int ff_opencl_filter_load_program(AVFilt
@@ -169,7 +169,7 @@ int ff_opencl_filter_load_program(AVFilt
}
cle = clBuildProgram(ctx->program, 1, &ctx->hwctx->device_id,
@ -11,7 +11,7 @@ Index: FFmpeg/libavfilter/opencl.c
if (cle != CL_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "Failed to build program: %d.\n", cle);
@@ -331,7 +331,7 @@ void ff_opencl_print_const_matrix_3x3(AV
@@ -330,7 +330,7 @@ void ff_opencl_print_const_matrix_3x3(AV
av_bprintf(buf, "__constant float %s[9] = {\n", name_str);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++)
@ -1069,7 +1069,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
static int get_rgb2rgb_matrix(enum AVColorPrimaries in, enum AVColorPrimaries out,
double rgb2rgb[3][3]) {
double rgb2xyz[3][3], xyz2rgb[3][3];
@@ -108,23 +194,149 @@ static int get_rgb2rgb_matrix(enum AVCol
@@ -108,23 +194,150 @@ static int get_rgb2rgb_matrix(enum AVCol
return 0;
}
@ -1222,13 +1222,14 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
-
- av_bprint_init(&header, 1024, AV_BPRINT_SIZE_AUTOMATIC);
+ cl_mem_flags dovi_buf_flags = CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR;
+ char *device_vendor = NULL;
+ char *device_name = NULL;
+ char *device_exts = NULL;
+ int i, j, err;
switch(ctx->tonemap) {
case TONEMAP_GAMMA:
@@ -144,48 +356,148 @@ static int tonemap_opencl_init(AVFilterC
@@ -144,48 +357,156 @@ static int tonemap_opencl_init(AVFilterC
if (isnan(ctx->param))
ctx->param = 1.0f;
@ -1285,6 +1286,14 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
+ }
+ av_free(device_name);
+ }
+ } else if (device_is_integrated == CL_TRUE) {
+ device_vendor = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_VENDOR);
+ device_name = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_NAME);
+ if (!strstr(device_vendor, "ARM") &&
+ !strstr(device_name, "Mali"))
+ ctx->tradeoff = 0;
+ av_free(device_vendor);
+ av_free(device_name);
+ } else {
+ ctx->tradeoff = 0;
+ }
@ -1392,7 +1401,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
if (ctx->range_in == AVCOL_RANGE_JPEG)
av_bprintf(&header, "#define FULL_RANGE_IN\n");
@@ -199,19 +511,41 @@ static int tonemap_opencl_init(AVFilterC
@@ -199,19 +520,41 @@ static int tonemap_opencl_init(AVFilterC
else
ff_opencl_print_const_matrix_3x3(&header, "rgb2rgb", rgb2rgb);
@ -1441,7 +1450,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
ctx->colorspace_out, av_color_space_name(ctx->colorspace_out));
goto fail;
}
@@ -219,24 +553,23 @@ static int tonemap_opencl_init(AVFilterC
@@ -219,24 +562,23 @@ static int tonemap_opencl_init(AVFilterC
ff_fill_rgb2yuv_table(luma_dst, rgb2yuv);
ff_opencl_print_const_matrix_3x3(&header, "yuv_matrix", rgb2yuv);
@ -1481,7 +1490,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
av_log(avctx, AV_LOG_DEBUG, "Generated OpenCL header:\n%s\n", header.str);
opencl_sources[0] = header.str;
@@ -254,46 +587,171 @@ static int tonemap_opencl_init(AVFilterC
@@ -254,46 +596,171 @@ static int tonemap_opencl_init(AVFilterC
CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create OpenCL "
"command queue %d.\n", cle);
@ -1637,7 +1646,6 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
+ if (!inlink->hw_frames_ctx)
return AVERROR(EINVAL);
- }
- }
+ in_frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+ in_format = in_frames_ctx->sw_format;
+ out_format = (ctx->format == AV_PIX_FMT_NONE) ? in_format : ctx->format;
@ -1658,8 +1666,9 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
+ av_log(ctx, AV_LOG_ERROR, "Unsupported input format depth: %d\n",
+ in_desc->comp[0].depth);
+ return AVERROR(ENOSYS);
+ }
+
}
- s->ocf.output_format = s->format == AV_PIX_FMT_NONE ? AV_PIX_FMT_NV12 : s->format;
+ ctx->in_fmt = in_format;
+ ctx->out_fmt = out_format;
+ ctx->in_desc = in_desc;
@ -1667,12 +1676,11 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
+ ctx->in_planes = av_pix_fmt_count_planes(in_format);
+ ctx->out_planes = av_pix_fmt_count_planes(out_format);
+ ctx->ocf.output_format = out_format;
- s->ocf.output_format = s->format == AV_PIX_FMT_NONE ? AV_PIX_FMT_NV12 : s->format;
+
ret = ff_opencl_filter_config_output(outlink);
if (ret < 0)
return ret;
@@ -308,13 +766,46 @@ static int launch_kernel(AVFilterContext
@@ -308,13 +775,46 @@ static int launch_kernel(AVFilterContext
size_t global_work[2];
size_t local_work[2];
cl_int cle;
@ -1721,7 +1729,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
local_work[0] = 16;
local_work[1] = 16;
@@ -338,13 +829,10 @@ static int tonemap_opencl_filter_frame(A
@@ -338,13 +838,10 @@ static int tonemap_opencl_filter_frame(A
AVFilterContext *avctx = inlink->dst;
AVFilterLink *outlink = avctx->outputs[0];
TonemapOpenCLContext *ctx = avctx->priv;
@ -1736,7 +1744,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
av_get_pix_fmt_name(input->format),
@@ -363,9 +851,6 @@ static int tonemap_opencl_filter_frame(A
@@ -363,9 +860,6 @@ static int tonemap_opencl_filter_frame(A
if (err < 0)
goto fail;
@ -1746,7 +1754,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
if (ctx->trc != -1)
output->color_trc = ctx->trc;
if (ctx->primaries != -1)
@@ -385,72 +870,92 @@ static int tonemap_opencl_filter_frame(A
@@ -385,72 +879,92 @@ static int tonemap_opencl_filter_frame(A
ctx->range_out = output->color_range;
ctx->chroma_loc = output->chroma_location;
@ -1879,7 +1887,7 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
av_frame_free(&input);
av_frame_free(&output);
return err;
@@ -458,24 +963,9 @@ fail:
@@ -458,24 +972,9 @@ fail:
static av_cold void tonemap_opencl_uninit(AVFilterContext *avctx)
{
@ -1906,37 +1914,37 @@ Index: FFmpeg/libavfilter/vf_tonemap_opencl.c
ff_opencl_filter_uninit(avctx);
}
@@ -483,37 +973,48 @@ static av_cold void tonemap_opencl_unini
@@ -483,37 +982,48 @@ static av_cold void tonemap_opencl_unini
#define OFFSET(x) offsetof(TonemapOpenCLContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption tonemap_opencl_options[] = {
- { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, "tonemap" },
- { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, "tonemap" },
- { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, "tonemap" },
- { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, "tonemap" },
- { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, "tonemap" },
- { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, "tonemap" },
- { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, "tonemap" },
- { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, "tonemap" },
- { "transfer", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
- { "t", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, "transfer" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, "transfer" },
- { "matrix", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "matrix" },
- { "m", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "matrix" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, "matrix" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, "matrix" },
- { "primaries", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "primaries" },
- { "p", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "primaries" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, "primaries" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, "primaries" },
- { "range", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "range" },
- { "r", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, "range" },
- { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
- { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
- { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
- { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
- { "format", "output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, FLAGS, "fmt" },
- { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, .unit = "tonemap" },
- { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, .unit = "tonemap" },
- { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, .unit = "tonemap" },
- { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, .unit = "tonemap" },
- { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, .unit = "tonemap" },
- { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, .unit = "tonemap" },
- { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, .unit = "tonemap" },
- { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, .unit = "tonemap" },
- { "transfer", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
- { "t", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, .unit = "transfer" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, .unit = "transfer" },
- { "matrix", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "matrix" },
- { "m", "set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "matrix" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, .unit = "matrix" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, .unit = "matrix" },
- { "primaries", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "primaries" },
- { "p", "set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "primaries" },
- { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, .unit = "primaries" },
- { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, .unit = "primaries" },
- { "range", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "range" },
- { "r", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, FLAGS, .unit = "range" },
- { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
- { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
- { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
- { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
- { "format", "output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, FLAGS, .unit = "fmt" },
- { "peak", "signal peak override", OFFSET(peak), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, DBL_MAX, FLAGS },
- { "param", "tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
- { "desat", "desaturation parameter", OFFSET(desat_param), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0, DBL_MAX, FLAGS },

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavfilter/opencl/overlay.cl
Index: FFmpeg/libavfilter/opencl/overlay.cl
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/opencl/overlay.cl
+++ jellyfin-ffmpeg/libavfilter/opencl/overlay.cl
--- FFmpeg.orig/libavfilter/opencl/overlay.cl
+++ FFmpeg/libavfilter/opencl/overlay.cl
@@ -16,15 +16,24 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@ -142,10 +142,10 @@ Index: jellyfin-ffmpeg/libavfilter/opencl/overlay.cl
float4 val = in_overlay * in_alpha.x + in_main * (1.0f - in_alpha.x);
write_imagef(dst, loc, val);
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_opencl.c
Index: FFmpeg/libavfilter/vf_overlay_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_opencl.c
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_opencl.c
--- FFmpeg.orig/libavfilter/vf_overlay_opencl.c
+++ FFmpeg/libavfilter/vf_overlay_opencl.c
@@ -27,72 +27,117 @@
#include "opencl_source.h"
#include "video.h"
@ -228,7 +228,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_opencl.c
{
OverlayOpenCLContext *ctx = avctx->priv;
cl_int cle;
- const char *source = ff_opencl_source_overlay;
- const char *source = ff_source_overlay_cl;
- const char *kernel;
- const AVPixFmtDescriptor *main_desc, *overlay_desc;
- int err, i, main_planes, overlay_planes;
@ -295,7 +295,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_opencl.c
+ av_log(avctx, AV_LOG_DEBUG, "Using kernel %s.\n", ctx->kernel_name);
- err = ff_opencl_filter_load_program(avctx, &source, 1);
+ err = ff_opencl_filter_load_program(avctx, &ff_opencl_source_overlay, 1);
+ err = ff_opencl_filter_load_program(avctx, &ff_source_overlay_cl, 1);
if (err < 0)
goto fail;
@ -627,10 +627,10 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_opencl.c
OFFSET(y_position), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+ { "eof_action", "Action to take when encountering EOF from secondary input ",
+ OFFSET(opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
+ EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
+ { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
+ { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
+ { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
+ EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, .unit = "eof_action" },
+ { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, .unit = "eof_action" },
+ { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, .unit = "eof_action" },
+ { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, .unit = "eof_action" },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(opt_repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
{ NULL },

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
Index: FFmpeg/libavutil/hwcontext_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_opencl.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
--- FFmpeg.orig/libavutil/hwcontext_opencl.c
+++ FFmpeg/libavutil/hwcontext_opencl.c
@@ -64,6 +64,16 @@
#if HAVE_OPENCL_D3D11
#include <CL/cl_d3d11.h>
@ -19,13 +19,12 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
#endif
#if HAVE_OPENCL_DRM_ARM
@@ -119,12 +129,19 @@ typedef struct OpenCLDeviceContext {
@@ -129,12 +139,18 @@ typedef struct OpenCLDeviceContext {
#if HAVE_OPENCL_D3D11
int d3d11_mapping_usable;
+ int d3d11_map_amd;
+ int d3d11_map_intel;
+
clCreateFromD3D11Texture2DKHR_fn
clCreateFromD3D11Texture2DKHR;
clEnqueueAcquireD3D11ObjectsKHR_fn
@ -39,15 +38,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
#endif
#if HAVE_OPENCL_DRM_ARM
@@ -148,7 +165,6 @@ typedef struct OpenCLFramesContext {
#endif
} OpenCLFramesContext;
-
static void CL_CALLBACK opencl_error_callback(const char *errinfo,
const void *private_info,
size_t cb,
@@ -497,8 +513,10 @@ static int opencl_device_create_internal
@@ -512,8 +528,10 @@ static int opencl_device_create_internal
cl_uint nb_platforms;
cl_platform_id *platforms = NULL;
cl_platform_id platform_id;
@ -58,7 +49,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
AVOpenCLDeviceContext *hwctx = hwdev->hwctx;
cl_int cle;
cl_context_properties default_props[3];
@@ -577,6 +595,11 @@ static int opencl_device_create_internal
@@ -592,6 +610,11 @@ static int opencl_device_create_internal
++found;
platform_id = platforms[p];
hwctx->device_id = devices[d];
@ -70,7 +61,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
}
av_freep(&devices);
@@ -588,9 +611,10 @@ static int opencl_device_create_internal
@@ -603,9 +626,10 @@ static int opencl_device_create_internal
goto fail;
}
if (found > 1) {
@ -84,7 +75,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
}
if (!props) {
@@ -826,17 +850,25 @@ static int opencl_device_init(AVHWDevice
@@ -841,17 +865,25 @@ static int opencl_device_init(AVHWDevice
#if HAVE_OPENCL_D3D11
{
const char *d3d11_ext = "cl_khr_d3d11_sharing";
@ -115,7 +106,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
}
CL_FUNC(clCreateFromD3D11Texture2DKHR,
@@ -846,6 +878,13 @@ static int opencl_device_init(AVHWDevice
@@ -861,6 +893,13 @@ static int opencl_device_init(AVHWDevice
CL_FUNC(clEnqueueReleaseD3D11ObjectsKHR,
"D3D11 in OpenCL release");
@ -129,7 +120,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
if (fail) {
av_log(hwdev, AV_LOG_WARNING, "D3D11 to OpenCL mapping "
"not usable.\n");
@@ -1248,7 +1287,7 @@ static int opencl_device_derive(AVHWDevi
@@ -1263,7 +1302,7 @@ static int opencl_device_derive(AVHWDevi
CL_CONTEXT_VA_API_DISPLAY_INTEL,
(intptr_t)src_hwctx->display,
CL_CONTEXT_INTEROP_USER_SYNC,
@ -138,7 +129,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
0,
};
OpenCLDeviceSelector selector = {
@@ -1287,11 +1326,13 @@ static int opencl_device_derive(AVHWDevi
@@ -1302,11 +1341,13 @@ static int opencl_device_derive(AVHWDevi
device_handle,
&device, FALSE);
if (SUCCEEDED(hr)) {
@ -153,7 +144,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
0,
};
OpenCLDeviceSelector selector = {
@@ -1324,11 +1365,13 @@ static int opencl_device_derive(AVHWDevi
@@ -1339,11 +1380,13 @@ static int opencl_device_derive(AVHWDevi
case AV_HWDEVICE_TYPE_D3D11VA:
{
AVD3D11VADeviceContext *src_hwctx = src_ctx->hwctx;
@ -168,26 +159,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
0,
};
OpenCLDeviceSelector selector = {
@@ -2013,7 +2056,8 @@ static int opencl_map_frame(AVHWFramesCo
goto fail;
}
- dst->data[p] = map->address[p];
+ dst->data[p] = map->address[p];
+ dst->linesize[p] = row_pitch;
av_log(hwfc, AV_LOG_DEBUG, "Map plane %d (%p -> %p).\n",
p, src->data[p], dst->data[p]);
@@ -2346,7 +2390,7 @@ static void opencl_unmap_from_dxva2(AVHW
{
AVOpenCLFrameDescriptor *desc = hwmap->priv;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
- OpenCLFramesContext *frames_priv = dst_fc->device_ctx->internal->priv;
+ OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
cl_event event;
cl_int cle;
@@ -2439,11 +2483,13 @@ static int opencl_frames_derive_from_dxv
@@ -2461,8 +2504,9 @@ static int opencl_frames_derive_from_dxv
cl_int cle;
int err, i, p, nb_planes;
@ -199,16 +171,8 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
"for DXVA2 to OpenCL mapping.\n");
return AVERROR(EINVAL);
}
+
nb_planes = 2;
if (src_fc->initial_pool_size == 0) {
@@ -2511,15 +2557,25 @@ static void opencl_unmap_from_d3d11(AVHW
{
AVOpenCLFrameDescriptor *desc = hwmap->priv;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
- OpenCLFramesContext *frames_priv = dst_fc->device_ctx->internal->priv;
+ OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
@@ -2536,12 +2580,22 @@ static void opencl_unmap_from_d3d11(AVHW
OpenCLFramesContext *frames_priv = dst_fc->hwctx;
cl_event event;
cl_int cle;
+ const cl_mem *mem_objs;
@ -232,7 +196,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
"handle: %d.\n", cle);
}
@@ -2534,7 +2590,9 @@ static int opencl_map_from_d3d11(AVHWFra
@@ -2556,7 +2610,9 @@ static int opencl_map_from_d3d11(AVHWFra
AVOpenCLFrameDescriptor *desc;
cl_event event;
cl_int cle;
@ -243,7 +207,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
index = (intptr_t)src->data[1];
if (index >= frames_priv->nb_mapped_frames) {
@@ -2543,16 +2601,25 @@ static int opencl_map_from_d3d11(AVHWFra
@@ -2565,16 +2621,25 @@ static int opencl_map_from_d3d11(AVHWFra
return AVERROR(EINVAL);
}
@ -271,7 +235,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
"handle: %d.\n", cle);
return AVERROR(EIO);
}
@@ -2561,7 +2628,7 @@ static int opencl_map_from_d3d11(AVHWFra
@@ -2583,7 +2648,7 @@ static int opencl_map_from_d3d11(AVHWFra
if (err < 0)
goto fail;
@ -280,7 +244,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
dst->data[i] = (uint8_t*)desc->planes[i];
err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
@@ -2576,7 +2643,7 @@ static int opencl_map_from_d3d11(AVHWFra
@@ -2598,7 +2663,7 @@ static int opencl_map_from_d3d11(AVHWFra
fail:
cle = device_priv->clEnqueueReleaseD3D11ObjectsKHR(
@ -289,10 +253,10 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
0, NULL, &event);
if (cle == CL_SUCCESS)
opencl_wait_events(dst_fc, &event, 1);
@@ -2591,16 +2658,26 @@ static int opencl_frames_derive_from_d3d
AVD3D11VAFramesContext *src_hwctx = src_fc->hwctx;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
@@ -2613,16 +2678,25 @@ static int opencl_frames_derive_from_d3d
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
AVOpenCLDeviceContext *dst_dev = &device_priv->p;
OpenCLFramesContext *frames_priv = dst_fc->hwctx;
+ cl_mem plane_uint;
cl_mem_flags cl_flags;
cl_int cle;
@ -316,12 +280,11 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ return AVERROR(ENOSYS);
}
- nb_planes = 2;
+
+ nb_planes = device_priv->d3d11_map_amd ? 3 : 2;
if (src_fc->initial_pool_size == 0) {
av_log(dst_fc, AV_LOG_ERROR, "Only fixed-size pools are supported "
@@ -2623,27 +2700,94 @@ static int opencl_frames_derive_from_d3d
@@ -2645,27 +2719,94 @@ static int opencl_frames_derive_from_d3d
for (i = 0; i < frames_priv->nb_mapped_frames; i++) {
AVOpenCLFrameDescriptor *desc = &frames_priv->mapped_frames[i];
desc->nb_planes = nb_planes;

View File

@ -1,20 +1,21 @@
Index: jellyfin-ffmpeg/libavfilter/avfilter.h
Index: FFmpeg/libavfilter/avfilter.h
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/avfilter.h
+++ jellyfin-ffmpeg/libavfilter/avfilter.h
@@ -532,6 +532,7 @@ struct AVFilterLink {
int w; ///< agreed upon image width
int h; ///< agreed upon image height
AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
--- FFmpeg.orig/libavfilter/avfilter.h
+++ FFmpeg/libavfilter/avfilter.h
@@ -564,6 +564,8 @@ struct AVFilterLink {
enum AVColorSpace colorspace; ///< agreed upon YUV color space
enum AVColorRange color_range; ///< agreed upon YUV color range
+ int fixed_pool_size; ///< fixed size of the frame pool for reverse hw mapping
+
/* These parameters apply only to audio */
#if FF_API_OLD_CHANNEL_LAYOUT
/**
Index: jellyfin-ffmpeg/libavfilter/opencl.c
int sample_rate; ///< samples per second
AVChannelLayout ch_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
Index: FFmpeg/libavfilter/opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/opencl.c
+++ jellyfin-ffmpeg/libavfilter/opencl.c
@@ -76,6 +76,9 @@ int ff_opencl_filter_config_input(AVFilt
--- FFmpeg.orig/libavfilter/opencl.c
+++ FFmpeg/libavfilter/opencl.c
@@ -75,6 +75,9 @@ int ff_opencl_filter_config_input(AVFilt
if (!ctx->output_height)
ctx->output_height = inlink->h;
@ -24,7 +25,7 @@ Index: jellyfin-ffmpeg/libavfilter/opencl.c
return 0;
}
@@ -124,6 +127,9 @@ int ff_opencl_filter_config_output(AVFil
@@ -123,6 +126,9 @@ int ff_opencl_filter_config_output(AVFil
outlink->w = ctx->output_width;
outlink->h = ctx->output_height;
@ -34,10 +35,10 @@ Index: jellyfin-ffmpeg/libavfilter/opencl.c
return 0;
fail:
av_buffer_unref(&output_frames_ref);
Index: jellyfin-ffmpeg/libavfilter/vf_hwmap.c
Index: FFmpeg/libavfilter/vf_hwmap.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_hwmap.c
+++ jellyfin-ffmpeg/libavfilter/vf_hwmap.c
--- FFmpeg.orig/libavfilter/vf_hwmap.c
+++ FFmpeg/libavfilter/vf_hwmap.c
@@ -22,6 +22,10 @@
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
@ -62,7 +63,17 @@ Index: jellyfin-ffmpeg/libavfilter/vf_hwmap.c
} else if (inlink->format == hwfc->format &&
(desc->flags & AV_PIX_FMT_FLAG_HWACCEL) &&
ctx->reverse) {
@@ -144,8 +154,20 @@ static int hwmap_config_output(AVFilterL
@@ -131,6 +141,9 @@ static int hwmap_config_output(AVFilterL
// mapped from that back to the source type.
AVBufferRef *source;
AVHWFramesContext *frames;
+#if HAVE_OPENCL_D3D11
+ D3D11_TEXTURE2D_DESC texDesc = { .BindFlags = D3D11_BIND_DECODER, };
+#endif
ctx->hwframes_ref = av_hwframe_ctx_alloc(device);
if (!ctx->hwframes_ref) {
@@ -144,8 +157,19 @@ static int hwmap_config_output(AVFilterL
frames->width = hwfc->width;
frames->height = hwfc->height;
@ -78,17 +89,16 @@ Index: jellyfin-ffmpeg/libavfilter/vf_hwmap.c
+ }
+
+#if HAVE_OPENCL_D3D11
+ D3D11_TEXTURE2D_DESC texDesc = { .BindFlags = D3D11_BIND_DECODER, };
+ if (frames->format == AV_PIX_FMT_D3D11)
+ frames->user_opaque = &texDesc;
+#endif
err = av_hwframe_ctx_init(ctx->hwframes_ref);
if (err < 0) {
Index: jellyfin-ffmpeg/libavfilter/vf_hwupload.c
Index: FFmpeg/libavfilter/vf_hwupload.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_hwupload.c
+++ jellyfin-ffmpeg/libavfilter/vf_hwupload.c
--- FFmpeg.orig/libavfilter/vf_hwupload.c
+++ FFmpeg/libavfilter/vf_hwupload.c
@@ -23,6 +23,10 @@
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
@ -100,12 +110,21 @@ Index: jellyfin-ffmpeg/libavfilter/vf_hwupload.c
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
@@ -151,6 +155,12 @@ static int hwupload_config_output(AVFilt
@@ -110,6 +114,9 @@ static int hwupload_config_output(AVFilt
AVFilterLink *inlink = avctx->inputs[0];
HWUploadContext *ctx = avctx->priv;
int err;
+#if HAVE_OPENCL_D3D11
+ D3D11_TEXTURE2D_DESC texDesc = { .BindFlags = D3D11_BIND_DECODER, };
+#endif
av_buffer_unref(&ctx->hwframes_ref);
@@ -151,6 +158,11 @@ static int hwupload_config_output(AVFilt
if (avctx->extra_hw_frames >= 0)
ctx->hwframes->initial_pool_size = 2 + avctx->extra_hw_frames;
+#if HAVE_OPENCL_D3D11
+ D3D11_TEXTURE2D_DESC texDesc = { .BindFlags = D3D11_BIND_DECODER, };
+ if (ctx->hwframes->format == AV_PIX_FMT_D3D11)
+ ctx->hwframes->user_opaque = &texDesc;
+#endif
@ -113,11 +132,11 @@ Index: jellyfin-ffmpeg/libavfilter/vf_hwupload.c
err = av_hwframe_ctx_init(ctx->hwframes_ref);
if (err < 0)
goto fail;
Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
Index: FFmpeg/libavutil/hwcontext_d3d11va.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
@@ -213,7 +213,7 @@ static AVBufferRef *d3d11va_alloc_single
--- FFmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ FFmpeg/libavutil/hwcontext_d3d11va.c
@@ -227,7 +227,7 @@ static AVBufferRef *d3d11va_alloc_single
.ArraySize = 1,
.Usage = D3D11_USAGE_DEFAULT,
.BindFlags = hwctx->BindFlags,
@ -126,7 +145,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
};
hr = ID3D11Device_CreateTexture2D(device_hwctx->device, &texDesc, NULL, &tex);
@@ -277,9 +277,17 @@ static int d3d11va_frames_init(AVHWFrame
@@ -291,9 +291,17 @@ static int d3d11va_frames_init(AVHWFrame
.ArraySize = ctx->initial_pool_size,
.Usage = D3D11_USAGE_DEFAULT,
.BindFlags = hwctx->BindFlags,

View File

@ -0,0 +1,184 @@
Index: FFmpeg/libavutil/hwcontext.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext.c
+++ FFmpeg/libavutil/hwcontext.c
@@ -84,21 +84,6 @@ static const char *const hw_type_names[]
[AV_HWDEVICE_TYPE_VULKAN] = "vulkan",
};
-typedef struct FFHWDeviceContext {
- /**
- * The public AVHWDeviceContext. See hwcontext.h for it.
- */
- AVHWDeviceContext p;
-
- const HWContextType *hw_type;
-
- /**
- * For a derived device, a reference to the original device
- * context it was derived from.
- */
- AVBufferRef *source_device;
-} FFHWDeviceContext;
-
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
{
int type;
@@ -143,6 +128,7 @@ static void hwdevice_ctx_free(void *opaq
{
FFHWDeviceContext *ctxi = (FFHWDeviceContext*)data;
AVHWDeviceContext *ctx = &ctxi->p;
+ int i;
/* uninit might still want access the hw context and the user
* free() callback might destroy it, so uninit has to be called first */
@@ -153,6 +139,8 @@ static void hwdevice_ctx_free(void *opaq
ctx->free(ctx);
av_buffer_unref(&ctxi->source_device);
+ for (i = 0; i < AV_HWDEVICE_TYPE_NB; i++)
+ av_buffer_unref(&ctxi->derived_devices[i]);
av_freep(&ctx->hwctx);
av_freep(&ctx);
@@ -633,6 +621,28 @@ fail:
return ret;
}
+static AVBufferRef* find_derived_hwdevice_ctx(AVBufferRef *src_ref, enum AVHWDeviceType type)
+{
+ AVBufferRef *tmp_ref;
+ FFHWDeviceContext *src_ctxi;
+ AVHWDeviceContext *src_ctx;
+ int i;
+
+ src_ctxi = (FFHWDeviceContext *)src_ref->data;
+ src_ctx = &src_ctxi->p;
+ if (src_ctx->type == type)
+ return src_ref;
+
+ for (i = 0; i < AV_HWDEVICE_TYPE_NB; i++)
+ if (src_ctxi->derived_devices[i]) {
+ tmp_ref = find_derived_hwdevice_ctx(src_ctxi->derived_devices[i], type);
+ if (tmp_ref)
+ return tmp_ref;
+ }
+
+ return NULL;
+}
+
int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr,
enum AVHWDeviceType type,
AVBufferRef *src_ref,
@@ -656,6 +666,16 @@ int av_hwdevice_ctx_create_derived_opts(
tmp_ref = tmp_ctx->source_device;
}
+ tmp_ref = find_derived_hwdevice_ctx(src_ref, type);
+ if (tmp_ref) {
+ dst_ref = av_buffer_ref(tmp_ref);
+ if (!dst_ref) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ goto done;
+ }
+
dst_ref = av_hwdevice_ctx_alloc(type);
if (!dst_ref) {
ret = AVERROR(ENOMEM);
@@ -676,6 +696,11 @@ int av_hwdevice_ctx_create_derived_opts(
ret = AVERROR(ENOMEM);
goto fail;
}
+ tmp_ctx->derived_devices[type] = av_buffer_ref(dst_ref);
+ if (!tmp_ctx->derived_devices[type]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
ret = av_hwdevice_ctx_init(dst_ref);
if (ret < 0)
goto fail;
Index: FFmpeg/libavutil/hwcontext.h
===================================================================
--- FFmpeg.orig/libavutil/hwcontext.h
+++ FFmpeg/libavutil/hwcontext.h
@@ -38,6 +38,7 @@ enum AVHWDeviceType {
AV_HWDEVICE_TYPE_MEDIACODEC,
AV_HWDEVICE_TYPE_VULKAN,
AV_HWDEVICE_TYPE_D3D12VA,
+ AV_HWDEVICE_TYPE_NB, ///< number of hw device types, not part of API/ABI.
};
/**
Index: FFmpeg/libavutil/hwcontext_internal.h
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_internal.h
+++ FFmpeg/libavutil/hwcontext_internal.h
@@ -164,4 +164,25 @@ extern const HWContextType ff_hwcontext_
extern const HWContextType ff_hwcontext_type_mediacodec;
extern const HWContextType ff_hwcontext_type_vulkan;
+typedef struct FFHWDeviceContext {
+ /**
+ * The public AVHWDeviceContext. See hwcontext.h for it.
+ */
+ AVHWDeviceContext p;
+
+ const HWContextType *hw_type;
+
+ /**
+ * For a derived device, a reference to the original device
+ * context it was derived from.
+ */
+ AVBufferRef *source_device;
+
+ /**
+ * An array of reference to device contexts which
+ * were derived from this device.
+ */
+ AVBufferRef *derived_devices[AV_HWDEVICE_TYPE_NB];
+} FFHWDeviceContext;
+
#endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
Index: FFmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_qsv.c
+++ FFmpeg/libavutil/hwcontext_qsv.c
@@ -369,7 +369,7 @@ static void qsv_frames_uninit(AVHWFrames
av_buffer_unref(&s->child_frames_ref);
}
-static void qsv_pool_release_dummy(void *opaque, uint8_t *data)
+static void qsv_release_dummy(void *opaque, uint8_t *data)
{
}
@@ -382,7 +382,7 @@ static AVBufferRef *qsv_pool_alloc(void
if (s->nb_surfaces_used < hwctx->nb_surfaces) {
s->nb_surfaces_used++;
return av_buffer_create((uint8_t*)(s->surfaces_internal + s->nb_surfaces_used - 1),
- sizeof(*hwctx->surfaces), qsv_pool_release_dummy, NULL, 0);
+ sizeof(*hwctx->surfaces), qsv_release_dummy, NULL, 0);
}
return NULL;
@@ -2272,8 +2272,17 @@ static int qsv_device_create(AVHWDeviceC
child_device = (AVHWDeviceContext*)priv->child_device_ctx->data;
impl = choose_implementation(device, child_device_type);
+ ret = qsv_device_derive_from_child(ctx, impl, child_device, 0);
+ if (ret >= 0) {
+ FFHWDeviceContext *fctx = (FFHWDeviceContext*)ctx;
+ FFHWDeviceContext *fchild_device = (FFHWDeviceContext*)child_device;
+ fctx->source_device = av_buffer_ref(priv->child_device_ctx);
+ fchild_device->derived_devices[ctx->type] = av_buffer_create((uint8_t*)fctx, sizeof(*fctx), qsv_release_dummy, fctx, 0);
+ if (!fchild_device->derived_devices[ctx->type])
+ return AVERROR(ENOMEM);
+ }
- return qsv_device_derive_from_child(ctx, impl, child_device, 0);
+ return ret;
}
const HWContextType ff_hwcontext_type_qsv = {

View File

@ -1,45 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -2128,7 +2128,6 @@ static int qsv_device_create(AVHWDeviceC
}
} else if (CONFIG_VAAPI) {
child_device_type = AV_HWDEVICE_TYPE_VAAPI;
-#if QSV_ONEVPL
} else if (CONFIG_D3D11VA) { // Use D3D11 by default if d3d11va is enabled
av_log(ctx, AV_LOG_VERBOSE,
"Defaulting child_device_type to AV_HWDEVICE_TYPE_D3D11VA for oneVPL."
@@ -2137,16 +2136,6 @@ static int qsv_device_create(AVHWDeviceC
child_device_type = AV_HWDEVICE_TYPE_D3D11VA;
} else if (CONFIG_DXVA2) {
child_device_type = AV_HWDEVICE_TYPE_DXVA2;
-#else
- } else if (CONFIG_DXVA2) {
- av_log(NULL, AV_LOG_WARNING,
- "WARNING: defaulting child_device_type to AV_HWDEVICE_TYPE_DXVA2 for compatibility "
- "with old commandlines. This behaviour will be removed "
- "in the future. Please explicitly set device type via \"-init_hw_device\" option.\n");
- child_device_type = AV_HWDEVICE_TYPE_DXVA2;
- } else if (CONFIG_D3D11VA) {
- child_device_type = AV_HWDEVICE_TYPE_D3D11VA;
-#endif
} else {
av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
return AVERROR(ENOSYS);
@@ -2202,7 +2191,14 @@ static int qsv_device_create(AVHWDeviceC
impl = choose_implementation(device, child_device_type);
- return qsv_device_derive_from_child(ctx, impl, child_device, 0);
+ ret = qsv_device_derive_from_child(ctx, impl, child_device, 0);
+ if (ret == 0) {
+ ctx->internal->source_device = av_buffer_ref(priv->child_device_ctx);
+ if (!ctx->internal->source_device)
+ ret = AVERROR(ENOMEM);
+ }
+
+ return ret;
}
const HWContextType ff_hwcontext_type_qsv = {

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
Index: FFmpeg/libavutil/hwcontext_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_opencl.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
--- FFmpeg.orig/libavutil/hwcontext_opencl.c
+++ FFmpeg/libavutil/hwcontext_opencl.c
@@ -62,6 +62,9 @@
#endif
@ -12,15 +12,15 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
#include <CL/cl_d3d11.h>
#include "hwcontext_d3d11va.h"
@@ -129,6 +132,7 @@ typedef struct OpenCLDeviceContext {
@@ -139,6 +142,7 @@ typedef struct OpenCLDeviceContext {
#if HAVE_OPENCL_D3D11
int d3d11_mapping_usable;
+ int d3d11_qsv_mapping_usable;
int d3d11_map_amd;
int d3d11_map_intel;
@@ -891,6 +895,11 @@ static int opencl_device_init(AVHWDevice
clCreateFromD3D11Texture2DKHR_fn
@@ -906,6 +910,11 @@ static int opencl_device_init(AVHWDevice
priv->d3d11_mapping_usable = 0;
} else {
priv->d3d11_mapping_usable = 1;
@ -32,7 +32,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
}
}
#endif
@@ -1764,18 +1773,20 @@ static void opencl_frames_uninit(AVHWFra
@@ -1785,18 +1794,20 @@ static void opencl_frames_uninit(AVHWFra
#if HAVE_OPENCL_DXVA2 || HAVE_OPENCL_D3D11
int i, p;
@ -62,7 +62,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
#endif
if (priv->command_queue) {
@@ -2552,6 +2563,226 @@ fail:
@@ -2572,6 +2583,233 @@ fail:
#if HAVE_OPENCL_D3D11
@ -72,8 +72,8 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ HWMapDescriptor *hwmap)
+{
+ AVOpenCLFrameDescriptor *desc = hwmap->priv;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
+ OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
+ OpenCLFramesContext *frames_priv = dst_fc->hwctx;
+ cl_event event;
+ cl_int cle;
+ int p;
@ -106,9 +106,9 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+static int opencl_map_from_d3d11_qsv(AVHWFramesContext *dst_fc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
+ OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
+ OpenCLFramesContext *frames_priv = dst_fc->hwctx;
+ AVOpenCLDeviceContext *dst_dev = &device_priv->p;
+ mfxFrameSurface1 *mfx_surface = (mfxFrameSurface1*)src->data[3];
+ mfxHDLPair *pair = (mfxHDLPair*)mfx_surface->Data.MemId;
+ ID3D11Texture2D *tex = (ID3D11Texture2D*)pair->first;
@ -116,24 +116,26 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ cl_mem_flags cl_flags;
+ cl_event event;
+ cl_int cle;
+ int err, p, index, decoder_target;
+ int err, p, index, derived_frames;
+
+ cl_flags = opencl_mem_flags_for_mapping(flags);
+ if (!cl_flags)
+ return AVERROR(EINVAL);
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Map QSV surface %#x to OpenCL.\n", pair);
+ av_log(dst_fc, AV_LOG_DEBUG, "Map QSV surface %#llx to OpenCL.\n", (uintptr_t)pair);
+
+ index = (intptr_t)pair->second;
+ decoder_target = index >= 0 && index != MFX_INFINITE;
+
+ if (decoder_target && index >= frames_priv->nb_mapped_frames) {
+ av_log(dst_fc, AV_LOG_ERROR, "Texture array index out of range for "
+ "mapping: %d >= %d.\n", index, frames_priv->nb_mapped_frames);
+ return AVERROR(EINVAL);
+ derived_frames = frames_priv->nb_mapped_frames > 0;
+ if (derived_frames) {
+ av_assert0(index >= 0 && index != MFX_INFINITE);
+ if (index >= frames_priv->nb_mapped_frames) {
+ av_log(dst_fc, AV_LOG_ERROR, "Texture array index out of range for "
+ "mapping: %d >= %d.\n", index, frames_priv->nb_mapped_frames);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (decoder_target) {
+ if (derived_frames) {
+ desc = &frames_priv->mapped_frames[index];
+ } else {
+ desc = av_mallocz(sizeof(*desc));
@ -190,7 +192,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ if (cle == CL_SUCCESS)
+ opencl_wait_events(dst_fc, &event, 1);
+fail2:
+ if (!decoder_target) {
+ if (!derived_frames) {
+ for (p = 0; p < desc->nb_planes; p++) {
+ if (desc->planes[p])
+ clReleaseMemObject(desc->planes[p]);
@ -204,16 +206,15 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+static int opencl_frames_derive_from_d3d11_qsv(AVHWFramesContext *dst_fc,
+ AVHWFramesContext *src_fc, int flags)
+{
+ AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+ AVQSVFramesContext *src_hwctx = src_fc->hwctx;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
+ OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
+ OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
+ AVOpenCLDeviceContext *dst_dev = &device_priv->p;
+ OpenCLFramesContext *frames_priv = dst_fc->hwctx;
+ cl_mem_flags cl_flags;
+ cl_int cle;
+ int err, i, p, nb_planes = 2;
+
+ mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
+ ID3D11Texture2D *tex = (ID3D11Texture2D*)pair->first;
+ mfxHDLPair *pair = NULL;
+ ID3D11Texture2D *tex = NULL;
+
+ if (src_fc->sw_format != AV_PIX_FMT_NV12 &&
+ src_fc->sw_format != AV_PIX_FMT_P010) {
@ -223,19 +224,25 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ }
+
+ if (src_fc->initial_pool_size == 0) {
+ av_log(dst_fc, AV_LOG_ERROR, "Only fixed-size pools are supported "
+ "for QSV with D3D11 to OpenCL mapping.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!(src_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET) ||
+ (src_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
+ (src_hwctx->frame_type & MFX_MEMTYPE_FROM_VPPOUT)) {
+ av_log(dst_fc, AV_LOG_DEBUG, "Non-DECODER_TARGET direct input for QSV "
+ av_log(dst_fc, AV_LOG_DEBUG, "Non fixed-size pools input for QSV "
+ "with D3D11 to OpenCL mapping.\n");
+ return 0;
+ }
+
+ if ((src_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
+ (src_hwctx->frame_type & MFX_MEMTYPE_FROM_VPPOUT)) {
+ av_log(dst_fc, AV_LOG_DEBUG, "MFX memtype VPP input for QSV "
+ "with D3D11 to OpenCL mapping.\n");
+ return 0;
+ }
+
+ if (!src_hwctx->surfaces)
+ return AVERROR(ENOMEM);
+ pair = (mfxHDLPair*)src_hwctx->surfaces[0].Data.MemId;
+ if (!pair)
+ return AVERROR(ENOMEM);
+ tex = (ID3D11Texture2D*)pair->first;
+
+ cl_flags = opencl_mem_flags_for_mapping(flags);
+ if (!cl_flags)
+ return AVERROR(EINVAL);
@ -289,7 +296,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
static void opencl_unmap_from_d3d11(AVHWFramesContext *dst_fc,
HWMapDescriptor *hwmap)
{
@@ -2994,6 +3225,11 @@ static int opencl_map_to(AVHWFramesConte
@@ -3096,6 +3334,11 @@ static int opencl_map_to(AVHWFramesConte
return opencl_map_from_dxva2(hwfc, dst, src, flags);
#endif
#if HAVE_OPENCL_D3D11
@ -301,7 +308,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
case AV_PIX_FMT_D3D11:
if (priv->d3d11_mapping_usable)
return opencl_map_from_d3d11(hwfc, dst, src, flags);
@@ -3044,6 +3280,18 @@ static int opencl_frames_derive_to(AVHWF
@@ -3150,6 +3393,18 @@ static int opencl_frames_derive_to(AVHWF
break;
#endif
#if HAVE_OPENCL_D3D11

View File

@ -0,0 +1,13 @@
Index: FFmpeg/libavutil/hwcontext_d3d11va.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ FFmpeg/libavutil/hwcontext_d3d11va.c
@@ -638,6 +638,8 @@ static int d3d11va_device_create(AVHWDev
adapter = atoi(device);
} else {
AVDictionaryEntry *e = av_dict_get(opts, "vendor_id", NULL, 0);
+ if (!e || !e->value)
+ e = av_dict_get(opts, "vendor", NULL, 0); // for backward compatibility
if (e && e->value) {
adapter = d3d11va_device_find_adapter_by_vendor_id(ctx, creationFlags, e->value);
if (adapter < 0) {

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/configure
Index: FFmpeg/configure
===================================================================
--- jellyfin-ffmpeg.orig/configure
+++ jellyfin-ffmpeg/configure
@@ -3680,6 +3680,7 @@ gblur_vulkan_filter_deps="vulkan spirv_c
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3840,6 +3840,7 @@ gblur_vulkan_filter_deps="vulkan spirv_c
hflip_vulkan_filter_deps="vulkan spirv_compiler"
histeq_filter_deps="gpl"
hqdn3d_filter_deps="gpl"
@ -10,11 +10,11 @@ Index: jellyfin-ffmpeg/configure
iccdetect_filter_deps="lcms2"
iccgen_filter_deps="lcms2"
interlace_filter_deps="gpl"
Index: jellyfin-ffmpeg/libavfilter/Makefile
Index: FFmpeg/libavfilter/Makefile
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/Makefile
+++ jellyfin-ffmpeg/libavfilter/Makefile
@@ -336,6 +336,7 @@ OBJS-$(CONFIG_HUESATURATION_FILTER)
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -346,6 +346,7 @@ OBJS-$(CONFIG_HUESATURATION_FILTER)
OBJS-$(CONFIG_HWDOWNLOAD_FILTER) += vf_hwdownload.o
OBJS-$(CONFIG_HWMAP_FILTER) += vf_hwmap.o
OBJS-$(CONFIG_HWUPLOAD_CUDA_FILTER) += vf_hwupload_cuda.o
@ -22,11 +22,11 @@ Index: jellyfin-ffmpeg/libavfilter/Makefile
OBJS-$(CONFIG_HWUPLOAD_FILTER) += vf_hwupload.o
OBJS-$(CONFIG_HYSTERESIS_FILTER) += vf_hysteresis.o framesync.o
OBJS-$(CONFIG_ICCDETECT_FILTER) += vf_iccdetect.o fflcms2.o
Index: jellyfin-ffmpeg/libavfilter/allfilters.c
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/allfilters.c
+++ jellyfin-ffmpeg/libavfilter/allfilters.c
@@ -315,6 +315,7 @@ extern const AVFilter ff_vf_hwdownload;
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -324,6 +324,7 @@ extern const AVFilter ff_vf_hwdownload;
extern const AVFilter ff_vf_hwmap;
extern const AVFilter ff_vf_hwupload;
extern const AVFilter ff_vf_hwupload_cuda;
@ -34,10 +34,10 @@ Index: jellyfin-ffmpeg/libavfilter/allfilters.c
extern const AVFilter ff_vf_hysteresis;
extern const AVFilter ff_vf_iccdetect;
extern const AVFilter ff_vf_iccgen;
Index: jellyfin-ffmpeg/libavfilter/vf_hwupload_vaapi.c
Index: FFmpeg/libavfilter/vf_hwupload_vaapi.c
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/vf_hwupload_vaapi.c
+++ FFmpeg/libavfilter/vf_hwupload_vaapi.c
@@ -0,0 +1,193 @@
+/*
+ * This file is part of FFmpeg.

View File

@ -1,97 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
@@ -89,6 +89,13 @@ static const struct {
{ DXGI_FORMAT_B8G8R8A8_UNORM, AV_PIX_FMT_BGRA },
{ DXGI_FORMAT_R10G10B10A2_UNORM, AV_PIX_FMT_X2BGR10 },
{ DXGI_FORMAT_R16G16B16A16_FLOAT, AV_PIX_FMT_RGBAF16 },
+ { DXGI_FORMAT_AYUV, AV_PIX_FMT_VUYX },
+ { DXGI_FORMAT_YUY2, AV_PIX_FMT_YUYV422 },
+ { DXGI_FORMAT_Y210, AV_PIX_FMT_Y210 },
+ { DXGI_FORMAT_Y410, AV_PIX_FMT_XV30 },
+ { DXGI_FORMAT_P016, AV_PIX_FMT_P012 },
+ { DXGI_FORMAT_Y216, AV_PIX_FMT_Y212 },
+ { DXGI_FORMAT_Y416, AV_PIX_FMT_XV36 },
// Special opaque formats. The pix_fmt is merely a place holder, as the
// opaque format cannot be accessed directly.
{ DXGI_FORMAT_420_OPAQUE, AV_PIX_FMT_YUV420P },
@@ -559,9 +566,12 @@ static int d3d11va_device_create(AVHWDev
AVD3D11VADeviceContext *device_hwctx = ctx->hwctx;
HRESULT hr;
+ AVDictionaryEntry *e;
IDXGIAdapter *pAdapter = NULL;
ID3D10Multithread *pMultithread;
UINT creationFlags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
+ int adapter = -1;
+ long int vendor_id = -1;
int is_debug = !!av_dict_get(opts, "debug", NULL, 0);
int ret;
@@ -581,13 +591,45 @@ static int d3d11va_device_create(AVHWDev
return AVERROR_UNKNOWN;
}
+ e = av_dict_get(opts, "vendor", NULL, 0);
+ if (e) {
+ vendor_id = strtol(e->value, NULL, 0);
+ }
+
if (device) {
+ adapter = atoi(device);
+ }
+
+ if (adapter >= 0 || vendor_id != -1) {
IDXGIFactory2 *pDXGIFactory;
hr = mCreateDXGIFactory(&IID_IDXGIFactory2, (void **)&pDXGIFactory);
if (SUCCEEDED(hr)) {
- int adapter = atoi(device);
- if (FAILED(IDXGIFactory2_EnumAdapters(pDXGIFactory, adapter, &pAdapter)))
+ if (adapter < 0) {
+ int adapter_cnt = 0;
+ while (IDXGIFactory2_EnumAdapters(pDXGIFactory, adapter_cnt++, &pAdapter) != DXGI_ERROR_NOT_FOUND) {
+ DXGI_ADAPTER_DESC adapter_desc;
+ hr = IDXGIAdapter2_GetDesc(pAdapter, &adapter_desc);
+ if (FAILED(hr)) {
+ av_log(ctx, AV_LOG_ERROR, "IDXGIAdapter2_GetDesc returned error with adapter id %d\n", adapter_cnt);
+ continue;
+ }
+
+ if (adapter_desc.VendorId == vendor_id) {
+ break;
+ }
+
+ if (adapter)
+ IDXGIAdapter_Release(pAdapter);
+ }
+ if (adapter_cnt < 0) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to find d3d11va adapter by vendor id %ld\n", vendor_id);
+ IDXGIFactory2_Release(pDXGIFactory);
+ return AVERROR_UNKNOWN;
+ }
+ } else {
+ if (FAILED(IDXGIFactory2_EnumAdapters(pDXGIFactory, adapter, &pAdapter)))
pAdapter = NULL;
+ }
IDXGIFactory2_Release(pDXGIFactory);
}
}
Index: jellyfin-ffmpeg/libavutil/hwcontext_dxva2.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_dxva2.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_dxva2.c
@@ -82,6 +82,13 @@ static const struct {
} supported_formats[] = {
{ MKTAG('N', 'V', '1', '2'), AV_PIX_FMT_NV12 },
{ MKTAG('P', '0', '1', '0'), AV_PIX_FMT_P010 },
+ { MKTAG('A', 'Y', 'U', 'V'), AV_PIX_FMT_VUYX },
+ { MKTAG('Y', 'U', 'Y', '2'), AV_PIX_FMT_YUYV422 },
+ { MKTAG('Y', '2', '1', '0'), AV_PIX_FMT_Y210 },
+ { MKTAG('Y', '4', '1', '0'), AV_PIX_FMT_XV30 },
+ { MKTAG('P', '0', '1', '6'), AV_PIX_FMT_P012 },
+ { MKTAG('Y', '2', '1', '6'), AV_PIX_FMT_Y212 },
+ { MKTAG('Y', '4', '1', '6'), AV_PIX_FMT_XV36 },
{ D3DFMT_P8, AV_PIX_FMT_PAL8 },
{ D3DFMT_A8R8G8B8, AV_PIX_FMT_BGRA },
};

View File

@ -0,0 +1,17 @@
Index: FFmpeg/libavfilter/vf_overlay_vaapi.c
===================================================================
--- FFmpeg.orig/libavfilter/vf_overlay_vaapi.c
+++ FFmpeg/libavfilter/vf_overlay_vaapi.c
@@ -311,8 +311,12 @@ static int overlay_vaapi_config_input_ov
ctx->blend_alpha = ctx->alpha;
}
+ // VA_BLEND_PREMULTIPLIED_ALPHA may cause issues in
+ // per-pixel alpha case, disable it to align with MSDK.
+#if 0
if (have_alpha_planar(inlink))
ctx->blend_flags |= VA_BLEND_PREMULTIPLIED_ALPHA;
+#endif
return 0;
}

View File

@ -1,22 +1,20 @@
Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
Index: FFmpeg/libavfilter/vf_tonemap_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_tonemap_vaapi.c
+++ jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
@@ -39,7 +39,13 @@ typedef struct HDRVAAPIContext {
--- FFmpeg.orig/libavfilter/vf_tonemap_vaapi.c
+++ FFmpeg/libavfilter/vf_tonemap_vaapi.c
@@ -39,7 +39,11 @@ typedef struct HDRVAAPIContext {
enum AVColorTransferCharacteristic color_transfer;
enum AVColorSpace color_matrix;
+ char *in_master_display;
+ char *in_content_light;
+ char *out_master_display;
+ char *out_content_light;
+ char *mastering_display;
+ char *content_light;
+
VAHdrMetaDataHDR10 in_metadata;
+ VAHdrMetaDataHDR10 out_metadata;
AVFrameSideData *src_display;
AVFrameSideData *src_light;
@@ -52,7 +58,7 @@ static int tonemap_vaapi_save_metadata(A
@@ -52,7 +56,7 @@ static int tonemap_vaapi_save_metadata(A
AVContentLightMetadata *light_meta;
if (input_frame->color_trc != AVCOL_TRC_SMPTE2084) {
@ -25,7 +23,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
}
ctx->src_display = av_frame_get_side_data(input_frame,
@@ -60,8 +66,7 @@ static int tonemap_vaapi_save_metadata(A
@@ -60,8 +64,7 @@ static int tonemap_vaapi_save_metadata(A
if (ctx->src_display) {
hdr_meta = (AVMasteringDisplayMetadata *)ctx->src_display->data;
if (!hdr_meta) {
@ -35,7 +33,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
}
if (hdr_meta->has_luminance) {
@@ -118,8 +123,7 @@ static int tonemap_vaapi_save_metadata(A
@@ -118,8 +121,7 @@ static int tonemap_vaapi_save_metadata(A
ctx->in_metadata.white_point_y);
}
} else {
@ -45,7 +43,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
}
ctx->src_light = av_frame_get_side_data(input_frame,
@@ -127,8 +131,7 @@ static int tonemap_vaapi_save_metadata(A
@@ -127,8 +129,7 @@ static int tonemap_vaapi_save_metadata(A
if (ctx->src_light) {
light_meta = (AVContentLightMetadata *)ctx->src_light->data;
if (!light_meta) {
@ -55,7 +53,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
}
ctx->in_metadata.max_content_light_level = light_meta->MaxCLL;
@@ -146,6 +149,107 @@ static int tonemap_vaapi_save_metadata(A
@@ -146,6 +147,87 @@ static int tonemap_vaapi_save_metadata(A
return 0;
}
@ -66,25 +64,16 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
+ AVMasteringDisplayMetadata *hdr_meta;
+ AVFrameSideData *metadata_lt;
+ AVContentLightMetadata *hdr_meta_lt;
+
+ int i;
+ const int mapping[3] = {1, 2, 0}; //green, blue, red
+ const int chroma_den = 50000;
+ const int luma_den = 10000;
+
+ metadata = av_frame_get_side_data(output_frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+ if (metadata) {
+ av_frame_remove_side_data(output_frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+ metadata = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ sizeof(AVMasteringDisplayMetadata));
+ } else {
+ metadata = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ sizeof(AVMasteringDisplayMetadata));
+ }
+ metadata = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+ sizeof(AVMasteringDisplayMetadata));
+ if (!metadata)
+ return AVERROR(ENOMEM);
+
+ hdr_meta = (AVMasteringDisplayMetadata *)metadata->data;
+
@ -112,14 +101,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
+ hdr_meta->has_luminance = 1;
+
+ av_log(avctx, AV_LOG_DEBUG,
+ "Mastering Display Metadata(out luminance):\n");
+ av_log(avctx, AV_LOG_DEBUG,
+ "min_luminance=%u, max_luminance=%u\n",
+ ctx->out_metadata.min_display_mastering_luminance,
+ ctx->out_metadata.max_display_mastering_luminance);
+
+ av_log(avctx, AV_LOG_DEBUG,
+ "Mastering Display Metadata(out primaries):\n");
+ "Mastering display colour volume(out):\n");
+ av_log(avctx, AV_LOG_DEBUG,
+ "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
+ ctx->out_metadata.display_primaries_x[0],
@ -130,20 +112,16 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
+ ctx->out_metadata.display_primaries_y[2],
+ ctx->out_metadata.white_point_x,
+ ctx->out_metadata.white_point_y);
+ av_log(avctx, AV_LOG_DEBUG,
+ "max_display_mastering_luminance=%u, min_display_mastering_luminance=%u\n",
+ ctx->out_metadata.max_display_mastering_luminance,
+ ctx->out_metadata.min_display_mastering_luminance);
+
+ metadata_lt = av_frame_get_side_data(output_frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ if (metadata_lt) {
+ av_frame_remove_side_data(output_frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ metadata_lt = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+ sizeof(AVContentLightMetadata));
+ } else {
+ metadata_lt = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+ sizeof(AVContentLightMetadata));
+ }
+ metadata_lt = av_frame_new_side_data(output_frame,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+ sizeof(AVContentLightMetadata));
+ if (!metadata_lt)
+ return AVERROR(ENOMEM);
+
+ hdr_meta_lt = (AVContentLightMetadata *)metadata_lt->data;
+
@ -151,7 +129,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
+ hdr_meta_lt->MaxFALL = FFMIN(ctx->out_metadata.max_pic_average_light_level, 65535);
+
+ av_log(avctx, AV_LOG_DEBUG,
+ "Mastering Content Light Level (out):\n");
+ "Content light level information(out):\n");
+ av_log(avctx, AV_LOG_DEBUG,
+ "MaxCLL(%u) MaxFALL(%u)\n",
+ ctx->out_metadata.max_content_light_level,
@ -163,7 +141,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx, AVFrame *input_frame)
{
VAAPIVPPContext *vpp_ctx = avctx->priv;
@@ -208,15 +312,26 @@ static int tonemap_vaapi_build_filter_pa
@@ -208,15 +290,26 @@ static int tonemap_vaapi_build_filter_pa
return AVERROR(EINVAL);
}
@ -176,7 +154,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
- av_log(avctx, AV_LOG_ERROR,
- "VAAPI driver doesn't support HDR to SDR\n");
- return AVERROR(EINVAL);
+ if (ctx->color_transfer == AVCOL_TRC_SMPTE2084) {
+ if (ctx->mastering_display) {
+ for (i = 0; i < num_query_caps; i++) {
+ if (VA_TONE_MAPPING_HDR_TO_HDR & hdr_cap[i].caps_flag)
+ break;
@ -199,7 +177,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
}
hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping;
@@ -241,6 +356,8 @@ static int tonemap_vaapi_filter_frame(AV
@@ -241,6 +334,8 @@ static int tonemap_vaapi_filter_frame(AV
VAProcPipelineParameterBuffer params;
int err;
@ -208,116 +186,126 @@ Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_vaapi.c
av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
av_get_pix_fmt_name(input_frame->format),
input_frame->width, input_frame->height, input_frame->pts);
@@ -250,9 +367,11 @@ static int tonemap_vaapi_filter_frame(AV
return AVERROR(EINVAL);
}
- err = tonemap_vaapi_save_metadata(avctx, input_frame);
- if (err < 0)
- goto fail;
+ if (!ctx->in_master_display && !ctx->in_content_light) {
+ err = tonemap_vaapi_save_metadata(avctx, input_frame);
+ if (err < 0)
+ goto fail;
+ }
err = tonemap_vaapi_set_filter_params(avctx, input_frame);
@@ -278,22 +373,43 @@ static int tonemap_vaapi_filter_frame(AV
if (err < 0)
@@ -289,6 +408,21 @@ static int tonemap_vaapi_filter_frame(AV
goto fail;
+ av_frame_remove_side_data(output_frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ av_frame_remove_side_data(output_frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+
+ if (!ctx->mastering_display) {
+ /* Use BT709 by default for HDR to SDR output frame */
+ output_frame->color_primaries = AVCOL_PRI_BT709;
+ output_frame->color_trc = AVCOL_TRC_BT709;
+ output_frame->colorspace = AVCOL_SPC_BT709;
+ }
+
if (ctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
output_frame->color_primaries = ctx->color_primaries;
if (ctx->color_transfer != AVCOL_TRC_UNSPECIFIED)
output_frame->color_trc = ctx->color_transfer;
- else
- output_frame->color_trc = AVCOL_TRC_BT709;
if (ctx->color_matrix != AVCOL_SPC_UNSPECIFIED)
output_frame->colorspace = ctx->color_matrix;
+ if (output_frame->color_trc == AVCOL_TRC_SMPTE2084) {
+ if (ctx->mastering_display) {
+ err = tonemap_vaapi_update_sidedata(avctx, output_frame);
+ if (err < 0)
+ goto fail;
+
+ out_hdr_metadata.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
+ out_hdr_metadata.metadata = &ctx->out_metadata;
+ out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);
+
+ params.output_hdr_metadata = &out_hdr_metadata;
+ } else {
+ av_frame_remove_side_data(output_frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ av_frame_remove_side_data(output_frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+ }
+
err = ff_vaapi_vpp_init_params(avctx, &params,
input_frame, output_frame);
if (err < 0)
@@ -358,6 +492,60 @@ static av_cold int tonemap_vaapi_init(AV
goto fail;
+ if (ctx->mastering_display) {
+ out_hdr_metadata.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
+ out_hdr_metadata.metadata = &ctx->out_metadata;
+ out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);
+ params.output_hdr_metadata = &out_hdr_metadata;
+ }
+
if (vpp_ctx->nb_filter_buffers) {
params.filters = &vpp_ctx->filter_buffers[0];
params.num_filters = vpp_ctx->nb_filter_buffers;
@@ -309,9 +425,6 @@ static int tonemap_vaapi_filter_frame(AV
av_get_pix_fmt_name(output_frame->format),
output_frame->width, output_frame->height, output_frame->pts);
- av_frame_remove_side_data(output_frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
- av_frame_remove_side_data(output_frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
-
return ff_filter_frame(outlink, output_frame);
fail:
@@ -332,8 +445,13 @@ static av_cold int tonemap_vaapi_init(AV
if (ctx->output_format_string) {
vpp_ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
} else {
- vpp_ctx->output_format = AV_PIX_FMT_NV12;
- av_log(avctx, AV_LOG_WARNING, "Output format not set, use default format NV12\n");
+ if (ctx->mastering_display) {
+ vpp_ctx->output_format = AV_PIX_FMT_P010;
+ av_log(avctx, AV_LOG_VERBOSE, "Output format not set, use default format P010 for HDR to HDR tone mapping.\n");
+ } else {
+ vpp_ctx->output_format = AV_PIX_FMT_NV12;
+ av_log(avctx, AV_LOG_VERBOSE, "Output format not set, use default format NV12 for HDR to SDR tone mapping.\n");
+ }
}
#define STRING_OPTION(var_name, func_name, default_value) do { \
@@ -353,6 +471,37 @@ static av_cold int tonemap_vaapi_init(AV
STRING_OPTION(color_transfer, color_transfer, AVCOL_TRC_UNSPECIFIED);
STRING_OPTION(color_matrix, color_space, AVCOL_SPC_UNSPECIFIED);
+#define READ_DISPLAY_OPTION(in_or_out) do { \
+ if (10 != sscanf(ctx->in_or_out ## _master_display, \
+ "G(%hu|%hu)B(%hu|%hu)R(%hu|%hu)WP(%hu|%hu)L(%u|%u)", \
+ &ctx->in_or_out ## _metadata.display_primaries_x[0], \
+ &ctx->in_or_out ## _metadata.display_primaries_y[0], \
+ &ctx->in_or_out ## _metadata.display_primaries_x[1], \
+ &ctx->in_or_out ## _metadata.display_primaries_y[1], \
+ &ctx->in_or_out ## _metadata.display_primaries_x[2], \
+ &ctx->in_or_out ## _metadata.display_primaries_y[2], \
+ &ctx->in_or_out ## _metadata.white_point_x, \
+ &ctx->in_or_out ## _metadata.white_point_y, \
+ &ctx->in_or_out ## _metadata.min_display_mastering_luminance, \
+ &ctx->in_or_out ## _metadata.max_display_mastering_luminance)) { \
+ av_log(avctx, AV_LOG_ERROR, \
+ "Option " #in_or_out "-mastering-display input invalid\n"); \
+ return AVERROR(EINVAL); \
+ } \
+ } while (0)
+
+#define READ_LIGHT_OPTION(in_or_out) do { \
+ if (2 != sscanf(ctx->in_or_out ## _content_light, \
+ "CLL(%hu)FALL(%hu)", \
+ &ctx->in_or_out ## _metadata.max_content_light_level, \
+ &ctx->in_or_out ## _metadata.max_pic_average_light_level)) { \
+ av_log(avctx, AV_LOG_ERROR, \
+ "Option " #in_or_out "-content-light input invalid\n"); \
+ return AVERROR(EINVAL); \
+ } \
+ } while (0)
+
+ if (ctx->in_master_display) {
+ READ_DISPLAY_OPTION(in);
+ }
+
+ if (ctx->in_content_light) {
+ READ_LIGHT_OPTION(in);
+ }
+
+ if (ctx->color_transfer == AVCOL_TRC_SMPTE2084) {
+ if (!ctx->out_master_display) {
+ if (ctx->mastering_display) {
+ if (10 != sscanf(ctx->mastering_display,
+ "%hu %hu|%hu %hu|%hu %hu|%hu %hu|%u %u",
+ &ctx->out_metadata.display_primaries_x[0],
+ &ctx->out_metadata.display_primaries_y[0],
+ &ctx->out_metadata.display_primaries_x[1],
+ &ctx->out_metadata.display_primaries_y[1],
+ &ctx->out_metadata.display_primaries_x[2],
+ &ctx->out_metadata.display_primaries_y[2],
+ &ctx->out_metadata.white_point_x,
+ &ctx->out_metadata.white_point_y,
+ &ctx->out_metadata.min_display_mastering_luminance,
+ &ctx->out_metadata.max_display_mastering_luminance)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "H2H tone-mapping requires valid out-mastering-display metadata\n");
+ "Option mastering-display input invalid\n");
+ return AVERROR(EINVAL);
+ }
+ READ_DISPLAY_OPTION(out);
+
+ if (!ctx->out_content_light) {
+ if (!ctx->content_light) {
+ ctx->out_metadata.max_content_light_level = 0;
+ ctx->out_metadata.max_pic_average_light_level = 0;
+ } else if (2 != sscanf(ctx->content_light,
+ "%hu %hu",
+ &ctx->out_metadata.max_content_light_level,
+ &ctx->out_metadata.max_pic_average_light_level)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "H2H tone-mapping requires valid out-content-light metadata\n");
+ "Option content-light input invalid\n");
+ return AVERROR(EINVAL);
+ }
+ READ_LIGHT_OPTION(out);
+ }
+
return 0;
}
@@ -383,10 +571,13 @@ static const AVOption tonemap_vaapi_opti
@@ -378,6 +527,12 @@ static const AVOption tonemap_vaapi_opti
{ "t", "Output color transfer characteristics set",
OFFSET(color_transfer_string), AV_OPT_TYPE_STRING,
{ .str = NULL }, .flags = FLAGS, "transfer" },
+ { "indisplay", "Set input mastering display", OFFSET(in_master_display), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "inlight", "Set input content light", OFFSET(in_content_light), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "outdisplay", "Set output mastering display for H2H", OFFSET(out_master_display), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
+ { "outlight", "Set output content light for H2H", OFFSET(out_content_light), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
{ .str = NULL }, .flags = FLAGS, .unit = "transfer" },
+ { "display", "set mastering display colour volume",
+ OFFSET(mastering_display), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
+ { "light", "set content light level information",
+ OFFSET(content_light), AV_OPT_TYPE_STRING,
+ { .str = NULL }, .flags = FLAGS },
{ NULL }
};
-
AVFILTER_DEFINE_CLASS(tonemap_vaapi);
static const AVFilterPad tonemap_vaapi_inputs[] = {

View File

@ -1,28 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_vaapi.c
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_vaapi.c
@@ -236,8 +236,9 @@ static int overlay_vaapi_blend(FFFrameSy
blend_state.global_alpha = ctx->blend_alpha;
params[1].blend_state = &blend_state;
- params[1].surface = (VASurfaceID)(uintptr_t)input_overlay->data[3];
- params[1].output_region = &overlay_region;
+ params[1].surface = (VASurfaceID)(uintptr_t)input_overlay->data[3];
+ params[1].surface_region = NULL;
+ params[1].output_region = &overlay_region;
}
err = ff_vaapi_vpp_render_pictures(avctx, params, input_overlay ? 2 : 1, output);
@@ -312,8 +313,10 @@ static int overlay_vaapi_config_input_ov
ctx->blend_alpha = ctx->alpha;
}
+ // VA_BLEND_PREMULTIPLIED_ALPHA may cause issues in
+ // per-pixel alpha case, disable it to align with MSDK.
if (have_alpha_planar(inlink))
- ctx->blend_flags |= VA_BLEND_PREMULTIPLIED_ALPHA;
+ ctx->blend_flags |= 0;
return 0;
}

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavcodec/nvdec.c
Index: FFmpeg/libavcodec/nvdec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/nvdec.c
+++ jellyfin-ffmpeg/libavcodec/nvdec.c
@@ -306,8 +306,10 @@ static int nvdec_init_hwframes(AVCodecCo
--- FFmpeg.orig/libavcodec/nvdec.c
+++ FFmpeg/libavcodec/nvdec.c
@@ -299,8 +299,10 @@ static int nvdec_init_hwframes(AVCodecCo
frames_ctx = (AVHWFramesContext*)(*out_frames_ref)->data;
if (dummy) {

View File

@ -0,0 +1,607 @@
Index: FFmpeg/libavcodec/vaapi_av1.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_av1.c
+++ FFmpeg/libavcodec/vaapi_av1.c
@@ -19,6 +19,7 @@
*/
#include "libavutil/frame.h"
+#include "libavutil/mem.h"
#include "hwaccel_internal.h"
#include "vaapi_decode.h"
#include "internal.h"
@@ -42,6 +43,9 @@ typedef struct VAAPIAV1DecContext {
*/
VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
AVFrame *tmp_frame;
+
+ int nb_slice_params;
+ VASliceParameterBufferAV1 *slice_params;
} VAAPIAV1DecContext;
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
@@ -97,6 +101,8 @@ static int vaapi_av1_decode_uninit(AVCod
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++)
av_frame_free(&ctx->ref_tab[i].frame);
+ av_freep(&ctx->slice_params);
+
return ff_vaapi_decode_uninit(avctx);
}
@@ -393,13 +399,25 @@ static int vaapi_av1_decode_slice(AVCode
{
const AV1DecContext *s = avctx->priv_data;
VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
- VASliceParameterBufferAV1 slice_param;
- int err = 0;
+ VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
+ int err, nb_params;
- for (int i = s->tg_start; i <= s->tg_end; i++) {
- memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));
+ nb_params = s->tg_end - s->tg_start + 1;
+ if (ctx->nb_slice_params < nb_params) {
+ VASliceParameterBufferAV1 *tmp = av_realloc_array(ctx->slice_params,
+ nb_params,
+ sizeof(*ctx->slice_params));
+ if (!tmp) {
+ ctx->nb_slice_params = 0;
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ctx->slice_params = tmp;
+ ctx->nb_slice_params = nb_params;
+ }
- slice_param = (VASliceParameterBufferAV1) {
+ for (int i = s->tg_start; i <= s->tg_end; i++) {
+ ctx->slice_params[i - s->tg_start] = (VASliceParameterBufferAV1) {
.slice_data_size = s->tile_group_info[i].tile_size,
.slice_data_offset = s->tile_group_info[i].tile_offset,
.slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
@@ -408,18 +426,20 @@ static int vaapi_av1_decode_slice(AVCode
.tg_start = s->tg_start,
.tg_end = s->tg_end,
};
-
- err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
- sizeof(VASliceParameterBufferAV1),
- buffer,
- size);
- if (err) {
- ff_vaapi_decode_cancel(avctx, pic);
- return err;
- }
}
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, ctx->slice_params, nb_params,
+ sizeof(VASliceParameterBufferAV1),
+ buffer,
+ size);
+ if (err)
+ goto fail;
+
return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
}
const FFHWAccel ff_av1_vaapi_hwaccel = {
Index: FFmpeg/libavcodec/vaapi_decode.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_decode.c
+++ FFmpeg/libavcodec/vaapi_decode.c
@@ -62,6 +62,7 @@ int ff_vaapi_decode_make_param_buffer(AV
int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
VAAPIDecodePicture *pic,
const void *params_data,
+ int nb_params,
size_t params_size,
const void *slice_data,
size_t slice_size)
@@ -72,13 +73,14 @@ int ff_vaapi_decode_make_slice_buffer(AV
av_assert0(pic->nb_slices <= pic->slices_allocated);
if (pic->nb_slices == pic->slices_allocated) {
- pic->slice_buffers =
+ VABufferID *tmp =
av_realloc_array(pic->slice_buffers,
pic->slices_allocated ? pic->slices_allocated * 2 : 64,
2 * sizeof(*pic->slice_buffers));
- if (!pic->slice_buffers)
+ if (!tmp)
return AVERROR(ENOMEM);
+ pic->slice_buffers = tmp;
pic->slices_allocated = pic->slices_allocated ? pic->slices_allocated * 2 : 64;
}
av_assert0(pic->nb_slices + 1 <= pic->slices_allocated);
@@ -87,7 +89,7 @@ int ff_vaapi_decode_make_slice_buffer(AV
vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
VASliceParameterBufferType,
- params_size, 1, (void*)params_data,
+ params_size, nb_params, (void*)params_data,
&pic->slice_buffers[index]);
if (vas != VA_STATUS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "Failed to create slice "
@@ -155,6 +157,11 @@ int ff_vaapi_decode_issue(AVCodecContext
VAStatus vas;
int err;
+ if (pic->nb_slices <= 0) {
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
av_log(avctx, AV_LOG_DEBUG, "Decode to surface %#x.\n",
pic->output_surface);
@@ -598,22 +605,26 @@ static int vaapi_decode_make_config(AVCo
if (err < 0)
goto fail;
- frames->initial_pool_size = 1;
- // Add per-codec number of surfaces used for storing reference frames.
- switch (avctx->codec_id) {
- case AV_CODEC_ID_H264:
- case AV_CODEC_ID_HEVC:
- case AV_CODEC_ID_AV1:
- frames->initial_pool_size += 16;
- break;
- case AV_CODEC_ID_VP9:
- frames->initial_pool_size += 8;
- break;
- case AV_CODEC_ID_VP8:
- frames->initial_pool_size += 3;
- break;
- default:
- frames->initial_pool_size += 2;
+ if (CONFIG_VAAPI_1)
+ frames->initial_pool_size = 0;
+ else {
+ frames->initial_pool_size = 1;
+ // Add per-codec number of surfaces used for storing reference frames.
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ case AV_CODEC_ID_HEVC:
+ case AV_CODEC_ID_AV1:
+ frames->initial_pool_size += 16;
+ break;
+ case AV_CODEC_ID_VP9:
+ frames->initial_pool_size += 8;
+ break;
+ case AV_CODEC_ID_VP8:
+ frames->initial_pool_size += 3;
+ break;
+ default:
+ frames->initial_pool_size += 2;
+ }
}
}
Index: FFmpeg/libavcodec/vaapi_decode.h
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_decode.h
+++ FFmpeg/libavcodec/vaapi_decode.h
@@ -73,6 +73,7 @@ int ff_vaapi_decode_make_param_buffer(AV
int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
VAAPIDecodePicture *pic,
const void *params_data,
+ int nb_params,
size_t params_size,
const void *slice_data,
size_t slice_size);
Index: FFmpeg/libavcodec/vaapi_encode_av1.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_encode_av1.c
+++ FFmpeg/libavcodec/vaapi_encode_av1.c
@@ -23,6 +23,7 @@
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
+#include "libavutil/mastering_display_metadata.h"
#include "cbs_av1.h"
#include "put_bits.h"
@@ -41,6 +42,8 @@ typedef struct VAAPIEncodeAV1Context {
VAAPIEncodeContext common;
AV1RawOBU sh; /**< sequence header.*/
AV1RawOBU fh; /**< frame header.*/
+ AV1RawOBU mh[4]; /**< metadata header.*/
+ int nb_mh;
CodedBitstreamContext *cbc;
CodedBitstreamFragment current_obu;
VAConfigAttribValEncAV1 attr;
@@ -155,6 +158,8 @@ static av_cold int vaapi_encode_av1_conf
priv->q_idx_idr = priv->q_idx_p = priv->q_idx_b = 128;
}
+ ctx->roi_quant_range = AV1_MAX_QUANT;
+
return 0;
}
@@ -657,6 +662,68 @@ static int vaapi_encode_av1_init_picture
2 : 1));
}
+ priv->nb_mh = 0;
+
+ if (pic->type == PICTURE_TYPE_IDR) {
+ AVFrameSideData *sd =
+ av_frame_get_side_data(pic->input_image,
+ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+ if (sd) {
+ AVMasteringDisplayMetadata *mdm =
+ (AVMasteringDisplayMetadata *)sd->data;
+ if (mdm->has_primaries && mdm->has_luminance) {
+ AV1RawOBU *obu = &priv->mh[priv->nb_mh++];
+ AV1RawMetadata *md = &obu->obu.metadata;
+ AV1RawMetadataHDRMDCV *mdcv = &md->metadata.hdr_mdcv;
+ const int chroma_den = 1 << 16;
+ const int max_luma_den = 1 << 8;
+ const int min_luma_den = 1 << 14;
+
+ memset(obu, 0, sizeof(*obu));
+ obu->header.obu_type = AV1_OBU_METADATA;
+ md->metadata_type = AV1_METADATA_TYPE_HDR_MDCV;
+
+ for (i = 0; i < 3; i++) {
+ mdcv->primary_chromaticity_x[i] =
+ av_rescale(mdm->display_primaries[i][0].num, chroma_den,
+ mdm->display_primaries[i][0].den);
+ mdcv->primary_chromaticity_y[i] =
+ av_rescale(mdm->display_primaries[i][1].num, chroma_den,
+ mdm->display_primaries[i][1].den);
+ }
+
+ mdcv->white_point_chromaticity_x =
+ av_rescale(mdm->white_point[0].num, chroma_den,
+ mdm->white_point[0].den);
+ mdcv->white_point_chromaticity_y =
+ av_rescale(mdm->white_point[1].num, chroma_den,
+ mdm->white_point[1].den);
+
+ mdcv->luminance_max =
+ av_rescale(mdm->max_luminance.num, max_luma_den,
+ mdm->max_luminance.den);
+ mdcv->luminance_min =
+ av_rescale(mdm->min_luminance.num, min_luma_den,
+ mdm->min_luminance.den);
+ }
+ }
+
+ sd = av_frame_get_side_data(pic->input_image,
+ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ if (sd) {
+ AVContentLightMetadata *cllm = (AVContentLightMetadata *)sd->data;
+ AV1RawOBU *obu = &priv->mh[priv->nb_mh++];
+ AV1RawMetadata *md = &obu->obu.metadata;
+ AV1RawMetadataHDRCLL *cll = &md->metadata.hdr_cll;
+
+ memset(obu, 0, sizeof(*obu));
+ obu->header.obu_type = AV1_OBU_METADATA;
+ md->metadata_type = AV1_METADATA_TYPE_HDR_CLL;
+ cll->max_cll = cllm->MaxCLL;
+ cll->max_fall = cllm->MaxFALL;
+ }
+ }
+
end:
ff_cbs_fragment_reset(obu);
return ret;
@@ -733,6 +800,39 @@ end:
return ret;
}
+static int vaapi_encode_av1_write_extra_header(AVCodecContext *avctx,
+ VAAPIEncodePicture *pic,
+ int index, int *type,
+ char *data, size_t *data_len)
+{
+ VAAPIEncodeAV1Context *priv = avctx->priv_data;
+ CodedBitstreamFragment *obu = &priv->current_obu;
+ AV1RawOBU *mh_obu;
+ char mh_data[MAX_PARAM_BUFFER_SIZE];
+ size_t mh_data_len;
+ int ret = 0;
+
+ if (index >= priv->nb_mh)
+ return AVERROR_EOF;
+
+ mh_obu = &priv->mh[index];
+ ret = vaapi_encode_av1_add_obu(avctx, obu, AV1_OBU_METADATA, mh_obu);
+ if (ret < 0)
+ goto end;
+
+ ret = vaapi_encode_av1_write_obu(avctx, mh_data, &mh_data_len, obu);
+ if (ret < 0)
+ goto end;
+
+ memcpy(data, mh_data, MAX_PARAM_BUFFER_SIZE * sizeof(char));
+ *data_len = mh_data_len;
+ *type = VAEncPackedHeaderRawData;
+
+end:
+ ff_cbs_fragment_reset(obu);
+ return ret;
+}
+
static const VAAPIEncodeProfile vaapi_encode_av1_profiles[] = {
{ AV_PROFILE_AV1_MAIN, 8, 3, 1, 1, VAProfileAV1Profile0 },
{ AV_PROFILE_AV1_MAIN, 10, 3, 1, 1, VAProfileAV1Profile0 },
@@ -760,6 +860,8 @@ static const VAAPIEncodeType vaapi_encod
.slice_params_size = sizeof(VAEncTileGroupBufferAV1),
.init_slice_params = &vaapi_encode_av1_init_slice_params,
+
+ .write_extra_header = &vaapi_encode_av1_write_extra_header,
};
static av_cold int vaapi_encode_av1_init(AVCodecContext *avctx)
@@ -774,7 +876,8 @@ static av_cold int vaapi_encode_av1_init
ctx->desired_packed_headers =
VA_ENC_PACKED_HEADER_SEQUENCE |
- VA_ENC_PACKED_HEADER_PICTURE;
+ VA_ENC_PACKED_HEADER_PICTURE |
+ VA_ENC_PACKED_HEADER_MISC; // Metadata
if (avctx->profile == AV_PROFILE_UNKNOWN)
avctx->profile = priv->profile;
Index: FFmpeg/libavcodec/vaapi_encode_h264.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_encode_h264.c
+++ FFmpeg/libavcodec/vaapi_encode_h264.c
@@ -759,7 +759,7 @@ static int vaapi_encode_h264_init_pictur
vpic->frame_num = hpic->frame_num;
vpic->pic_fields.bits.idr_pic_flag = (pic->type == PICTURE_TYPE_IDR);
- vpic->pic_fields.bits.reference_pic_flag = (pic->type != PICTURE_TYPE_B);
+ vpic->pic_fields.bits.reference_pic_flag = pic->is_reference;
return 0;
}
Index: FFmpeg/libavcodec/vaapi_encode_h265.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_encode_h265.c
+++ FFmpeg/libavcodec/vaapi_encode_h265.c
@@ -945,26 +945,23 @@ static int vaapi_encode_h265_init_pictur
vpic->nal_unit_type = hpic->slice_nal_unit;
+ vpic->pic_fields.bits.reference_pic_flag = pic->is_reference;
switch (pic->type) {
case PICTURE_TYPE_IDR:
vpic->pic_fields.bits.idr_pic_flag = 1;
vpic->pic_fields.bits.coding_type = 1;
- vpic->pic_fields.bits.reference_pic_flag = 1;
break;
case PICTURE_TYPE_I:
vpic->pic_fields.bits.idr_pic_flag = 0;
vpic->pic_fields.bits.coding_type = 1;
- vpic->pic_fields.bits.reference_pic_flag = 1;
break;
case PICTURE_TYPE_P:
vpic->pic_fields.bits.idr_pic_flag = 0;
vpic->pic_fields.bits.coding_type = 2;
- vpic->pic_fields.bits.reference_pic_flag = 1;
break;
case PICTURE_TYPE_B:
vpic->pic_fields.bits.idr_pic_flag = 0;
vpic->pic_fields.bits.coding_type = 3;
- vpic->pic_fields.bits.reference_pic_flag = 0;
break;
default:
av_assert0(0 && "invalid picture type");
Index: FFmpeg/libavcodec/vaapi_h264.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_h264.c
+++ FFmpeg/libavcodec/vaapi_h264.c
@@ -93,14 +93,19 @@ typedef struct DPB {
*/
static int dpb_add(DPB *dpb, const H264Picture *pic)
{
- int i;
+ int i, pic_frame_idx, merged = 0;
if (dpb->size >= dpb->max_size)
return -1;
+ pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
+
for (i = 0; i < dpb->size; i++) {
VAPictureH264 * const va_pic = &dpb->va_pics[i];
- if (va_pic->picture_id == ff_vaapi_get_surface_id(pic->f)) {
+ int va_pic_long_ref = !!(va_pic->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE);
+ if (va_pic->picture_id == ff_vaapi_get_surface_id(pic->f) &&
+ va_pic_long_ref == pic->long_ref &&
+ va_pic->frame_idx == pic_frame_idx) {
VAPictureH264 temp_va_pic;
fill_vaapi_pic(&temp_va_pic, pic, 0);
@@ -112,11 +117,14 @@ static int dpb_add(DPB *dpb, const H264P
} else {
va_pic->BottomFieldOrderCnt = temp_va_pic.BottomFieldOrderCnt;
}
+ merged = 1;
}
- return 0;
}
}
+ if (merged)
+ return 0;
+
fill_vaapi_pic(&dpb->va_pics[dpb->size++], pic, 0);
return 0;
}
@@ -375,7 +383,7 @@ static int vaapi_h264_decode_slice(AVCod
slice_param.chroma_offset_l1);
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
- &slice_param, sizeof(slice_param),
+ &slice_param, 1, sizeof(slice_param),
buffer, size);
if (err) {
ff_vaapi_decode_cancel(avctx, pic);
Index: FFmpeg/libavcodec/vaapi_hevc.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_hevc.c
+++ FFmpeg/libavcodec/vaapi_hevc.c
@@ -353,7 +353,7 @@ static int vaapi_hevc_end_frame(AVCodecC
if (pic->last_size) {
last_slice_param->LongSliceFlags.fields.LastSliceOfPic = 1;
ret = ff_vaapi_decode_make_slice_buffer(avctx, &pic->pic,
- &pic->last_slice_param, slice_param_size,
+ &pic->last_slice_param, 1, slice_param_size,
pic->last_buffer, pic->last_size);
if (ret < 0)
goto fail;
@@ -471,7 +471,7 @@ static int vaapi_hevc_decode_slice(AVCod
if (!sh->first_slice_in_pic_flag) {
err = ff_vaapi_decode_make_slice_buffer(avctx, &pic->pic,
- &pic->last_slice_param, slice_param_size,
+ &pic->last_slice_param, 1, slice_param_size,
pic->last_buffer, pic->last_size);
pic->last_buffer = NULL;
pic->last_size = 0;
Index: FFmpeg/libavcodec/vaapi_mjpeg.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_mjpeg.c
+++ FFmpeg/libavcodec/vaapi_mjpeg.c
@@ -131,7 +131,7 @@ static int vaapi_mjpeg_decode_slice(AVCo
sp.components[i].ac_table_selector = s->ac_index[i];
}
- err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, sizeof(sp), buffer, size);
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, 1, sizeof(sp), buffer, size);
if (err)
goto fail;
Index: FFmpeg/libavcodec/vaapi_mpeg2.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_mpeg2.c
+++ FFmpeg/libavcodec/vaapi_mpeg2.c
@@ -162,7 +162,7 @@ static int vaapi_mpeg2_decode_slice(AVCo
};
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
- &slice_param, sizeof(slice_param),
+ &slice_param, 1, sizeof(slice_param),
buffer, size);
if (err < 0) {
ff_vaapi_decode_cancel(avctx, pic);
Index: FFmpeg/libavcodec/vaapi_mpeg4.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_mpeg4.c
+++ FFmpeg/libavcodec/vaapi_mpeg4.c
@@ -169,7 +169,7 @@ static int vaapi_mpeg4_decode_slice(AVCo
};
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
- &slice_param, sizeof(slice_param),
+ &slice_param, 1, sizeof(slice_param),
buffer, size);
if (err < 0) {
ff_vaapi_decode_cancel(avctx, pic);
Index: FFmpeg/libavcodec/vaapi_vc1.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_vc1.c
+++ FFmpeg/libavcodec/vaapi_vc1.c
@@ -489,7 +489,7 @@ static int vaapi_vc1_decode_slice(AVCode
};
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
- &slice_param, sizeof(slice_param),
+ &slice_param, 1, sizeof(slice_param),
buffer, size);
if (err < 0) {
ff_vaapi_decode_cancel(avctx, pic);
Index: FFmpeg/libavcodec/vaapi_vp8.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_vp8.c
+++ FFmpeg/libavcodec/vaapi_vp8.c
@@ -209,7 +209,7 @@ static int vaapi_vp8_decode_slice(AVCode
for (i = 0; i < 8; i++)
sp.partition_size[i+1] = s->coeff_partition_size[i];
- err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, sizeof(sp), data, data_size);
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, 1, sizeof(sp), data, data_size);
if (err)
goto fail;
Index: FFmpeg/libavcodec/vaapi_vp9.c
===================================================================
--- FFmpeg.orig/libavcodec/vaapi_vp9.c
+++ FFmpeg/libavcodec/vaapi_vp9.c
@@ -158,7 +158,7 @@ static int vaapi_vp9_decode_slice(AVCode
}
err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
- &slice_param, sizeof(slice_param),
+ &slice_param, 1, sizeof(slice_param),
buffer, size);
if (err) {
ff_vaapi_decode_cancel(avctx, pic);
Index: FFmpeg/libavfilter/vaapi_vpp.c
===================================================================
--- FFmpeg.orig/libavfilter/vaapi_vpp.c
+++ FFmpeg/libavfilter/vaapi_vpp.c
@@ -203,7 +203,10 @@ int ff_vaapi_vpp_config_output(AVFilterL
output_frames->width = ctx->output_width;
output_frames->height = ctx->output_height;
- output_frames->initial_pool_size = 4;
+ if (CONFIG_VAAPI_1)
+ output_frames->initial_pool_size = 0;
+ else
+ output_frames->initial_pool_size = 4;
err = ff_filter_init_hw_frames(avctx, outlink, 10);
if (err < 0)
@@ -219,6 +222,8 @@ int ff_vaapi_vpp_config_output(AVFilterL
va_frames = output_frames->hwctx;
av_assert0(ctx->va_context == VA_INVALID_ID);
+ av_assert0(output_frames->initial_pool_size ||
+ (va_frames->surface_ids == NULL && va_frames->nb_surfaces == 0));
vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
ctx->output_width, ctx->output_height,
VA_PROGRESSIVE,
Index: FFmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_vaapi.c
+++ FFmpeg/libavutil/hwcontext_vaapi.c
@@ -809,6 +809,9 @@ static int vaapi_map_frame(AVHWFramesCon
VAStatus vas;
void *address = NULL;
int err, i;
+#if VA_CHECK_VERSION(1, 21, 0)
+ uint32_t vaflags = 0;
+#endif
surface_id = (VASurfaceID)(uintptr_t)src->data[3];
av_log(hwfc, AV_LOG_DEBUG, "Map surface %#x.\n", surface_id);
@@ -892,7 +895,16 @@ static int vaapi_map_frame(AVHWFramesCon
}
}
+#if VA_CHECK_VERSION(1, 21, 0)
+ if (flags & AV_HWFRAME_MAP_READ)
+ vaflags |= VA_MAPBUFFER_FLAG_READ;
+ if (flags & AV_HWFRAME_MAP_WRITE)
+ vaflags |= VA_MAPBUFFER_FLAG_WRITE;
+ // On drivers not implementing vaMapBuffer2 libva calls vaMapBuffer instead.
+ vas = vaMapBuffer2(hwctx->display, map->image.buf, &address, vaflags);
+#else
vas = vaMapBuffer(hwctx->display, map->image.buf, &address);
+#endif
if (vas != VA_STATUS_SUCCESS) {
av_log(hwfc, AV_LOG_ERROR, "Failed to map image from surface "
"%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));

View File

@ -1,643 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsv.c
+++ jellyfin-ffmpeg/libavcodec/qsv.c
@@ -196,7 +196,7 @@ int ff_qsv_print_warning(void *log_ctx,
{
const char *desc;
int ret = qsv_map_error(err, &desc);
- av_log(log_ctx, AV_LOG_WARNING, "%s: %s (%d)\n", warning_string, desc, err);
+ av_log(log_ctx, AV_LOG_VERBOSE, "%s: %s (%d)\n", warning_string, desc, err);
return ret;
}
@@ -208,7 +208,6 @@ enum AVPixelFormat ff_qsv_map_fourcc(uin
case MFX_FOURCC_P8: return AV_PIX_FMT_PAL8;
case MFX_FOURCC_A2RGB10: return AV_PIX_FMT_X2RGB10;
case MFX_FOURCC_RGB4: return AV_PIX_FMT_BGRA;
-#if CONFIG_VAAPI
case MFX_FOURCC_YUY2: return AV_PIX_FMT_YUYV422;
case MFX_FOURCC_Y210: return AV_PIX_FMT_Y210;
case MFX_FOURCC_AYUV: return AV_PIX_FMT_VUYX;
@@ -218,7 +217,6 @@ enum AVPixelFormat ff_qsv_map_fourcc(uin
case MFX_FOURCC_Y216: return AV_PIX_FMT_Y212;
case MFX_FOURCC_Y416: return AV_PIX_FMT_XV36;
#endif
-#endif
}
return AV_PIX_FMT_NONE;
}
@@ -245,7 +243,6 @@ int ff_qsv_map_pixfmt(enum AVPixelFormat
*fourcc = MFX_FOURCC_RGB4;
*shift = 0;
return AV_PIX_FMT_BGRA;
-#if CONFIG_VAAPI
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUYV422:
*fourcc = MFX_FOURCC_YUY2;
@@ -278,7 +275,6 @@ int ff_qsv_map_pixfmt(enum AVPixelFormat
*shift = 1;
return AV_PIX_FMT_XV36;
#endif
-#endif
default:
return AVERROR(ENOSYS);
}
Index: jellyfin-ffmpeg/libavcodec/qsv_internal.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsv_internal.h
+++ jellyfin-ffmpeg/libavcodec/qsv_internal.h
@@ -57,6 +57,8 @@
#define QSV_MAX_FRAME_EXT_PARAMS 4
+#define QSV_PAYLOAD_SIZE 1024
+
#define QSV_VERSION_ATLEAST(MAJOR, MINOR) \
(MFX_VERSION_MAJOR > (MAJOR) || \
MFX_VERSION_MAJOR == (MAJOR) && MFX_VERSION_MINOR >= (MINOR))
@@ -99,6 +101,7 @@ typedef struct QSVFrame {
int queued;
int used;
+ int external_frame;
struct QSVFrame *next;
} QSVFrame;
Index: jellyfin-ffmpeg/libavcodec/qsvdec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvdec.c
+++ jellyfin-ffmpeg/libavcodec/qsvdec.c
@@ -42,13 +42,16 @@
#include "libavutil/imgutils.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
+#include "libavutil/stereo3d.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "internal.h"
#include "decode.h"
#include "hwconfig.h"
+#include "get_bits.h"
#include "qsv.h"
+#include "h264_sei.h"
#include "qsv_internal.h"
#if QSV_ONEVPL
@@ -106,8 +109,13 @@ typedef struct QSVContext {
char *load_plugins;
+ mfxPayload payload;
+
mfxExtBuffer **ext_buffers;
int nb_ext_buffers;
+
+ H264SEIContext sei;
+ H264ParamSets ps;
} QSVContext;
static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
@@ -694,6 +702,147 @@ static int qsv_export_hdr_side_data(AVCo
#endif
+static int h264_decode_fpa(H2645SEIFramePacking *fpa, AVFrame *frame)
+{
+ if (!fpa || !frame) {
+ return AVERROR(EINVAL);
+ }
+
+ if (!fpa->arrangement_cancel_flag &&
+ fpa->arrangement_type <= 6 &&
+ fpa->content_interpretation_type > 0 &&
+ fpa->content_interpretation_type < 3) {
+ AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
+ if (stereo) {
+ switch (fpa->arrangement_type) {
+ case 0:
+ stereo->type = AV_STEREO3D_CHECKERBOARD;
+ break;
+ case 1:
+ stereo->type = AV_STEREO3D_COLUMNS;
+ break;
+ case 2:
+ stereo->type = AV_STEREO3D_LINES;
+ break;
+ case 3:
+ if (fpa->quincunx_sampling_flag)
+ stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
+ else
+ stereo->type = AV_STEREO3D_SIDEBYSIDE;
+ break;
+ case 4:
+ stereo->type = AV_STEREO3D_TOPBOTTOM;
+ break;
+ case 5:
+ stereo->type = AV_STEREO3D_FRAMESEQUENCE;
+ if (fpa->current_frame_is_frame0_flag)
+ stereo->view = AV_STEREO3D_VIEW_LEFT;
+ else
+ stereo->view = AV_STEREO3D_VIEW_RIGHT;
+ break;
+ case 6:
+ stereo->type = AV_STEREO3D_2D;
+ break;
+ }
+
+ if (fpa->content_interpretation_type == 2)
+ stereo->flags = AV_STEREO3D_FLAG_INVERT;
+ }
+ }
+ return 0;
+}
+
+static int h264_parse_side_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame)
+{
+ GetBitContext gb_payload;
+ uint8_t *sei_buffer;
+ int sei_buffer_index;
+ int ret;
+
+ /* remove emulation prevention bytes */
+ sei_buffer = (uint8_t *)av_mallocz(q->payload.NumBit / 8);
+ if (!sei_buffer) {
+ av_freep(&sei_buffer);
+ return AVERROR(ENOMEM);
+ }
+ sei_buffer_index = 0;
+ for (int i = 0; i < q->payload.NumBit / 8; i++) {
+ if (q->payload.Data[i] == 3)
+ i++;
+ sei_buffer[sei_buffer_index] = q->payload.Data[i];
+ sei_buffer_index += 1;
+ }
+
+ ret = init_get_bits8(&gb_payload, sei_buffer, sei_buffer_index+1);
+ if (ret < 0) {
+ av_freep(&sei_buffer);
+ return ret;
+ }
+
+ ret = ff_h264_sei_decode(&q->sei, &gb_payload, &q->ps, avctx);
+ if (ret < 0) {
+ av_freep(&sei_buffer);
+ return ret;
+ }
+
+ switch (q->payload.Type) {
+ case SEI_TYPE_FRAME_PACKING_ARRANGEMENT:
+ ret = h264_decode_fpa(&q->sei.common.frame_packing, frame);
+ break;
+ default:
+ break;
+ }
+
+ av_freep(&sei_buffer);
+ return ret;
+}
+
+static int extract_frame_side_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame)
+{
+ mfxU64 ts;
+ mfxStatus sts;
+ int ret = 0;
+
+ if (q->payload.BufSize == 0) {
+ q->payload.Data = av_mallocz(QSV_PAYLOAD_SIZE);
+ if (!q->payload.Data) {
+ av_freep(&q->payload.Data);
+ return AVERROR(ENOMEM);
+ }
+ q->payload.BufSize = QSV_PAYLOAD_SIZE;
+ }
+
+ sts = MFX_ERR_NONE;
+ while (sts == MFX_ERR_NONE) {
+
+ sts = MFXVideoDECODE_GetPayload(q->session, &ts, &q->payload);
+
+ if (sts == MFX_ERR_NOT_ENOUGH_BUFFER) {
+ av_log(avctx, AV_LOG_VERBOSE, "Space for SEI is not enough. One SEI will be skipped\n");
+ continue;
+ } else if (sts != MFX_ERR_NONE || q->payload.NumBit == 0) {
+ break;
+ }
+
+ if (q->payload.Type != SEI_TYPE_FRAME_PACKING_ARRANGEMENT)
+ continue;
+
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ ret = h264_parse_side_data(avctx, q, frame);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_VERBOSE, "parse side data failed\n");
+ break;
+ }
+ }
+ return ret;
+}
+
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
AVFrame *frame, int *got_frame,
const AVPacket *avpkt)
@@ -803,6 +952,10 @@ static int qsv_decode(AVCodecContext *av
outsurf = &aframe.frame->surface;
+ ret = extract_frame_side_data(avctx, q, frame);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_VERBOSE, "Extracting side from packet failed\n");
+
frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
#if QSV_VERSION_ATLEAST(1, 34)
if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
@@ -873,6 +1026,8 @@ static void qsv_decode_close_qsvcontext(
av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
av_buffer_unref(&q->frames_ctx.mids_buf);
av_buffer_pool_uninit(&q->pool);
+
+ av_freep(&q->payload.Data);
}
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
Index: jellyfin-ffmpeg/libavcodec/qsvenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc.c
@@ -680,7 +680,9 @@ static int is_strict_gop(QSVEncContext *
static int init_video_param_jpeg(AVCodecContext *avctx, QSVEncContext *q)
{
- enum AVPixelFormat sw_format = avctx->pix_fmt == AV_PIX_FMT_QSV ?
+ enum AVPixelFormat sw_format = avctx->pix_fmt == AV_PIX_FMT_QSV ||
+ avctx->pix_fmt == AV_PIX_FMT_VAAPI ||
+ avctx->pix_fmt == AV_PIX_FMT_D3D11 ?
avctx->sw_pix_fmt : avctx->pix_fmt;
const AVPixFmtDescriptor *desc;
int ret;
@@ -746,7 +748,9 @@ static int init_video_param_jpeg(AVCodec
static int init_video_param(AVCodecContext *avctx, QSVEncContext *q)
{
- enum AVPixelFormat sw_format = avctx->pix_fmt == AV_PIX_FMT_QSV ?
+ enum AVPixelFormat sw_format = avctx->pix_fmt == AV_PIX_FMT_QSV ||
+ avctx->pix_fmt == AV_PIX_FMT_VAAPI ||
+ avctx->pix_fmt == AV_PIX_FMT_D3D11 ?
avctx->sw_pix_fmt : avctx->pix_fmt;
const AVPixFmtDescriptor *desc;
float quant;
@@ -1118,6 +1122,10 @@ static int init_video_param(AVCodecConte
q->extco3.MaxFrameSizeI = q->max_frame_size_i;
if (q->max_frame_size_p >= 0)
q->extco3.MaxFrameSizeP = q->max_frame_size_p;
+ if (sw_format == AV_PIX_FMT_BGRA &&
+ (q->profile == MFX_PROFILE_HEVC_REXT ||
+ q->profile == MFX_PROFILE_UNKNOWN))
+ q->extco3.TargetChromaFormatPlus1 = MFX_CHROMAFORMAT_YUV444 + 1;
q->extco3.ScenarioInfo = q->scenario;
} else if (avctx->codec_id == AV_CODEC_ID_AV1) {
@@ -1618,7 +1626,31 @@ int ff_qsv_enc_init(AVCodecContext *avct
if (avctx->hw_frames_ctx) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
- AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
+ AVQSVFramesContext *frames_hwctx = NULL;
+
+ if (frames_ctx->format == AV_PIX_FMT_VAAPI || frames_ctx->format == AV_PIX_FMT_D3D11) {
+ AVBufferRef *derive_device_ref = NULL;
+ AVBufferRef *derive_frames_ref = NULL;
+ ret = av_hwdevice_ctx_create_derived(&derive_device_ref,
+ AV_HWDEVICE_TYPE_QSV, frames_ctx->device_ref, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to derive QSV device context: %d.\n", ret);
+ return ret;
+ }
+ ret = av_hwframe_ctx_create_derived(&derive_frames_ref,
+ AV_PIX_FMT_QSV, derive_device_ref, avctx->hw_frames_ctx, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to derive QSV frames context: %d.\n", ret);
+ av_buffer_unref(&derive_device_ref);
+ return ret;
+ }
+ av_buffer_unref(&avctx->hw_device_ctx);
+ avctx->hw_device_ctx = derive_device_ref;
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ avctx->hw_frames_ctx = derive_frames_ref;
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ }
+ frames_hwctx = frames_ctx->hwctx;
if (!iopattern) {
#if QSV_HAVE_OPAQUE
@@ -1773,6 +1805,10 @@ static void clear_unused_frames(QSVEncCo
memset(&cur->enc_ctrl, 0, sizeof(cur->enc_ctrl));
cur->enc_ctrl.Payload = cur->payloads;
cur->enc_ctrl.ExtParam = cur->extparam;
+ if (cur->external_frame) {
+ av_freep(&cur->surface.Data.MemId);
+ cur->external_frame = 0;
+ }
if (cur->frame->format == AV_PIX_FMT_QSV) {
av_frame_unref(cur->frame);
}
@@ -1887,19 +1923,42 @@ static int submit_frame(QSVEncContext *q
if (ret < 0)
return ret;
- if (frame->format == AV_PIX_FMT_QSV) {
- ret = av_frame_ref(qf->frame, frame);
- if (ret < 0)
- return ret;
+ if (frame->format == AV_PIX_FMT_QSV || frame->format == AV_PIX_FMT_VAAPI || frame->format == AV_PIX_FMT_D3D11) {
+ if (frame->format == AV_PIX_FMT_QSV) {
+ ret = av_frame_ref(qf->frame, frame);
+ if (ret < 0)
+ return ret;
+ } else {
+ qf->frame->format = AV_PIX_FMT_QSV;
+ qf->frame->hw_frames_ctx = av_buffer_ref(q->avctx->hw_frames_ctx);
+ if (!qf->frame->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+ ret = av_hwframe_map(qf->frame, frame, 0);
+ if (ret < 0) {
+ av_log(q->avctx, AV_LOG_ERROR, "Failed to map to QSV frames\n");
+ return ret;
+ }
+ ret = av_frame_copy_props(qf->frame, frame);
+ if (ret < 0)
+ return ret;
+ }
qf->surface = *(mfxFrameSurface1*)qf->frame->data[3];
+
if (q->frames_ctx.mids) {
ret = ff_qsv_find_surface_idx(&q->frames_ctx, qf);
- if (ret < 0)
- return ret;
-
- qf->surface.Data.MemId = &q->frames_ctx.mids[ret];
+ if (ret >= 0)
+ qf->surface.Data.MemId = &q->frames_ctx.mids[ret];
+ }
+ if (!q->frames_ctx.mids || ret < 0) {
+ QSVMid *mid = NULL;
+ mid = (QSVMid *)av_mallocz(sizeof(*mid));
+ if (!mid)
+ return AVERROR(ENOMEM);
+ mid->handle_pair = (mfxHDLPair *)qf->surface.Data.MemId;
+ qf->surface.Data.MemId = mid;
+ qf->external_frame = 1;
}
} else {
/* make a copy if the input is not padded as libmfx requires */
@@ -2597,6 +2656,8 @@ int ff_qsv_enc_close(AVCodecContext *avc
const AVCodecHWConfigInternal *const ff_qsv_enc_hw_configs[] = {
HW_CONFIG_ENCODER_FRAMES(QSV, QSV),
+ HW_CONFIG_ENCODER_FRAMES(VAAPI,VAAPI),
+ HW_CONFIG_ENCODER_FRAMES(D3D11,D3D11VA),
HW_CONFIG_ENCODER_DEVICE(NV12, QSV),
HW_CONFIG_ENCODER_DEVICE(P010, QSV),
NULL,
Index: jellyfin-ffmpeg/libavcodec/qsvenc_av1.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_av1.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_av1.c
@@ -149,10 +149,13 @@ FFCodec ff_av1_qsv_encoder = {
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
AV_PIX_FMT_P010,
AV_PIX_FMT_QSV,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_NONE },
.p.priv_class = &class,
.defaults = qsv_enc_defaults,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
+ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
+ FF_CODEC_CAP_INIT_CLEANUP,
.p.wrapper_name = "qsv",
.hw_configs = ff_qsv_enc_hw_configs,
};
Index: jellyfin-ffmpeg/libavcodec/qsvenc_h264.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_h264.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_h264.c
@@ -201,6 +201,8 @@ const FFCodec ff_h264_qsv_encoder = {
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
AV_PIX_FMT_QSV,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_NONE },
.p.priv_class = &class,
.defaults = qsv_enc_defaults,
Index: jellyfin-ffmpeg/libavcodec/qsvenc_hevc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_hevc.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_hevc.c
@@ -400,6 +400,8 @@ const FFCodec ff_hevc_qsv_encoder = {
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_Y210,
AV_PIX_FMT_QSV,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_BGRA,
AV_PIX_FMT_X2RGB10,
AV_PIX_FMT_VUYX,
Index: jellyfin-ffmpeg/libavcodec/qsvenc_jpeg.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_jpeg.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_jpeg.c
@@ -92,6 +92,8 @@ const FFCodec ff_mjpeg_qsv_encoder = {
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_BGRA,
AV_PIX_FMT_QSV,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_NONE },
.p.priv_class = &class,
.defaults = qsv_enc_defaults,
Index: jellyfin-ffmpeg/libavcodec/qsvenc_mpeg2.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_mpeg2.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_mpeg2.c
@@ -104,6 +104,8 @@ const FFCodec ff_mpeg2_qsv_encoder = {
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
AV_PIX_FMT_QSV,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_NONE },
.p.priv_class = &class,
.defaults = qsv_enc_defaults,
Index: jellyfin-ffmpeg/libavcodec/qsvenc_vp9.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_vp9.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_vp9.c
@@ -116,6 +116,8 @@ const FFCodec ff_vp9_qsv_encoder = {
AV_PIX_FMT_VUYX,
AV_PIX_FMT_QSV,
AV_PIX_FMT_XV30,
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_D3D11,
AV_PIX_FMT_NONE },
.p.priv_class = &class,
.defaults = qsv_enc_defaults,
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -115,7 +115,6 @@ static const struct {
{ AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4, 0 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010, 1 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8, 0 },
-#if CONFIG_VAAPI
{ AV_PIX_FMT_YUYV422,
MFX_FOURCC_YUY2, 0 },
{ AV_PIX_FMT_UYVY422,
@@ -144,7 +143,6 @@ static const struct {
{ AV_PIX_FMT_XV36,
MFX_FOURCC_Y416, 1 },
#endif
-#endif
};
extern int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
@@ -1526,7 +1524,6 @@ static int map_frame_to_surface(const AV
surface->Data.R = frame->data[0] + 2;
surface->Data.A = frame->data[0] + 3;
break;
-#if CONFIG_VAAPI
case AV_PIX_FMT_YUYV422:
surface->Data.Y = frame->data[0];
surface->Data.U = frame->data[0] + 1;
@@ -1563,7 +1560,6 @@ static int map_frame_to_surface(const AV
surface->Data.U = frame->data[0];
surface->Data.V = frame->data[0] + 2;
break;
-#endif
default:
return MFX_ERR_UNSUPPORTED;
}
@@ -1878,11 +1874,25 @@ static int qsv_frames_derive_to(AVHWFram
return 0;
}
+#if CONFIG_VAAPI
+static void qsv_umap_from_vaapi(AVHWFramesContext *dst_fc,
+ HWMapDescriptor *hwmap)
+{
+ mfxFrameSurface1 *new_sur = (mfxFrameSurface1 *)hwmap->priv;
+ mfxHDLPair *hdlpair = (mfxHDLPair *)new_sur->Data.MemId;
+ av_freep(&hdlpair->first);
+ av_freep(&new_sur->Data.MemId);
+ av_freep(&new_sur);
+}
+#endif
+
static int qsv_map_to(AVHWFramesContext *dst_ctx,
AVFrame *dst, const AVFrame *src, int flags)
{
AVQSVFramesContext *hwctx = dst_ctx->hwctx;
int i, err, index = -1;
+ mfxFrameSurface1 *new_sur = NULL;
+ mfxHDLPair *new_hdlpair = NULL;
for (i = 0; i < hwctx->nb_surfaces && index < 0; i++) {
switch(src->format) {
@@ -1921,21 +1931,77 @@ static int qsv_map_to(AVHWFramesContext
}
}
if (index < 0) {
- av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
- "is not in the mapped frames context.\n");
- return AVERROR(EINVAL);
- }
+ switch (src->format) {
+#if CONFIG_VAAPI
+ case AV_PIX_FMT_VAAPI:
+ {
+ new_sur = (mfxFrameSurface1 *)av_mallocz(sizeof(*new_sur));
+ if (!new_sur) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ err = qsv_init_surface(dst_ctx, new_sur);
+ if (err < 0)
+ goto qsv_map_to_err;
+
+ new_hdlpair = (mfxHDLPair *)av_mallocz(sizeof(*new_hdlpair));
+ if (!new_hdlpair) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ new_hdlpair->first = (VASurfaceID *)av_mallocz(sizeof(VASurfaceID));
+ if (!new_hdlpair->first) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ *(VASurfaceID*)(new_hdlpair->first) = (VASurfaceID)(uintptr_t)src->data[3];
+ new_sur->Data.MemId = new_hdlpair;
- err = ff_hwframe_map_create(dst->hw_frames_ctx,
- dst, src, NULL, NULL);
- if (err)
- return err;
+ err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
+ &qsv_umap_from_vaapi,
+ (void*)new_sur);
+ if (err)
+ goto qsv_map_to_err;
+
+ av_log(dst_ctx, AV_LOG_DEBUG, "Trying to map from a surface which "
+ "is not in the mapped frames context, so create a new surface\n");
+ }
+ break;
+#endif
+#if CONFIG_DXVA2
+ case AV_PIX_FMT_DXVA2_VLD:
+ {
+ av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
+ "is not in the mapped frames context.\n");
+ return AVERROR(EINVAL);
+ }
+ break;
+#endif
+ default:
+ return AVERROR(ENOSYS);
+ }
+ } else {
+ err = ff_hwframe_map_create(dst->hw_frames_ctx,
+ dst, src, NULL, NULL);
+ if (err)
+ goto qsv_map_to_err;
+ }
dst->width = src->width;
dst->height = src->height;
- dst->data[3] = (uint8_t*)&hwctx->surfaces[index];
+ dst->data[3] = (uint8_t*)((index == -1) ? new_sur : &hwctx->surfaces[index]);
return 0;
+
+qsv_map_to_err:
+ if (new_sur)
+ av_freep(&new_sur);
+ if (new_hdlpair) {
+ if (new_hdlpair->first)
+ av_freep(&new_hdlpair->first);
+ av_freep(&new_hdlpair);
+ }
+ return err;
}
static int qsv_frames_get_constraints(AVHWDeviceContext *ctx,

File diff suppressed because it is too large Load Diff

View File

@ -1,135 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/frame.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/frame.c
+++ jellyfin-ffmpeg/libavutil/frame.c
@@ -259,9 +259,39 @@ FF_ENABLE_DEPRECATION_WARNINGS
return AVERROR(EINVAL);
}
+int av_frame_copy_side_data(AVFrame* dst, const AVFrame* src, int flags)
+{
+ for (unsigned i = 0; i < src->nb_side_data; i++) {
+ const AVFrameSideData *sd_src = src->side_data[i];
+ AVFrameSideData *sd_dst;
+ if ( sd_src->type == AV_FRAME_DATA_PANSCAN
+ && (src->width != dst->width || src->height != dst->height))
+ continue;
+ if (flags & AV_FRAME_COPY_PROPS_FORCECOPY) {
+ sd_dst = av_frame_new_side_data(dst, sd_src->type,
+ sd_src->size);
+ if (!sd_dst) {
+ wipe_side_data(dst);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(sd_dst->data, sd_src->data, sd_src->size);
+ } else {
+ AVBufferRef *ref = av_buffer_ref(sd_src->buf);
+ sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
+ if (!sd_dst) {
+ av_buffer_unref(&ref);
+ wipe_side_data(dst);
+ return AVERROR(ENOMEM);
+ }
+ }
+ av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
+ }
+ return 0;
+}
+
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
- int ret, i;
+ int ret;
dst->key_frame = src->key_frame;
dst->pict_type = src->pict_type;
@@ -310,31 +340,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_dict_copy(&dst->metadata, src->metadata, 0);
- for (i = 0; i < src->nb_side_data; i++) {
- const AVFrameSideData *sd_src = src->side_data[i];
- AVFrameSideData *sd_dst;
- if ( sd_src->type == AV_FRAME_DATA_PANSCAN
- && (src->width != dst->width || src->height != dst->height))
- continue;
- if (force_copy) {
- sd_dst = av_frame_new_side_data(dst, sd_src->type,
- sd_src->size);
- if (!sd_dst) {
- wipe_side_data(dst);
- return AVERROR(ENOMEM);
- }
- memcpy(sd_dst->data, sd_src->data, sd_src->size);
- } else {
- AVBufferRef *ref = av_buffer_ref(sd_src->buf);
- sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
- if (!sd_dst) {
- av_buffer_unref(&ref);
- wipe_side_data(dst);
- return AVERROR(ENOMEM);
- }
- }
- av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
- }
+ if (ret = av_frame_copy_side_data(dst, src,
+ force_copy ? AV_FRAME_COPY_PROPS_FORCECOPY : 0) < 0)
+ return ret;
ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
@@ -793,6 +801,17 @@ void av_frame_remove_side_data(AVFrame *
}
}
+void av_frame_remove_all_side_data(AVFrame *frame)
+{
+ int i;
+
+ for (i = frame->nb_side_data - 1; i >= 0; i--) {
+ free_side_data(&frame->side_data[i]);
+ frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
+ frame->nb_side_data--;
+ }
+}
+
const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
switch(type) {
Index: jellyfin-ffmpeg/libavutil/frame.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/frame.h
+++ jellyfin-ffmpeg/libavutil/frame.h
@@ -866,6 +866,21 @@ int av_frame_copy(AVFrame *dst, const AV
*/
int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+/** Copy actual data buffers instead of references.
+ */
+#define AV_FRAME_COPY_PROPS_FORCECOPY 1
+
+/**
+ * Copy only side-data from src to dst.
+ *
+ * @param dst a frame to which the side data should be copied.
+ * @param src a frame from which to copy the side data.
+ * @param flags flags of type AV_FRAME_COPY_PROPS_*, controlling copy behavior.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy_side_data(AVFrame* dst, const AVFrame* src, int flags);
+
/**
* Get the buffer reference a given data plane is stored in.
*
@@ -918,6 +933,10 @@ AVFrameSideData *av_frame_get_side_data(
*/
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+/**
+ * Remove and free all side data in this frame.
+ */
+void av_frame_remove_all_side_data(AVFrame *frame);
/**
* Flags for frame cropping.

View File

@ -0,0 +1,343 @@
Index: FFmpeg/libavcodec/qsv_internal.h
===================================================================
--- FFmpeg.orig/libavcodec/qsv_internal.h
+++ FFmpeg/libavcodec/qsv_internal.h
@@ -56,6 +56,8 @@
#define QSV_MAX_FRAME_EXT_PARAMS 4
+#define QSV_PAYLOAD_SIZE 1024
+
#define QSV_VERSION_ATLEAST(MAJOR, MINOR) \
(MFX_VERSION_MAJOR > (MAJOR) || \
MFX_VERSION_MAJOR == (MAJOR) && MFX_VERSION_MINOR >= (MINOR))
Index: FFmpeg/libavcodec/qsvdec.c
===================================================================
--- FFmpeg.orig/libavcodec/qsvdec.c
+++ FFmpeg/libavcodec/qsvdec.c
@@ -43,13 +43,16 @@
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/avassert.h"
+#include "libavutil/stereo3d.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "internal.h"
#include "decode.h"
#include "hwconfig.h"
+#include "get_bits.h"
#include "qsv.h"
+#include "h264_sei.h"
#include "qsv_internal.h"
#include "refstruct.h"
@@ -111,8 +114,13 @@ typedef struct QSVContext {
char *load_plugins;
+ mfxPayload payload;
+
mfxExtBuffer **ext_buffers;
int nb_ext_buffers;
+
+ H264SEIContext sei;
+ H264ParamSets ps;
} QSVContext;
static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
@@ -784,6 +792,147 @@ static int qsv_export_hdr_side_data_av1(
#endif
+static int h264_decode_fpa(H2645SEIFramePacking *fpa, AVFrame *frame)
+{
+ if (!fpa || !frame) {
+ return AVERROR(EINVAL);
+ }
+
+ if (!fpa->arrangement_cancel_flag &&
+ fpa->arrangement_type <= 6 &&
+ fpa->content_interpretation_type > 0 &&
+ fpa->content_interpretation_type < 3) {
+ AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
+ if (stereo) {
+ switch (fpa->arrangement_type) {
+ case 0:
+ stereo->type = AV_STEREO3D_CHECKERBOARD;
+ break;
+ case 1:
+ stereo->type = AV_STEREO3D_COLUMNS;
+ break;
+ case 2:
+ stereo->type = AV_STEREO3D_LINES;
+ break;
+ case 3:
+ if (fpa->quincunx_sampling_flag)
+ stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
+ else
+ stereo->type = AV_STEREO3D_SIDEBYSIDE;
+ break;
+ case 4:
+ stereo->type = AV_STEREO3D_TOPBOTTOM;
+ break;
+ case 5:
+ stereo->type = AV_STEREO3D_FRAMESEQUENCE;
+ if (fpa->current_frame_is_frame0_flag)
+ stereo->view = AV_STEREO3D_VIEW_LEFT;
+ else
+ stereo->view = AV_STEREO3D_VIEW_RIGHT;
+ break;
+ case 6:
+ stereo->type = AV_STEREO3D_2D;
+ break;
+ }
+
+ if (fpa->content_interpretation_type == 2)
+ stereo->flags = AV_STEREO3D_FLAG_INVERT;
+ }
+ }
+ return 0;
+}
+
+static int h264_parse_side_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame)
+{
+ GetBitContext gb_payload;
+ uint8_t *sei_buffer;
+ int sei_buffer_index;
+ int ret;
+
+ /* remove emulation prevention bytes */
+ sei_buffer = (uint8_t *)av_mallocz(q->payload.NumBit / 8);
+ if (!sei_buffer) {
+ av_freep(&sei_buffer);
+ return AVERROR(ENOMEM);
+ }
+ sei_buffer_index = 0;
+ for (int i = 0; i < q->payload.NumBit / 8; i++) {
+ if (q->payload.Data[i] == 3)
+ i++;
+ sei_buffer[sei_buffer_index] = q->payload.Data[i];
+ sei_buffer_index += 1;
+ }
+
+ ret = init_get_bits8(&gb_payload, sei_buffer, sei_buffer_index+1);
+ if (ret < 0) {
+ av_freep(&sei_buffer);
+ return ret;
+ }
+
+ ret = ff_h264_sei_decode(&q->sei, &gb_payload, &q->ps, avctx);
+ if (ret < 0) {
+ av_freep(&sei_buffer);
+ return ret;
+ }
+
+ switch (q->payload.Type) {
+ case SEI_TYPE_FRAME_PACKING_ARRANGEMENT:
+ ret = h264_decode_fpa(&q->sei.common.frame_packing, frame);
+ break;
+ default:
+ break;
+ }
+
+ av_freep(&sei_buffer);
+ return ret;
+}
+
+static int extract_frame_side_data(AVCodecContext *avctx, QSVContext *q, AVFrame *frame)
+{
+ mfxU64 ts;
+ mfxStatus sts;
+ int ret = 0;
+
+ if (q->payload.BufSize == 0) {
+ q->payload.Data = av_mallocz(QSV_PAYLOAD_SIZE);
+ if (!q->payload.Data) {
+ av_freep(&q->payload.Data);
+ return AVERROR(ENOMEM);
+ }
+ q->payload.BufSize = QSV_PAYLOAD_SIZE;
+ }
+
+ sts = MFX_ERR_NONE;
+ while (sts == MFX_ERR_NONE) {
+
+ sts = MFXVideoDECODE_GetPayload(q->session, &ts, &q->payload);
+
+ if (sts == MFX_ERR_NOT_ENOUGH_BUFFER) {
+ av_log(avctx, AV_LOG_VERBOSE, "Space for SEI is not enough. One SEI will be skipped\n");
+ continue;
+ } else if (sts != MFX_ERR_NONE || q->payload.NumBit == 0) {
+ break;
+ }
+
+ if (q->payload.Type != SEI_TYPE_FRAME_PACKING_ARRANGEMENT)
+ continue;
+
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ ret = h264_parse_side_data(avctx, q, frame);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_VERBOSE, "parse side data failed\n");
+ break;
+ }
+ }
+ return ret;
+}
+
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
AVFrame *frame, int *got_frame,
const AVPacket *avpkt)
@@ -895,6 +1044,10 @@ static int qsv_decode(AVCodecContext *av
outsurf = &aframe.frame->surface;
+ ret = extract_frame_side_data(avctx, q, frame);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_WARNING, "Extracting side from packet failed\n");
+
frame->pts = MFX_PTS_TO_PTS(outsurf->Data.TimeStamp, avctx->pkt_timebase);
#if QSV_VERSION_ATLEAST(1, 34)
if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
@@ -985,6 +1138,8 @@ static void qsv_decode_close_qsvcontext(
av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
ff_refstruct_unref(&q->frames_ctx.mids);
av_buffer_pool_uninit(&q->pool);
+
+ av_freep(&q->payload.Data);
}
static int qsv_process_data(AVCodecContext *avctx, QSVContext *q,
Index: FFmpeg/libavcodec/qsvenc.c
===================================================================
--- FFmpeg.orig/libavcodec/qsvenc.c
+++ FFmpeg/libavcodec/qsvenc.c
@@ -205,6 +205,7 @@ static void dump_video_param(AVCodecCont
#endif
const char *tmp_str = NULL;
+ mfxExtHEVCParam *exthevcparam = NULL;
if (q->co2_idx > 0)
co2 = (mfxExtCodingOption2*)coding_opts[q->co2_idx];
@@ -220,6 +221,8 @@ static void dump_video_param(AVCodecCont
exthypermodeparam = (mfxExtHyperModeParam *)coding_opts[q->exthypermodeparam_idx];
#endif
+ if (q->exthevcparam_idx > 0)
+ exthevcparam = (mfxExtHEVCParam *)coding_opts[q->exthevcparam_idx];
av_log(avctx, AV_LOG_VERBOSE, "profile: %s; level: %"PRIu16"\n",
print_profile(avctx->codec_id, info->CodecProfile), info->CodecLevel);
@@ -400,6 +403,11 @@ static void dump_video_param(AVCodecCont
av_log(avctx, AV_LOG_VERBOSE, "\n");
}
#endif
+ if (exthevcparam &&
+ exthevcparam->GeneralConstraintFlags == MFX_HEVC_CONSTR_REXT_ONE_PICTURE_ONLY &&
+ avctx->codec_id == AV_CODEC_ID_HEVC &&
+ info->CodecProfile == MFX_PROFILE_HEVC_MAIN10)
+ av_log(avctx, AV_LOG_VERBOSE, "Main10sp (Main10 profile and one_pic_only flag): enable\n");
}
static void dump_video_vp9_param(AVCodecContext *avctx, QSVEncContext *q,
@@ -1211,6 +1219,18 @@ static int init_video_param(AVCodecConte
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->exthevctiles;
}
+ if (avctx->codec_id == AV_CODEC_ID_HEVC && q->main10sp) {
+ if (QSV_RUNTIME_VERSION_ATLEAST(q->ver, 2, 0)) {
+ q->param.mfx.CodecProfile = MFX_PROFILE_HEVC_MAIN10;
+ q->exthevcparam.Header.BufferId = MFX_EXTBUFF_HEVC_PARAM;
+ q->exthevcparam.Header.BufferSz = sizeof(q->exthevcparam);
+ q->exthevcparam.GeneralConstraintFlags = MFX_HEVC_CONSTR_REXT_ONE_PICTURE_ONLY;
+ q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->exthevcparam;
+ } else
+ av_log(avctx, AV_LOG_WARNING,
+ "This version of runtime doesn't support 10bit single still picture\n");
+ }
+
q->extvsi.VideoFullRange = (avctx->color_range == AVCOL_RANGE_JPEG);
q->extvsi.ColourDescriptionPresent = 0;
@@ -1463,12 +1483,17 @@ static int qsv_retrieve_enc_params(AVCod
};
#endif
- mfxExtBuffer *ext_buffers[6 + QSV_HAVE_HE];
+ mfxExtHEVCParam hevc_param_buf = {
+ .Header.BufferId = MFX_EXTBUFF_HEVC_PARAM,
+ .Header.BufferSz = sizeof(hevc_param_buf),
+ };
+ mfxExtBuffer *ext_buffers[7 + QSV_HAVE_HE];
int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
int ret, ext_buf_num = 0, extradata_offset = 0;
q->co2_idx = q->co3_idx = q->exthevctiles_idx = q->exthypermodeparam_idx = -1;
+ q->exthevcparam_idx = -1;
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&extradata;
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&co;
@@ -1496,6 +1521,10 @@ static int qsv_retrieve_enc_params(AVCod
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&hyper_mode_param_buf;
}
#endif
+ if (avctx->codec_id == AV_CODEC_ID_HEVC && QSV_RUNTIME_VERSION_ATLEAST(q->ver, 2, 0)) {
+ q->exthevcparam_idx = ext_buf_num;
+ ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&hevc_param_buf;
+ }
q->param.ExtParam = ext_buffers;
q->param.NumExtParam = ext_buf_num;
Index: FFmpeg/libavcodec/qsvenc.h
===================================================================
--- FFmpeg.orig/libavcodec/qsvenc.h
+++ FFmpeg/libavcodec/qsvenc.h
@@ -177,6 +177,7 @@ typedef struct QSVEncContext {
mfxExtMultiFrameControl extmfc;
#endif
mfxExtHEVCTiles exthevctiles;
+ mfxExtHEVCParam exthevcparam;
mfxExtVP9Param extvp9param;
#if QSV_HAVE_EXT_AV1_PARAM
mfxExtAV1TileParam extav1tileparam;
@@ -193,7 +194,7 @@ typedef struct QSVEncContext {
mfxExtVideoSignalInfo extvsi;
- mfxExtBuffer *extparam_internal[5 + (QSV_HAVE_MF * 2) + (QSV_HAVE_EXT_AV1_PARAM * 2) + QSV_HAVE_HE];
+ mfxExtBuffer *extparam_internal[6 + (QSV_HAVE_MF * 2) + (QSV_HAVE_EXT_AV1_PARAM * 2) + QSV_HAVE_HE];
int nb_extparam_internal;
mfxExtBuffer **extparam_str;
@@ -321,6 +322,9 @@ typedef struct QSVEncContext {
int dual_gfx;
AVDictionary *qsv_params;
+ int exthevcparam_idx;
+ int main10sp;
+
} QSVEncContext;
int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q);
Index: FFmpeg/libavcodec/qsvenc_hevc.c
===================================================================
--- FFmpeg.orig/libavcodec/qsvenc_hevc.c
+++ FFmpeg/libavcodec/qsvenc_hevc.c
@@ -363,6 +363,9 @@ static const AVOption options[] = {
{ "int_ref_qp_delta", "QP difference for the refresh MBs", OFFSET(qsv.int_ref_qp_delta), AV_OPT_TYPE_INT, { .i64 = INT16_MIN }, INT16_MIN, INT16_MAX, VE },
{ "int_ref_cycle_dist", "Distance between the beginnings of the intra-refresh cycles in frames", OFFSET(qsv.int_ref_cycle_dist), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT16_MAX, VE },
+#if QSV_ONEVPL
+ { "main10sp", "This profile allow to encode 10 bit single still picture", OFFSET(qsv.main10sp), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
+#endif
{ NULL },
};

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavfilter/qsvvpp.c
Index: FFmpeg/libavfilter/qsvvpp.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/qsvvpp.c
+++ jellyfin-ffmpeg/libavfilter/qsvvpp.c
--- FFmpeg.orig/libavfilter/qsvvpp.c
+++ FFmpeg/libavfilter/qsvvpp.c
@@ -167,7 +167,7 @@ int ff_qsvvpp_print_warning(void *log_ct
const char *desc;
int ret;
@ -11,32 +11,25 @@ Index: jellyfin-ffmpeg/libavfilter/qsvvpp.c
return ret;
}
@@ -900,6 +900,20 @@ int ff_qsvvpp_filter_frame(QSVVPPContext
out_frame->frame->pts = av_rescale_q(out_frame->surface.Data.TimeStamp,
default_tb, outlink->time_base);
+ /* Copy the color side data */
+ if (in_frame->frame->color_primaries != -1)
+ out_frame->frame->color_primaries = in_frame->frame->color_primaries;
+ if (in_frame->frame->color_trc != -1)
+ out_frame->frame->color_trc = in_frame->frame->color_trc;
+ if (in_frame->frame->colorspace != -1)
+ out_frame->frame->colorspace = in_frame->frame->colorspace;
+ if (in_frame->frame->color_range != -1)
+ out_frame->frame->color_range = in_frame->frame->color_range;
+
+ ret = av_frame_copy_side_data(out_frame->frame, in_frame->frame, 0);
+ if (ret < 0)
+ return ret;
+
out_frame->queued++;
aframe = (QSVAsyncFrame){ sync, out_frame };
av_fifo_write(s->async_fifo, &aframe, 1);
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
@@ -463,8 +463,12 @@ static QSVFrame *submit_frame(QSVVPPCont
!(qsv_frame->frame->flags & AV_FRAME_FLAG_INTERLACED) ? MFX_PICSTRUCT_PROGRESSIVE :
((qsv_frame->frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) ? MFX_PICSTRUCT_FIELD_TFF :
MFX_PICSTRUCT_FIELD_BFF);
- if (qsv_frame->frame->repeat_pict == 1)
+ if (qsv_frame->frame->repeat_pict == 1) {
qsv_frame->surface.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
+ qsv_frame->surface.Info.PicStruct |=
+ (qsv_frame->frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) ? MFX_PICSTRUCT_FIELD_TFF :
+ MFX_PICSTRUCT_FIELD_BFF;
+ }
else if (qsv_frame->frame->repeat_pict == 2)
qsv_frame->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
else if (qsv_frame->frame->repeat_pict == 4)
Index: FFmpeg/libavfilter/vf_overlay_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_qsv.c
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
@@ -228,40 +228,51 @@ static int config_overlay_input(AVFilter
--- FFmpeg.orig/libavfilter/vf_overlay_qsv.c
+++ FFmpeg/libavfilter/vf_overlay_qsv.c
@@ -228,40 +228,47 @@ static int config_overlay_input(AVFilter
static int process_frame(FFFrameSync *fs)
{
@ -76,10 +69,6 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
+ if (ret < 0 && ret != AVERROR(EAGAIN))
+ return ret;
+
+ /* remove all side data of the overlay frame*/
+ if (overlay)
+ av_frame_remove_all_side_data(overlay);
+
+ /* composite overlay frame */
+ /* or overwrite main frame again if the overlay frame isn't ready yet */
+ return ff_qsvvpp_filter_frame(qsv, overlay ? in1 : in0, overlay ? overlay : main);
@ -113,7 +102,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
return ff_framesync_configure(&s->fs);
}
@@ -282,12 +293,6 @@ static int config_output(AVFilterLink *o
@@ -282,12 +289,6 @@ static int config_output(AVFilterLink *o
return AVERROR(EINVAL);
} else if (in0->format == AV_PIX_FMT_QSV) {
AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data;
@ -126,7 +115,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
vpp->qsv_param.out_sw_format = hw_frame0->sw_format;
}
@@ -369,6 +374,7 @@ static int overlay_qsv_query_formats(AVF
@@ -369,6 +370,7 @@ static int overlay_qsv_query_formats(AVF
static const enum AVPixelFormat main_in_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NV12,
@ -134,7 +123,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
AV_PIX_FMT_YUYV422,
AV_PIX_FMT_RGB32,
AV_PIX_FMT_QSV,
@@ -376,6 +382,7 @@ static int overlay_qsv_query_formats(AVF
@@ -376,6 +378,7 @@ static int overlay_qsv_query_formats(AVF
};
static const enum AVPixelFormat out_pix_fmts[] = {
AV_PIX_FMT_NV12,
@ -142,11 +131,11 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_qsv.c
AV_PIX_FMT_QSV,
AV_PIX_FMT_NONE
};
Index: jellyfin-ffmpeg/libavfilter/vf_vpp_qsv.c
Index: FFmpeg/libavfilter/vf_vpp_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_vpp_qsv.c
+++ jellyfin-ffmpeg/libavfilter/vf_vpp_qsv.c
@@ -344,6 +344,30 @@ static mfxStatus get_mfx_version(const A
--- FFmpeg.orig/libavfilter/vf_vpp_qsv.c
+++ FFmpeg/libavfilter/vf_vpp_qsv.c
@@ -388,6 +388,30 @@ static mfxStatus get_mfx_version(const A
return MFXQueryVersion(device_hwctx->session, mfx_version);
}
@ -174,22 +163,23 @@ Index: jellyfin-ffmpeg/libavfilter/vf_vpp_qsv.c
+ return MFXVideoCORE_QueryPlatform(device_hwctx->session, mfx_platform);
+}
+
static int config_output(AVFilterLink *outlink)
static int vpp_set_frame_ext_params(AVFilterContext *ctx, const AVFrame *in, AVFrame *out, QSVVPPFrameParam *fp)
{
AVFilterContext *ctx = outlink->src;
@@ -358,7 +382,10 @@ static int config_output(AVFilterLink *o
outlink->w = vpp->out_width;
outlink->h = vpp->out_height;
outlink->frame_rate = vpp->framerate;
- outlink->time_base = av_inv_q(vpp->framerate);
+ if (vpp->framerate.num == 0 || vpp->framerate.den == 0)
+ outlink->time_base = inlink->time_base;
+ else
+ outlink->time_base = av_inv_q(vpp->framerate);
#if QSV_ONEVPL
@@ -494,9 +518,9 @@ static int vpp_set_frame_ext_params(AVFi
outvsi_conf.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO_OUT;
outvsi_conf.Header.BufferSz = sizeof(mfxExtVideoSignalInfo);
outvsi_conf.VideoFullRange = (out->color_range == AVCOL_RANGE_JPEG);
- outvsi_conf.ColourPrimaries = (out->color_primaries == AVCOL_PRI_UNSPECIFIED) ? AVCOL_PRI_BT709 : out->color_primaries;
- outvsi_conf.TransferCharacteristics = (out->color_trc == AVCOL_TRC_UNSPECIFIED) ? AVCOL_TRC_BT709 : out->color_trc;
- outvsi_conf.MatrixCoefficients = (out->colorspace == AVCOL_SPC_UNSPECIFIED) ? AVCOL_SPC_BT709 : out->colorspace;
+ outvsi_conf.ColourPrimaries = (out->color_primaries == AVCOL_PRI_UNSPECIFIED) ? invsi_conf.ColourPrimaries : out->color_primaries;
+ outvsi_conf.TransferCharacteristics = (out->color_trc == AVCOL_TRC_UNSPECIFIED) ? invsi_conf.TransferCharacteristics : out->color_trc;
+ outvsi_conf.MatrixCoefficients = (out->colorspace == AVCOL_SPC_UNSPECIFIED) ? invsi_conf.MatrixCoefficients : out->colorspace;
outvsi_conf.ColourDescriptionPresent = 1;
param.filter_frame = NULL;
param.num_ext_buf = 0;
@@ -504,12 +531,21 @@ static int config_output(AVFilterLink *o
if (memcmp(&vpp->invsi_conf, &invsi_conf, sizeof(mfxExtVideoSignalInfo)) ||
@@ -686,12 +710,24 @@ static int config_output(AVFilterLink *o
if (inlink->w != outlink->w || inlink->h != outlink->h || in_format != vpp->out_format) {
if (QSV_RUNTIME_VERSION_ATLEAST(mfx_version, 1, 19)) {
@ -205,7 +195,10 @@ Index: jellyfin-ffmpeg/libavfilter/vf_vpp_qsv.c
+ /* Compute mode is only available on DG2+ platforms */
+ if (vpl && get_mfx_platform(ctx, &mfx_platform) == MFX_ERR_NONE) {
+ int code_name = mfx_platform.CodeName;
+ compute = code_name >= 45 && code_name != 55 && code_name != 50;
+ compute = code_name >= 45 &&
+ code_name <= 54 &&
+ code_name != 55 &&
+ code_name != 50;
+ }
+
+ if (mode == -1)
@ -215,74 +208,63 @@ Index: jellyfin-ffmpeg/libavfilter/vf_vpp_qsv.c
INIT_MFX_EXTBUF(scale_conf, MFX_EXTBUFF_VPP_SCALING);
SET_MFX_PARAM_FIELD(scale_conf, ScalingMode, mode);
@@ -582,6 +618,11 @@ static int activate(AVFilterContext *ctx
if (in->pts != AV_NOPTS_VALUE)
in->pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base);
+ if (outlink->frame_rate.num && outlink->frame_rate.den)
+ in->duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
+ else
+ in->duration = 0;
+
ret = ff_filter_frame(outlink, in);
if (ret < 0)
return ret;
@@ -686,20 +727,14 @@ static const AVOption vpp_options[] = {
{ "h", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
@@ -880,19 +916,13 @@ static const AVOption vpp_options[] = {
{ "height", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
{ "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
- { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
{ "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, .flags = FLAGS },
-#if QSV_ONEVPL
- { "scale_mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 5, .flags = FLAGS, "scale mode" },
- { "scale_mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 5, .flags = FLAGS, .unit = "scale mode" },
-#else
- { "scale_mode", "scaling & format conversion mode", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT }, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, .flags = FLAGS, "scale mode" },
- { "scale_mode", "scaling & format conversion mode", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT }, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, .flags = FLAGS, .unit = "scale mode" },
-#endif
+ { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, .flags = FLAGS },
+ { "scale_mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 5, .flags = FLAGS, "scale mode" },
{ "auto", "auto mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_DEFAULT}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
{ "low_power", "low power mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
{ "hq", "high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
+ { "scale_mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 5, .flags = FLAGS, .unit = "scale mode" },
{ "auto", "auto mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_DEFAULT}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
{ "low_power", "low power mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
{ "hq", "high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
-#if QSV_ONEVPL
{ "compute", "compute", 0, AV_OPT_TYPE_CONST, { .i64 = 3}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
{ "vd", "vd", 0, AV_OPT_TYPE_CONST, { .i64 = 4}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
{ "ve", "ve", 0, AV_OPT_TYPE_CONST, { .i64 = 5}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
{ "compute", "compute", 0, AV_OPT_TYPE_CONST, { .i64 = 3}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
{ "vd", "vd", 0, AV_OPT_TYPE_CONST, { .i64 = 4}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
{ "ve", "ve", 0, AV_OPT_TYPE_CONST, { .i64 = 5}, INT_MIN, INT_MAX, FLAGS, .unit = "scale mode"},
-#endif
{ "rate", "Generate output at frame rate or field rate, available only for deinterlace mode",
OFFSET(field_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "rate" },
@@ -708,6 +743,7 @@ static const AVOption vpp_options[] = {
{ "field", "Output at field rate (one frame of output for each field)",
0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "rate" },
OFFSET(field_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, .unit = "rate" },
@@ -923,8 +953,9 @@ static const AVOption vpp_options[] = {
{ "out_color_transfer", "Output color transfer characteristics",
OFFSET(color_transfer_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
- {"tonemap", "Perform tonemapping (0=disable tonemapping, 1=perform tonemapping if the input has HDR metadata)", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, .flags = FLAGS},
+ { "tonemap", "Perform tonemapping (0=disable tonemapping, 1=perform tonemapping if the input has HDR metadata)", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, .flags = FLAGS },
+ { "passthrough", "Apply pass through mode if possible.", OFFSET(has_passthrough), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, .flags = FLAGS },
{ NULL }
};
@@ -752,19 +788,14 @@ static const AVOption qsvscale_options[]
@@ -978,19 +1009,14 @@ static const AVOption qsvscale_options[]
{ "h", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
{ "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
-#if QSV_ONEVPL
- { "mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 5, FLAGS, "mode"},
- { "mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 5, FLAGS, .unit = "mode"},
-#else
- { "mode", "scaling & format conversion mode", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT}, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode"},
- { "mode", "scaling & format conversion mode", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT}, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, .unit = "mode"},
-#endif
+ { "mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 5, FLAGS, "mode"},
{ "low_power", "low power mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "hq", "high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY}, INT_MIN, INT_MAX, FLAGS, "mode"},
+ { "mode", "scaling & format conversion mode (mode compute(3), vd(4) and ve(5) are only available on some platforms)", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = -1}, -1, 5, FLAGS, .unit = "mode"},
{ "low_power", "low power mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"},
{ "hq", "high quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"},
-#if QSV_ONEVPL
{ "compute", "compute", 0, AV_OPT_TYPE_CONST, { .i64 = 3}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "vd", "vd", 0, AV_OPT_TYPE_CONST, { .i64 = 4}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "ve", "ve", 0, AV_OPT_TYPE_CONST, { .i64 = 5}, INT_MIN, INT_MAX, FLAGS, "mode"},
{ "compute", "compute", 0, AV_OPT_TYPE_CONST, { .i64 = 3}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"},
{ "vd", "vd", 0, AV_OPT_TYPE_CONST, { .i64 = 4}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"},
{ "ve", "ve", 0, AV_OPT_TYPE_CONST, { .i64 = 5}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"},
-#endif
+ { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, .flags = FLAGS },
{ NULL },
};
@@ -789,6 +820,7 @@ static const AVOption qsvdeint_options[]
{ "bob", "bob algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_BOB}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, "mode"},
{ "advanced", "Motion adaptive algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_ADVANCED}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, "mode"},
@@ -1015,6 +1041,7 @@ static const AVOption qsvdeint_options[]
{ "bob", "bob algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_BOB}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, .unit = "mode"},
{ "advanced", "Motion adaptive algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_ADVANCED}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, .unit = "mode"},
+ { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, .flags = FLAGS },
{ NULL },

View File

@ -0,0 +1,184 @@
Index: FFmpeg/libavcodec/qsv_internal.h
===================================================================
--- FFmpeg.orig/libavcodec/qsv_internal.h
+++ FFmpeg/libavcodec/qsv_internal.h
@@ -100,6 +100,7 @@ typedef struct QSVFrame {
int queued;
int used;
+ int external_frame;
struct QSVFrame *next;
} QSVFrame;
Index: FFmpeg/libavcodec/qsvenc.c
===================================================================
--- FFmpeg.orig/libavcodec/qsvenc.c
+++ FFmpeg/libavcodec/qsvenc.c
@@ -1900,6 +1900,10 @@ static void clear_unused_frames(QSVEncCo
memset(&cur->enc_ctrl, 0, sizeof(cur->enc_ctrl));
cur->enc_ctrl.Payload = cur->payloads;
cur->enc_ctrl.ExtParam = cur->extparam;
+ if (cur->external_frame) {
+ av_freep(&cur->surface.Data.MemId);
+ cur->external_frame = 0;
+ }
if (cur->frame->format == AV_PIX_FMT_QSV) {
av_frame_unref(cur->frame);
}
@@ -2071,6 +2075,16 @@ static int submit_frame(QSVEncContext *q
return ret;
if (frame->format == AV_PIX_FMT_QSV) {
+ AVHWFramesContext *frames_ctx = NULL;
+ AVQSVFramesContext *frames_hwctx = NULL;
+ int is_fixed_pool = 0;
+
+ if (q->avctx->hw_frames_ctx) {
+ frames_ctx = (AVHWFramesContext *)q->avctx->hw_frames_ctx->data;
+ frames_hwctx = frames_ctx->hwctx;
+ is_fixed_pool = frames_hwctx->nb_surfaces > 0;
+ }
+
ret = av_frame_ref(qf->frame, frame);
if (ret < 0)
return ret;
@@ -2079,10 +2093,19 @@ static int submit_frame(QSVEncContext *q
if (q->frames_ctx.mids) {
ret = ff_qsv_find_surface_idx(&q->frames_ctx, qf);
- if (ret < 0)
+ if (ret < 0 && !is_fixed_pool)
return ret;
-
- qf->surface.Data.MemId = &q->frames_ctx.mids[ret];
+ if (ret >= 0)
+ qf->surface.Data.MemId = &q->frames_ctx.mids[ret];
+ }
+ if (is_fixed_pool && (!q->frames_ctx.mids || ret < 0)) {
+ QSVMid *mid = NULL;
+ mid = (QSVMid *)av_mallocz(sizeof(*mid));
+ if (!mid)
+ return AVERROR(ENOMEM);
+ mid->handle_pair = (mfxHDLPair *)qf->surface.Data.MemId;
+ qf->surface.Data.MemId = mid;
+ qf->external_frame = 1;
}
} else {
/* make a copy if the input is not padded as libmfx requires */
Index: FFmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_qsv.c
+++ FFmpeg/libavutil/hwcontext_qsv.c
@@ -2142,11 +2142,25 @@ static int qsv_frames_derive_to(AVHWFram
}
}
+#if CONFIG_VAAPI
+static void qsv_fixed_pool_unmap_from_vaapi(AVHWFramesContext *dst_fc,
+ HWMapDescriptor *hwmap)
+{
+ mfxFrameSurface1 *new_sur = (mfxFrameSurface1 *)hwmap->priv;
+ mfxHDLPair *hdlpair = (mfxHDLPair *)new_sur->Data.MemId;
+ av_freep(&hdlpair->first);
+ av_freep(&new_sur->Data.MemId);
+ av_freep(&new_sur);
+}
+#endif
+
static int qsv_fixed_pool_map_to(AVHWFramesContext *dst_ctx,
AVFrame *dst, const AVFrame *src, int flags)
{
AVQSVFramesContext *hwctx = dst_ctx->hwctx;
int i, err, index = -1;
+ mfxFrameSurface1 *new_sur = NULL;
+ mfxHDLPair *new_hdlpair = NULL;
for (i = 0; i < hwctx->nb_surfaces && index < 0; i++) {
switch(src->format) {
@@ -2185,21 +2199,77 @@ static int qsv_fixed_pool_map_to(AVHWFra
}
}
if (index < 0) {
- av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
- "is not in the mapped frames context.\n");
- return AVERROR(EINVAL);
- }
+ switch (src->format) {
+#if CONFIG_VAAPI
+ case AV_PIX_FMT_VAAPI:
+ {
+ new_sur = (mfxFrameSurface1 *)av_mallocz(sizeof(*new_sur));
+ if (!new_sur) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ err = qsv_init_surface(dst_ctx, new_sur);
+ if (err < 0)
+ goto qsv_map_to_err;
+
+ new_hdlpair = (mfxHDLPair *)av_mallocz(sizeof(*new_hdlpair));
+ if (!new_hdlpair) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ new_hdlpair->first = (VASurfaceID *)av_mallocz(sizeof(VASurfaceID));
+ if (!new_hdlpair->first) {
+ err = AVERROR(ENOMEM);
+ goto qsv_map_to_err;
+ }
+ *(VASurfaceID*)(new_hdlpair->first) = (VASurfaceID)(uintptr_t)src->data[3];
+ new_sur->Data.MemId = new_hdlpair;
- err = ff_hwframe_map_create(dst->hw_frames_ctx,
- dst, src, NULL, NULL);
- if (err)
- return err;
+ err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
+ &qsv_fixed_pool_unmap_from_vaapi,
+ (void*)new_sur);
+ if (err)
+ goto qsv_map_to_err;
+
+ av_log(dst_ctx, AV_LOG_DEBUG, "Trying to map from a surface which "
+ "is not in the mapped frames context, so create a new surface\n");
+ }
+ break;
+#endif
+#if CONFIG_DXVA2
+ case AV_PIX_FMT_DXVA2_VLD:
+ {
+ av_log(dst_ctx, AV_LOG_ERROR, "Trying to map from a surface which "
+ "is not in the mapped frames context.\n");
+ return AVERROR(EINVAL);
+ }
+ break;
+#endif
+ default:
+ return AVERROR(ENOSYS);
+ }
+ } else {
+ err = ff_hwframe_map_create(dst->hw_frames_ctx,
+ dst, src, NULL, NULL);
+ if (err)
+ goto qsv_map_to_err;
+ }
dst->width = src->width;
dst->height = src->height;
- dst->data[3] = (uint8_t*)&hwctx->surfaces[index];
+ dst->data[3] = (uint8_t*)((index == -1) ? new_sur : &hwctx->surfaces[index]);
return 0;
+
+qsv_map_to_err:
+ if (new_sur)
+ av_freep(&new_sur);
+ if (new_hdlpair) {
+ if (new_hdlpair->first)
+ av_freep(&new_hdlpair->first);
+ av_freep(&new_hdlpair);
+ }
+ return err;
}
static void qsv_dynamic_pool_unmap(AVHWFramesContext *ctx, HWMapDescriptor *hwmap)

View File

@ -1,16 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/buffersrc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/buffersrc.c
+++ jellyfin-ffmpeg/libavfilter/buffersrc.c
@@ -65,9 +65,9 @@ typedef struct BufferSourceContext {
#define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format, pts)\
if (c->w != width || c->h != height || c->pix_fmt != format) {\
- av_log(s, AV_LOG_INFO, "filter context - w: %d h: %d fmt: %d, incoming frame - w: %d h: %d fmt: %d pts_time: %s\n",\
+ av_log(s, AV_LOG_DEBUG, "filter context - w: %d h: %d fmt: %d, incoming frame - w: %d h: %d fmt: %d pts_time: %s\n",\
c->w, c->h, c->pix_fmt, width, height, format, av_ts2timestr(pts, &s->outputs[0]->time_base));\
- av_log(s, AV_LOG_WARNING, "Changing video frame properties on the fly is not supported by all filters.\n");\
+ av_log(s, AV_LOG_DEBUG, "Changing video frame properties on the fly is not supported by all filters.\n");\
}
#define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, layout, format, pts)\

View File

@ -1,28 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/hevc_mp4toannexb_bsf.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/hevc_mp4toannexb_bsf.c
+++ jellyfin-ffmpeg/libavcodec/hevc_mp4toannexb_bsf.c
@@ -121,7 +121,7 @@ static int hevc_mp4toannexb_filter(AVBSF
HEVCBSFContext *s = ctx->priv_data;
AVPacket *in;
GetByteContext gb;
-
+ int has_sps = 0, has_pps = 0;
int got_irap = 0;
int i, ret = 0;
@@ -155,10 +155,13 @@ static int hevc_mp4toannexb_filter(AVBSF
}
nalu_type = (bytestream2_peek_byte(&gb) >> 1) & 0x3f;
+ has_sps = (has_sps || nalu_type == HEVC_NAL_SPS);
+ has_pps = (has_pps || nalu_type == HEVC_NAL_PPS);
/* prepend extradata to IRAP frames */
is_irap = nalu_type >= 16 && nalu_type <= 23;
- add_extradata = is_irap && !got_irap;
+ /* ignore the extradata if IRAP frame has sps and pps */
+ add_extradata = is_irap && !got_irap && !(has_sps && has_pps);
extra_size = add_extradata * ctx->par_out->extradata_size;
got_irap |= is_irap;

View File

@ -0,0 +1,13 @@
Index: FFmpeg/libavfilter/buffersrc.c
===================================================================
--- FFmpeg.orig/libavfilter/buffersrc.c
+++ FFmpeg/libavfilter/buffersrc.c
@@ -75,7 +75,7 @@ typedef struct BufferSourceContext {
c->prev_delta = c->prev_w != width || c->prev_h != height || c->prev_pix_fmt != format ||\
c->prev_color_space != csp || c->prev_color_range != range;\
if (c->link_delta) {\
- int loglevel = c->prev_delta ? AV_LOG_WARNING : AV_LOG_DEBUG;\
+ int loglevel = c->prev_delta ? AV_LOG_VERBOSE : AV_LOG_DEBUG;\
av_log(s, loglevel, "Changing video frame properties on the fly is not supported by all filters.\n");\
av_log(s, loglevel, "filter context - w: %d h: %d fmt: %d csp: %s range: %s, incoming frame - w: %d h: %d fmt: %d csp: %s range: %s pts_time: %s\n",\
c->w, c->h, c->pix_fmt, av_color_space_name(c->color_space), av_color_range_name(c->color_range),\

View File

@ -0,0 +1,41 @@
Index: FFmpeg/libavcodec/bsf/hevc_mp4toannexb.c
===================================================================
--- FFmpeg.orig/libavcodec/bsf/hevc_mp4toannexb.c
+++ FFmpeg/libavcodec/bsf/hevc_mp4toannexb.c
@@ -126,6 +126,7 @@ static int hevc_mp4toannexb_filter(AVBSF
int got_irap = 0;
int i, ret = 0;
+ int has_sps = 0, has_pps = 0;
ret = ff_bsf_get_packet(ctx, &in);
if (ret < 0)
@@ -157,11 +158,14 @@ static int hevc_mp4toannexb_filter(AVBSF
}
nalu_type = (bytestream2_peek_byte(&gb) >> 1) & 0x3f;
+ has_sps = (has_sps || nalu_type == HEVC_NAL_SPS);
+ has_pps = (has_pps || nalu_type == HEVC_NAL_PPS);
/* prepend extradata to IRAP frames */
is_irap = nalu_type >= HEVC_NAL_BLA_W_LP &&
nalu_type <= HEVC_NAL_RSV_IRAP_VCL23;
- add_extradata = is_irap && !got_irap;
+ /* ignore the extradata if IRAP frame has sps and pps */
+ add_extradata = is_irap && !got_irap && !(has_sps && has_pps);
extra_size = add_extradata * ctx->par_out->extradata_size;
got_irap |= is_irap;
Index: FFmpeg/tests/fate/hevc.mak
===================================================================
--- FFmpeg.orig/tests/fate/hevc.mak
+++ FFmpeg/tests/fate/hevc.mak
@@ -222,7 +222,7 @@ FATE_HEVC-$(call ALLYES, HEVC_DEMUXER MO
fate-hevc-bsf-mp4toannexb: tests/data/hevc-mp4.mov
fate-hevc-bsf-mp4toannexb: CMD = md5 -i $(TARGET_PATH)/tests/data/hevc-mp4.mov -c:v copy -fflags +bitexact -f hevc
fate-hevc-bsf-mp4toannexb: CMP = oneline
-fate-hevc-bsf-mp4toannexb: REF = 73019329ed7f81c24f9af67c34c640c0
+fate-hevc-bsf-mp4toannexb: REF = 7d05a79c7a6665ae22c0043a4d83a811
fate-hevc-skiploopfilter: CMD = framemd5 -skip_loop_filter nokey -i $(TARGET_SAMPLES)/hevc-conformance/SAO_D_Samsung_5.bit -sws_flags bitexact
FATE_HEVC-$(call FRAMEMD5, HEVC, HEVC, HEVC_PARSER) += fate-hevc-skiploopfilter

View File

@ -1,36 +1,32 @@
Index: jellyfin-ffmpeg/libavfilter/vf_subtitles.c
Index: FFmpeg/libavfilter/vf_subtitles.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_subtitles.c
+++ jellyfin-ffmpeg/libavfilter/vf_subtitles.c
@@ -56,10 +56,13 @@ typedef struct AssContext {
char *force_style;
int stream_index;
int alpha;
--- FFmpeg.orig/libavfilter/vf_subtitles.c
+++ FFmpeg/libavfilter/vf_subtitles.c
@@ -65,6 +65,9 @@ typedef struct AssContext {
int shaping;
FFDrawContext draw;
int wrap_unicode;
+ int sub2video;
+ int last_image;
uint8_t rgba_map[4];
int pix_step[4]; ///< steps per pixel for each plane of the main output
int original_w, original_h;
int shaping;
+ int64_t max_pts, max_ts_ms;
FFDrawContext draw;
} AssContext;
@@ -71,7 +74,12 @@ typedef struct AssContext {
#define OFFSET(x) offsetof(AssContext, x)
@@ -75,7 +78,12 @@ typedef struct AssContext {
{"f", "set the filename of file to read", OFFSET(filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, \
{"original_size", "set the size of the original video (used to scale fonts)", OFFSET(original_w), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, FLAGS }, \
{"fontsdir", "set the directory containing the fonts to read", OFFSET(fontsdir), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, \
- {"alpha", "enable processing of alpha channel", OFFSET(alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, FLAGS }, \
+ {"alpha", "enable processing of alpha channel", OFFSET(alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, FLAGS }, \
+ {"sub2video", "enable textual subtitle to video mode", OFFSET(sub2video), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, FLAGS }, \
+ {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, {.i64 = ASS_SHAPING_COMPLEX }, -1, 1, FLAGS, "shaping_mode"}, \
+ {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"}, \
+ {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"}, \
+ {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"}, \
+ {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, {.i64 = ASS_SHAPING_COMPLEX }, -1, 1, FLAGS, .unit = "shaping_mode"}, \
+ {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"}, \
+ {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"}, \
+ {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"}, \
/* libass supports a log level ranging from 0 to 7 */
static const int ass_libavfilter_log_level_map[] = {
@@ -157,6 +165,8 @@ static int config_input(AVFilterLink *in
@@ -162,6 +170,8 @@ static int config_input(AVFilterLink *in
if (ass->shaping != -1)
ass_set_shaper(ass->renderer, ass->shaping);
@ -39,7 +35,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_subtitles.c
return 0;
}
@@ -187,18 +197,41 @@ static int filter_frame(AVFilterLink *in
@@ -192,18 +202,41 @@ static int filter_frame(AVFilterLink *in
AVFilterLink *outlink = ctx->outputs[0];
AssContext *ass = ctx->priv;
int detect_change = 0;
@ -83,18 +79,18 @@ Index: jellyfin-ffmpeg/libavfilter/vf_subtitles.c
static const AVFilterPad ass_inputs[] = {
{
.name = "default",
@@ -220,10 +253,6 @@ static const AVFilterPad ass_outputs[] =
@@ -218,10 +251,6 @@ static const AVFilterPad ass_inputs[] =
static const AVOption ass_options[] = {
COMMON_OPTIONS
- {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, "shaping_mode"},
- {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
- {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
- {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, "shaping_mode"},
- {"shaping", "set shaping engine", OFFSET(shaping), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, FLAGS, .unit = "shaping_mode"},
- {"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"},
- {"simple", "simple shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_SIMPLE}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"},
- {"complex", "complex shaping", 0, AV_OPT_TYPE_CONST, {.i64 = ASS_SHAPING_COMPLEX}, INT_MIN, INT_MAX, FLAGS, .unit = "shaping_mode"},
{NULL},
};
@@ -247,6 +276,9 @@ static av_cold int init_ass(AVFilterCont
@@ -245,6 +274,9 @@ static av_cold int init_ass(AVFilterCont
ass->filename);
return AVERROR(EINVAL);
}
@ -104,7 +100,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_subtitles.c
return 0;
}
@@ -268,8 +300,8 @@ const AVFilter ff_vf_ass = {
@@ -266,8 +298,8 @@ const AVFilter ff_vf_ass = {
static const AVOption subtitles_options[] = {
COMMON_OPTIONS
{"charenc", "set input character encoding", OFFSET(charenc), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS},
@ -113,9 +109,9 @@ Index: jellyfin-ffmpeg/libavfilter/vf_subtitles.c
+ {"stream_index", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
+ {"si", "set stream index", OFFSET(stream_index), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS},
{"force_style", "force subtitle style", OFFSET(force_style), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS},
{NULL},
};
@@ -483,6 +515,8 @@ static av_cold int init_subtitles(AVFilt
#if FF_ASS_FEATURE_WRAP_UNICODE
{"wrap_unicode", "break lines according to the Unicode Line Breaking Algorithm", OFFSET(wrap_unicode), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, FLAGS },
@@ -496,6 +528,8 @@ static av_cold int init_subtitles(AVFilt
avsubtitle_free(&sub);
}

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavfilter/Makefile
Index: FFmpeg/libavfilter/Makefile
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/Makefile
+++ jellyfin-ffmpeg/libavfilter/Makefile
@@ -570,6 +570,7 @@ OBJS-$(CONFIG_XSTACK_QSV_FILTER)
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -588,6 +588,7 @@ OBJS-$(CONFIG_XSTACK_QSV_FILTER)
OBJS-$(CONFIG_ALLRGB_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_ALLYUV_FILTER) += vsrc_testsrc.o
@ -10,22 +10,22 @@ Index: jellyfin-ffmpeg/libavfilter/Makefile
OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_COLORCHART_FILTER) += vsrc_testsrc.o
Index: jellyfin-ffmpeg/libavfilter/allfilters.c
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/allfilters.c
+++ jellyfin-ffmpeg/libavfilter/allfilters.c
@@ -534,6 +534,7 @@ extern const AVFilter ff_vf_xstack_qsv;
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -552,6 +552,7 @@ extern const AVFilter ff_vf_xstack_qsv;
extern const AVFilter ff_vsrc_allrgb;
extern const AVFilter ff_vsrc_allyuv;
+extern const AVFilter ff_vsrc_alphasrc;
extern const AVFilter ff_vsrc_cellauto;
extern const AVFilter ff_vsrc_color;
extern const AVFilter ff_vsrc_colorchart;
Index: jellyfin-ffmpeg/libavfilter/vsrc_alphasrc.c
extern const AVFilter ff_vsrc_color_vulkan;
Index: FFmpeg/libavfilter/vsrc_alphasrc.c
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/vsrc_alphasrc.c
+++ FFmpeg/libavfilter/vsrc_alphasrc.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2021 NyanMisaka
@ -147,7 +147,7 @@ Index: jellyfin-ffmpeg/libavfilter/vsrc_alphasrc.c
+ if (s->rgb)
+ memset(out->buf[i]->data, 0, out->buf[i]->size);
+ else if (s->planar)
+ memset(out->buf[i]->data, (i == 1 || i == 2) ? 128 : 0, out->buf[i]->size);
+ memset(out->buf[i]->data, (i == 0) ? 16 : ((i == 1 || i == 2) ? 128 : 0), out->buf[i]->size);
+ }
+ }
+

View File

@ -1,199 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsv.c
+++ jellyfin-ffmpeg/libavcodec/qsv.c
@@ -834,7 +834,7 @@ static mfxStatus qsv_frame_alloc(mfxHDL
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
mfxFrameInfo *i = &req->Info;
- mfxFrameInfo *i1 = &frames_hwctx->surfaces[0].Info;
+ mfxFrameInfo *i1 = &frames_hwctx->reserve_surface.Info;
if (i->Width > i1->Width || i->Height > i1->Height ||
i->FourCC != i1->FourCC || i->ChromaFormat != i1->ChromaFormat) {
@@ -949,7 +949,7 @@ static mfxStatus qsv_frame_lock(mfxHDL p
if (!qsv_mid->hw_frame->hw_frames_ctx)
goto fail;
- qsv_mid->surf.Info = hw_frames_hwctx->surfaces[0].Info;
+ qsv_mid->surf.Info = hw_frames_hwctx->reserve_surface.Info;
qsv_mid->surf.Data.MemId = qsv_mid->handle_pair;
/* map the data to the system memory */
Index: jellyfin-ffmpeg/libavcodec/qsvenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc.c
@@ -721,8 +721,8 @@ static int init_video_param_jpeg(AVCodec
if (avctx->hw_frames_ctx) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext *)avctx->hw_frames_ctx->data;
AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
- q->param.mfx.FrameInfo.Width = frames_hwctx->surfaces[0].Info.Width;
- q->param.mfx.FrameInfo.Height = frames_hwctx->surfaces[0].Info.Height;
+ q->param.mfx.FrameInfo.Width = frames_hwctx->reserve_surface.Info.Width;
+ q->param.mfx.FrameInfo.Height = frames_hwctx->reserve_surface.Info.Height;
}
if (avctx->framerate.den > 0 && avctx->framerate.num > 0) {
@@ -845,8 +845,8 @@ static int init_video_param(AVCodecConte
if (avctx->hw_frames_ctx) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
- q->param.mfx.FrameInfo.Width = frames_hwctx->surfaces[0].Info.Width;
- q->param.mfx.FrameInfo.Height = frames_hwctx->surfaces[0].Info.Height;
+ q->param.mfx.FrameInfo.Width = frames_hwctx->reserve_surface.Info.Width;
+ q->param.mfx.FrameInfo.Height = frames_hwctx->reserve_surface.Info.Height;
}
if (avctx->framerate.den > 0 && avctx->framerate.num > 0) {
Index: jellyfin-ffmpeg/libavfilter/qsvvpp.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/qsvvpp.c
+++ jellyfin-ffmpeg/libavfilter/qsvvpp.c
@@ -307,7 +307,7 @@ static int fill_frameinfo_by_link(mfxFra
frames_ctx = (AVHWFramesContext *)link->hw_frames_ctx->data;
frames_hwctx = frames_ctx->hwctx;
- *frameinfo = frames_hwctx->surfaces[0].Info;
+ *frameinfo = frames_hwctx->reserve_surface.Info;
} else {
pix_fmt = link->format;
desc = av_pix_fmt_desc_get(pix_fmt);
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -611,7 +611,7 @@ static mfxStatus frame_alloc(mfxHDL pthi
QSVFramesContext *s = ctx->internal->priv;
AVQSVFramesContext *hwctx = ctx->hwctx;
mfxFrameInfo *i = &req->Info;
- mfxFrameInfo *i1 = &hwctx->surfaces[0].Info;
+ mfxFrameInfo *i1 = &hwctx->reserve_surface.Info;
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
!(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
@@ -1159,7 +1159,7 @@ static int qsv_init_internal_session(AVH
MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
par.AsyncDepth = 1;
- par.vpp.In = frames_hwctx->surfaces[0].Info;
+ par.vpp.In = frames_hwctx->reserve_surface.Info;
/* Apparently VPP requires the frame rate to be set to some value, otherwise
* init will fail (probably for the framerate conversion filter). Since we
@@ -1248,6 +1248,14 @@ static int qsv_frames_init(AVHWFramesCon
}
#endif
+ ret = qsv_init_surface(ctx, &frames_hwctx->reserve_surface);
+ if (ret < 0) {
+#if QSV_HAVE_OPAQUE
+ av_freep(&s->surface_ptrs);
+#endif
+ return ret;
+ }
+
s->session_download = NULL;
s->session_upload = NULL;
@@ -1776,13 +1784,7 @@ static int qsv_frames_derive_to(AVHWFram
{
QSVFramesContext *s = dst_ctx->internal->priv;
AVQSVFramesContext *dst_hwctx = dst_ctx->hwctx;
- int i;
-
- if (src_ctx->initial_pool_size == 0) {
- av_log(dst_ctx, AV_LOG_ERROR, "Only fixed-size pools can be "
- "mapped to QSV frames.\n");
- return AVERROR(EINVAL);
- }
+ int i, ret;
switch (src_ctx->device_ctx->type) {
#if CONFIG_VAAPI
@@ -1798,11 +1800,20 @@ static int qsv_frames_derive_to(AVHWFram
if (!s->surfaces_internal)
return AVERROR(ENOMEM);
for (i = 0; i < src_hwctx->nb_surfaces; i++) {
- qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ ret = qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
s->handle_pairs_internal[i].first = src_hwctx->surface_ids + i;
s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
}
+ ret = qsv_init_surface(dst_ctx, &dst_hwctx->reserve_surface);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
}
@@ -1821,7 +1832,11 @@ static int qsv_frames_derive_to(AVHWFram
if (!s->surfaces_internal)
return AVERROR(ENOMEM);
for (i = 0; i < src_ctx->initial_pool_size; i++) {
- qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ ret = qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
s->handle_pairs_internal[i].first = (mfxMemId)src_hwctx->texture_infos[i].texture;
if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
@@ -1830,6 +1845,11 @@ static int qsv_frames_derive_to(AVHWFram
}
s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
}
+ ret = qsv_init_surface(dst_ctx, &dst_hwctx->reserve_surface);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
dst_hwctx->nb_surfaces = src_ctx->initial_pool_size;
if (src_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
@@ -1852,11 +1872,20 @@ static int qsv_frames_derive_to(AVHWFram
if (!s->surfaces_internal)
return AVERROR(ENOMEM);
for (i = 0; i < src_hwctx->nb_surfaces; i++) {
- qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ ret = qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
s->handle_pairs_internal[i].first = (mfxMemId)src_hwctx->surfaces[i];
s->handle_pairs_internal[i].second = (mfxMemId)MFX_INFINITE;
s->surfaces_internal[i].Data.MemId = (mfxMemId)&s->handle_pairs_internal[i];
}
+ ret = qsv_init_surface(dst_ctx, &dst_hwctx->reserve_surface);
+ if (ret < 0) {
+ av_freep(&s->surfaces_internal);
+ return ret;
+ }
dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
if (src_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.h
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.h
@@ -54,6 +54,12 @@ typedef struct AVQSVFramesContext {
mfxFrameSurface1 *surfaces;
int nb_surfaces;
+ /*
+ * This surface store the surface information,
+ * and can be used to init dec, enc and vpp.
+ */
+ mfxFrameSurface1 reserve_surface;
+
/**
* A combination of MFX_MEMTYPE_* describing the frame pool.
*/

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavcodec/vaapi_encode.h
Index: FFmpeg/libavcodec/vaapi_encode.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/vaapi_encode.h
+++ jellyfin-ffmpeg/libavcodec/vaapi_encode.h
@@ -486,7 +486,7 @@ int ff_vaapi_encode_close(AVCodecContext
--- FFmpeg.orig/libavcodec/vaapi_encode.h
+++ FFmpeg/libavcodec/vaapi_encode.h
@@ -520,7 +520,7 @@ int ff_vaapi_encode_close(AVCodecContext
"Increase this to improve single channel performance. This option " \
"doesn't work if driver doesn't implement vaSyncBuffer function.", \
OFFSET(common.async_depth), AV_OPT_TYPE_INT, \

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavcodec/vaapi_encode_h265.c
Index: FFmpeg/libavcodec/vaapi_encode_h265.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/vaapi_encode_h265.c
+++ jellyfin-ffmpeg/libavcodec/vaapi_encode_h265.c
--- FFmpeg.orig/libavcodec/vaapi_encode_h265.c
+++ FFmpeg/libavcodec/vaapi_encode_h265.c
@@ -452,8 +452,9 @@ static int vaapi_encode_h265_init_sequen
sps->log2_min_luma_transform_block_size_minus2 = 0;
sps->log2_diff_max_min_luma_transform_block_size = 3;

View File

@ -1,18 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/vulkan.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/vulkan.h
+++ jellyfin-ffmpeg/libavutil/vulkan.h
@@ -26,9 +26,10 @@
#include "hwcontext_vulkan.h"
#include "vulkan_loader.h"
-#define FF_VK_DEFAULT_USAGE_FLAGS (VK_IMAGE_USAGE_SAMPLED_BIT | \
- VK_IMAGE_USAGE_STORAGE_BIT | \
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT | \
+#define FF_VK_DEFAULT_USAGE_FLAGS (VK_IMAGE_USAGE_SAMPLED_BIT | \
+ VK_IMAGE_USAGE_STORAGE_BIT | \
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | \
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | \
VK_IMAGE_USAGE_TRANSFER_DST_BIT)
/* GLSL management macros */

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavformat/id3v2.c
Index: FFmpeg/libavformat/id3v2.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/id3v2.c
+++ jellyfin-ffmpeg/libavformat/id3v2.c
--- FFmpeg.orig/libavformat/id3v2.c
+++ FFmpeg/libavformat/id3v2.c
@@ -136,6 +136,7 @@ const CodecMime ff_id3v2_mime_tags[] = {
{ "image/png", AV_CODEC_ID_PNG },
{ "image/tiff", AV_CODEC_ID_TIFF },
@ -10,7 +10,7 @@ Index: jellyfin-ffmpeg/libavformat/id3v2.c
{ "JPG", AV_CODEC_ID_MJPEG }, /* ID3v2.2 */
{ "PNG", AV_CODEC_ID_PNG }, /* ID3v2.2 */
{ "", AV_CODEC_ID_NONE },
@@ -321,39 +322,54 @@ static void read_ttag(AVFormatContext *s
@@ -325,39 +326,54 @@ static void read_ttag(AVFormatContext *s
AVDictionary **metadata, const char *key)
{
uint8_t *dst;

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/configure
Index: FFmpeg/configure
===================================================================
--- jellyfin-ffmpeg.orig/configure
+++ jellyfin-ffmpeg/configure
@@ -1774,7 +1774,6 @@ EXTERNAL_LIBRARY_GPL_LIST="
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -1872,7 +1872,6 @@ EXTERNAL_LIBRARY_GPL_LIST="
EXTERNAL_LIBRARY_NONFREE_LIST="
decklink
@ -10,10 +10,10 @@ Index: jellyfin-ffmpeg/configure
libtls
"
@@ -1814,6 +1813,7 @@ EXTERNAL_LIBRARY_LIST="
@@ -1912,6 +1911,7 @@ EXTERNAL_LIBRARY_LIST="
libcodec2
libdav1d
libdc1394
libdrm
+ libfdk_aac
libflite
libfontconfig

View File

@ -0,0 +1,137 @@
Index: FFmpeg/libavformat/hlsenc.c
===================================================================
--- FFmpeg.orig/libavformat/hlsenc.c
+++ FFmpeg/libavformat/hlsenc.c
@@ -850,7 +850,7 @@ static int hls_mux_init(AVFormatContext
AVFormatContext *vtt_oc = NULL;
int byterange_mode = (hls->flags & HLS_SINGLE_FILE) || (hls->max_seg_size > 0);
int remaining_options;
- int i, ret;
+ int i, j, ret;
ret = avformat_alloc_output_context2(&vs->avf, vs->oformat, NULL, NULL);
if (ret < 0)
@@ -896,6 +896,20 @@ static int hls_mux_init(AVFormatContext
st->codecpar->codec_tag = 0;
}
+ // copy side data
+ for (j = 0; j < vs->streams[i]->codecpar->nb_coded_side_data; j++) {
+ const AVPacketSideData *sd_src = &vs->streams[i]->codecpar->coded_side_data[j];
+ AVPacketSideData *sd_dst;
+
+ sd_dst = av_packet_side_data_new(&st->codecpar->coded_side_data,
+ &st->codecpar->nb_coded_side_data,
+ sd_src->type, sd_src->size, 0);
+ if (!sd_dst)
+ return AVERROR(ENOMEM);
+
+ memcpy(sd_dst->data, sd_src->data, sd_src->size);
+ }
+
st->sample_aspect_ratio = vs->streams[i]->sample_aspect_ratio;
st->time_base = vs->streams[i]->time_base;
av_dict_copy(&st->metadata, vs->streams[i]->metadata, 0);
Index: FFmpeg/libavformat/movenc.c
===================================================================
--- FFmpeg.orig/libavformat/movenc.c
+++ FFmpeg/libavformat/movenc.c
@@ -8124,6 +8124,7 @@ static const AVCodecTag codec_mp4_tags[]
{ AV_CODEC_ID_HEVC, MKTAG('h', 'e', 'v', '1') },
{ AV_CODEC_ID_HEVC, MKTAG('h', 'v', 'c', '1') },
{ AV_CODEC_ID_HEVC, MKTAG('d', 'v', 'h', '1') },
+ { AV_CODEC_ID_HEVC, MKTAG('d', 'v', 'h', 'e') },
{ AV_CODEC_ID_VVC, MKTAG('v', 'v', 'c', '1') },
{ AV_CODEC_ID_VVC, MKTAG('v', 'v', 'i', '1') },
{ AV_CODEC_ID_EVC, MKTAG('e', 'v', 'c', '1') },
@@ -8137,6 +8138,7 @@ static const AVCodecTag codec_mp4_tags[]
{ AV_CODEC_ID_TSCC2, MKTAG('m', 'p', '4', 'v') },
{ AV_CODEC_ID_VP9, MKTAG('v', 'p', '0', '9') },
{ AV_CODEC_ID_AV1, MKTAG('a', 'v', '0', '1') },
+ { AV_CODEC_ID_AV1, MKTAG('d', 'a', 'v', '1') },
{ AV_CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') },
{ AV_CODEC_ID_ALAC, MKTAG('a', 'l', 'a', 'c') },
{ AV_CODEC_ID_MP4ALS, MKTAG('m', 'p', '4', 'a') },
Index: FFmpeg/libavformat/mpegtsenc.c
===================================================================
--- FFmpeg.orig/libavformat/mpegtsenc.c
+++ FFmpeg/libavformat/mpegtsenc.c
@@ -23,6 +23,7 @@
#include "libavutil/bswap.h"
#include "libavutil/crc.h"
#include "libavutil/dict.h"
+#include "libavutil/dovi_meta.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
@@ -350,6 +351,52 @@ static void put_registration_descriptor(
*q_ptr = q;
}
+static int put_dovi_descriptor(AVFormatContext *s, uint8_t **q_ptr,
+ const AVDOVIDecoderConfigurationRecord *dovi)
+{
+ uint16_t val16;
+ uint8_t *q = *q_ptr;
+
+ if (!dovi)
+ return AVERROR(ENOMEM);
+
+ if (!dovi->bl_present_flag) {
+ av_log(s, AV_LOG_ERROR,
+ "EL only DOVI stream is not supported!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ put_registration_descriptor(&q, MKTAG('D', 'O', 'V', 'I')); // format_identifier
+
+ /* DOVI Video Stream Descriptor Syntax */
+ *q++ = 0xb0; // descriptor_tag
+ *q++ = 0x05; // descriptor_length
+ *q++ = dovi->dv_version_major;
+ *q++ = dovi->dv_version_minor;
+
+ val16 = (dovi->dv_profile & 0x7f) << 9 | // 7 bits
+ (dovi->dv_level & 0x3f) << 3 | // 6 bits
+ (dovi->rpu_present_flag & 0x01) << 2 | // 1 bit
+ (dovi->el_present_flag & 0x01) << 1 | // 1 bit
+ (dovi->bl_present_flag & 0x01); // 1 bit
+ put16(&q, val16);
+
+#if 0
+ // TODO: support dependency_pid (EL only stream)
+ // descriptor_length: 0x05->0x07
+ if (!bl_present_flag) {
+ val16 = (dependency_pid & 0x1fff) << 3; // 13+3 bits
+ put16(&q, val16);
+ }
+#endif
+
+ *q++ = (dovi->dv_bl_signal_compatibility_id & 0x0f) << 4; // 4+4 bits
+
+ *q_ptr = q;
+
+ return 0;
+}
+
static int get_dvb_stream_type(AVFormatContext *s, AVStream *st)
{
MpegTSWrite *ts = s->priv_data;
@@ -803,7 +850,16 @@ static int mpegts_write_pmt(AVFormatCont
} else if (stream_type == STREAM_TYPE_VIDEO_VC1) {
put_registration_descriptor(&q, MKTAG('V', 'C', '-', '1'));
} else if (stream_type == STREAM_TYPE_VIDEO_HEVC && s->strict_std_compliance <= FF_COMPLIANCE_NORMAL) {
- put_registration_descriptor(&q, MKTAG('H', 'E', 'V', 'C'));
+ const AVPacketSideData *sd = av_packet_side_data_get(st->codecpar->coded_side_data,
+ st->codecpar->nb_coded_side_data, AV_PKT_DATA_DOVI_CONF);
+ const AVDOVIDecoderConfigurationRecord *dovi = sd ? (const AVDOVIDecoderConfigurationRecord *)sd->data : NULL;
+
+ if (dovi && dovi->bl_present_flag && s->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+ if (put_dovi_descriptor(s, &q, dovi) < 0)
+ break;
+ } else {
+ put_registration_descriptor(&q, MKTAG('H', 'E', 'V', 'C'));
+ }
} else if (stream_type == STREAM_TYPE_VIDEO_CAVS || stream_type == STREAM_TYPE_VIDEO_AVS2 ||
stream_type == STREAM_TYPE_VIDEO_AVS3) {
put_registration_descriptor(&q, MKTAG('A', 'V', 'S', 'V'));

View File

@ -1,551 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_vulkan.c
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_vulkan.c
@@ -31,6 +31,7 @@ typedef struct OverlayVulkanContext {
FFVkQueueFamilyCtx qf;
FFVkExecContext *exec;
FFVulkanPipeline *pl;
+ FFVulkanPipeline *pl_pass;
FFFrameSync fs;
FFVkBuffer params_buf;
@@ -44,6 +45,10 @@ typedef struct OverlayVulkanContext {
int overlay_y;
int overlay_w;
int overlay_h;
+
+ int opt_repeatlast;
+ int opt_shortest;
+ int opt_eof_action;
} OverlayVulkanContext;
static const char overlay_noalpha[] = {
@@ -80,17 +85,78 @@ static const char overlay_alpha[] = {
static av_cold int init_filter(AVFilterContext *ctx)
{
int err;
- FFVkSampler *sampler;
+ FFVkSPIRVShader *shd;
OverlayVulkanContext *s = ctx->priv;
FFVulkanContext *vkctx = &s->vkctx;
+ FFVkSampler *sampler = ff_vk_init_sampler(vkctx, 1, VK_FILTER_NEAREST);
const int planes = av_pix_fmt_count_planes(s->vkctx.output_format);
- ff_vk_qf_init(vkctx, &s->qf, VK_QUEUE_COMPUTE_BIT, 0);
+ FFVulkanDescriptorSetBinding desc_i[3] = {
+ {
+ .name = "main_img",
+ .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ .dimensions = 2,
+ .elems = planes,
+ .stages = VK_SHADER_STAGE_COMPUTE_BIT,
+ .updater = s->main_images,
+ .sampler = sampler,
+ },
+ {
+ .name = "overlay_img",
+ .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ .dimensions = 2,
+ .elems = planes,
+ .stages = VK_SHADER_STAGE_COMPUTE_BIT,
+ .updater = s->overlay_images,
+ .sampler = sampler,
+ },
+ {
+ .name = "output_img",
+ .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ .mem_layout = ff_vk_shader_rep_fmt(s->vkctx.output_format),
+ .mem_quali = "writeonly",
+ .dimensions = 2,
+ .elems = planes,
+ .stages = VK_SHADER_STAGE_COMPUTE_BIT,
+ .updater = s->output_images,
+ },
+ };
- sampler = ff_vk_init_sampler(vkctx, 1, VK_FILTER_NEAREST);
if (!sampler)
return AVERROR_EXTERNAL;
+ ff_vk_qf_init(vkctx, &s->qf, VK_QUEUE_COMPUTE_BIT, 0);
+
+ s->pl_pass = ff_vk_create_pipeline(vkctx, &s->qf);
+ if (!s->pl_pass)
+ return AVERROR(ENOMEM);
+
+ { /* Create the shader passthrough */
+ shd = ff_vk_init_shader(s->pl_pass, "overlay_compute_passthrough",
+ VK_SHADER_STAGE_COMPUTE_BIT);
+ if (!shd)
+ return AVERROR(ENOMEM);
+
+ ff_vk_set_compute_shader_sizes(shd, CGROUPS);
+
+ RET(ff_vk_add_descriptor_set(vkctx, s->pl_pass, shd, desc_i, FF_ARRAY_ELEMS(desc_i), 0)); /* set 0 */
+
+ GLSLC(0, void main() );
+ GLSLC(0, { );
+ GLSLC(1, ivec2 pos = ivec2(gl_GlobalInvocationID.xy); );
+ GLSLF(1, int planes = %i; ,planes);
+ GLSLC(1, for (int i = 0; i < planes; i++) { );
+ GLSLC(2, vec4 res = texture(main_img[i], pos); );
+ GLSLC(2, imageStore(output_img[i], pos, res); );
+ GLSLC(1, } );
+ GLSLC(0, } );
+
+ RET(ff_vk_compile_shader(vkctx, shd, "main"));
+ }
+
+ RET(ff_vk_init_pipeline_layout(vkctx, s->pl_pass));
+ RET(ff_vk_init_compute_pipeline(vkctx, s->pl_pass));
+
s->pl = ff_vk_create_pipeline(vkctx, &s->qf);
if (!s->pl)
return AVERROR(ENOMEM);
@@ -98,37 +164,6 @@ static av_cold int init_filter(AVFilterC
{ /* Create the shader */
const int ialpha = av_pix_fmt_desc_get(s->vkctx.input_format)->flags & AV_PIX_FMT_FLAG_ALPHA;
- FFVulkanDescriptorSetBinding desc_i[3] = {
- {
- .name = "main_img",
- .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
- .dimensions = 2,
- .elems = planes,
- .stages = VK_SHADER_STAGE_COMPUTE_BIT,
- .updater = s->main_images,
- .sampler = sampler,
- },
- {
- .name = "overlay_img",
- .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
- .dimensions = 2,
- .elems = planes,
- .stages = VK_SHADER_STAGE_COMPUTE_BIT,
- .updater = s->overlay_images,
- .sampler = sampler,
- },
- {
- .name = "output_img",
- .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
- .mem_layout = ff_vk_shader_rep_fmt(s->vkctx.output_format),
- .mem_quali = "writeonly",
- .dimensions = 2,
- .elems = planes,
- .stages = VK_SHADER_STAGE_COMPUTE_BIT,
- .updater = s->output_images,
- },
- };
-
FFVulkanDescriptorSetBinding desc_b = {
.name = "params",
.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
@@ -139,8 +174,8 @@ static av_cold int init_filter(AVFilterC
.buf_content = "ivec2 o_offset[3], o_size[3];",
};
- FFVkSPIRVShader *shd = ff_vk_init_shader(s->pl, "overlay_compute",
- VK_SHADER_STAGE_COMPUTE_BIT);
+ shd = ff_vk_init_shader(s->pl, "overlay_compute",
+ VK_SHADER_STAGE_COMPUTE_BIT);
if (!shd)
return AVERROR(ENOMEM);
@@ -229,7 +264,7 @@ fail:
}
static int process_frames(AVFilterContext *avctx, AVFrame *out_f,
- AVFrame *main_f, AVFrame *overlay_f)
+ AVFrame *main_f, AVFrame *overlay_f, int passthrough)
{
int err;
VkCommandBuffer cmd_buf;
@@ -240,14 +275,20 @@ static int process_frames(AVFilterContex
AVVkFrame *out = (AVVkFrame *)out_f->data[0];
AVVkFrame *main = (AVVkFrame *)main_f->data[0];
- AVVkFrame *overlay = (AVVkFrame *)overlay_f->data[0];
+ AVVkFrame *overlay;
AVHWFramesContext *main_fc = (AVHWFramesContext*)main_f->hw_frames_ctx->data;
- AVHWFramesContext *overlay_fc = (AVHWFramesContext*)overlay_f->hw_frames_ctx->data;
+ AVHWFramesContext *overlay_fc;
const VkFormat *output_formats = av_vkfmt_from_pixfmt(s->vkctx.output_format);
const VkFormat *main_sw_formats = av_vkfmt_from_pixfmt(main_fc->sw_format);
- const VkFormat *overlay_sw_formats = av_vkfmt_from_pixfmt(overlay_fc->sw_format);
+ const VkFormat *overlay_sw_formats;
+
+ if (!passthrough) {
+ overlay = (AVVkFrame *)overlay_f->data[0];
+ overlay_fc = (AVHWFramesContext*)overlay_f->hw_frames_ctx->data;
+ overlay_sw_formats = av_vkfmt_from_pixfmt(overlay_fc->sw_format);
+ }
/* Update descriptors and init the exec context */
ff_vk_start_exec_recording(vkctx, s->exec);
@@ -255,92 +296,128 @@ static int process_frames(AVFilterContex
for (int i = 0; i < planes; i++) {
RET(ff_vk_create_imageview(vkctx, s->exec,
- &s->main_images[i].imageView, main->img[i],
- main_sw_formats[i],
- ff_comp_identity_map));
-
- RET(ff_vk_create_imageview(vkctx, s->exec,
- &s->overlay_images[i].imageView, overlay->img[i],
- overlay_sw_formats[i],
- ff_comp_identity_map));
+ &s->main_images[i].imageView, main->img[i],
+ main_sw_formats[i],
+ ff_comp_identity_map));
RET(ff_vk_create_imageview(vkctx, s->exec,
- &s->output_images[i].imageView, out->img[i],
- output_formats[i],
- ff_comp_identity_map));
+ &s->output_images[i].imageView, out->img[i],
+ output_formats[i],
+ ff_comp_identity_map));
s->main_images[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
- s->overlay_images[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
s->output_images[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ if (!passthrough) {
+ RET(ff_vk_create_imageview(vkctx, s->exec,
+ &s->overlay_images[i].imageView, overlay->img[i],
+ overlay_sw_formats[i],
+ ff_comp_identity_map));
+
+ s->overlay_images[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ }
}
ff_vk_update_descriptor_set(vkctx, s->pl, 0);
+ ff_vk_update_descriptor_set(vkctx, s->pl_pass, 0);
- for (int i = 0; i < planes; i++) {
- VkImageMemoryBarrier bar[3] = {
- {
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .srcAccessMask = 0,
- .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
- .oldLayout = main->layout[i],
- .newLayout = s->main_images[i].imageLayout,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = main->img[i],
- .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .subresourceRange.levelCount = 1,
- .subresourceRange.layerCount = 1,
- },
- {
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .srcAccessMask = 0,
- .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
- .oldLayout = overlay->layout[i],
- .newLayout = s->overlay_images[i].imageLayout,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = overlay->img[i],
- .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .subresourceRange.levelCount = 1,
- .subresourceRange.layerCount = 1,
- },
- {
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .srcAccessMask = 0,
- .dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
- .oldLayout = out->layout[i],
- .newLayout = s->output_images[i].imageLayout,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = out->img[i],
- .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .subresourceRange.levelCount = 1,
- .subresourceRange.layerCount = 1,
- },
- };
-
- vk->CmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0,
- 0, NULL, 0, NULL, FF_ARRAY_ELEMS(bar), bar);
+#define MAIN_BARRIER \
+ { \
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, \
+ .srcAccessMask = 0, \
+ .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, \
+ .oldLayout = main->layout[i], \
+ .newLayout = s->main_images[i].imageLayout, \
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .image = main->img[i], \
+ .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \
+ .subresourceRange.levelCount = 1, \
+ .subresourceRange.layerCount = 1, \
+ }
- main->layout[i] = bar[0].newLayout;
- main->access[i] = bar[0].dstAccessMask;
+#define OVERLAY_BARRIER \
+ { \
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, \
+ .srcAccessMask = 0, \
+ .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, \
+ .oldLayout = overlay->layout[i], \
+ .newLayout = s->overlay_images[i].imageLayout, \
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .image = overlay->img[i], \
+ .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \
+ .subresourceRange.levelCount = 1, \
+ .subresourceRange.layerCount = 1, \
+ }
- overlay->layout[i] = bar[1].newLayout;
- overlay->access[i] = bar[1].dstAccessMask;
+#define OUT_BARRIER \
+ { \
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, \
+ .srcAccessMask = 0, \
+ .dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT, \
+ .oldLayout = out->layout[i], \
+ .newLayout = s->output_images[i].imageLayout, \
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, \
+ .image = out->img[i], \
+ .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, \
+ .subresourceRange.levelCount = 1, \
+ .subresourceRange.layerCount = 1, \
+ }
- out->layout[i] = bar[2].newLayout;
- out->access[i] = bar[2].dstAccessMask;
+ for (int i = 0; i < planes; i++) {
+ if (passthrough) {
+ VkImageMemoryBarrier bar_pass[2] = {
+ MAIN_BARRIER,
+ OUT_BARRIER,
+ };
+
+ vk->CmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0,
+ 0, NULL, 0, NULL, FF_ARRAY_ELEMS(bar_pass), bar_pass);
+
+ main->layout[i] = bar_pass[0].newLayout;
+ main->access[i] = bar_pass[0].dstAccessMask;
+ out->layout[i] = bar_pass[1].newLayout;
+ out->access[i] = bar_pass[1].dstAccessMask;
+ } else {
+ VkImageMemoryBarrier bar[3] = {
+ MAIN_BARRIER,
+ OVERLAY_BARRIER,
+ OUT_BARRIER,
+ };
+
+ vk->CmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0,
+ 0, NULL, 0, NULL, FF_ARRAY_ELEMS(bar), bar);
+
+ main->layout[i] = bar[0].newLayout;
+ main->access[i] = bar[0].dstAccessMask;
+ overlay->layout[i] = bar[1].newLayout;
+ overlay->access[i] = bar[1].dstAccessMask;
+ out->layout[i] = bar[2].newLayout;
+ out->access[i] = bar[2].dstAccessMask;
+ }
}
- ff_vk_bind_pipeline_exec(vkctx, s->exec, s->pl);
+ if (passthrough) {
+ av_log(avctx, AV_LOG_DEBUG, "Binding pl_pass to exec: overlay_compute_passthrough\n");
+ ff_vk_bind_pipeline_exec(vkctx, s->exec, s->pl_pass);
+ } else {
+ av_log(avctx, AV_LOG_DEBUG, "Binding pl to exec: overlay_compute\n");
+ ff_vk_bind_pipeline_exec(vkctx, s->exec, s->pl);
+ }
vk->CmdDispatch(cmd_buf,
FFALIGN(s->vkctx.output_width, CGROUPS[0])/CGROUPS[0],
FFALIGN(s->vkctx.output_height, CGROUPS[1])/CGROUPS[1], 1);
ff_vk_add_exec_dep(vkctx, s->exec, main_f, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
- ff_vk_add_exec_dep(vkctx, s->exec, overlay_f, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
+ if (!passthrough)
+ ff_vk_add_exec_dep(vkctx, s->exec, overlay_f, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
+
ff_vk_add_exec_dep(vkctx, s->exec, out_f, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
err = ff_vk_submit_exec_queue(vkctx, s->exec);
@@ -358,7 +435,7 @@ fail:
static int overlay_vulkan_blend(FFFrameSync *fs)
{
- int err;
+ int err, passthrough = 0;
AVFilterContext *ctx = fs->parent;
OverlayVulkanContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
@@ -371,22 +448,14 @@ static int overlay_vulkan_blend(FFFrameS
if (err < 0)
goto fail;
- if (!input_main || !input_overlay)
- return 0;
+ if (!input_main)
+ return AVERROR_BUG;
- if (!s->initialized) {
- AVHWFramesContext *main_fc = (AVHWFramesContext*)input_main->hw_frames_ctx->data;
- AVHWFramesContext *overlay_fc = (AVHWFramesContext*)input_overlay->hw_frames_ctx->data;
- if (main_fc->sw_format != overlay_fc->sw_format) {
- av_log(ctx, AV_LOG_ERROR, "Mismatching sw formats!\n");
- return AVERROR(EINVAL);
- }
-
- s->overlay_w = input_overlay->width;
- s->overlay_h = input_overlay->height;
+ if (!input_overlay)
+ passthrough = 1;
+ if (!s->initialized)
RET(init_filter(ctx));
- }
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
@@ -394,7 +463,7 @@ static int overlay_vulkan_blend(FFFrameS
goto fail;
}
- RET(process_frames(ctx, out, input_main, input_overlay));
+ RET(process_frames(ctx, out, input_main, input_overlay, passthrough));
err = av_frame_copy_props(out, input_main);
if (err < 0)
@@ -412,6 +481,18 @@ static int overlay_vulkan_config_output(
int err;
AVFilterContext *avctx = outlink->src;
OverlayVulkanContext *s = avctx->priv;
+ AVFilterLink *inlink = avctx->inputs[0];
+ AVFilterLink *inlink_overlay = avctx->inputs[1];
+ AVHWFramesContext *main_fc = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+ AVHWFramesContext *overlay_fc = (AVHWFramesContext*)inlink_overlay->hw_frames_ctx->data;
+
+ if (main_fc->sw_format != overlay_fc->sw_format) {
+ av_log(avctx, AV_LOG_ERROR, "Mismatching sw formats!\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->overlay_w = inlink_overlay->w;
+ s->overlay_h = inlink_overlay->h;
err = ff_vk_filter_config_output(outlink);
if (err < 0)
@@ -421,6 +502,11 @@ static int overlay_vulkan_config_output(
if (err < 0)
return err;
+ s->fs.opt_repeatlast = s->opt_repeatlast;
+ s->fs.opt_shortest = s->opt_shortest;
+ s->fs.opt_eof_action = s->opt_eof_action;
+ s->fs.time_base = outlink->time_base = inlink->time_base;
+
return ff_framesync_configure(&s->fs);
}
@@ -456,6 +542,14 @@ static void overlay_vulkan_uninit(AVFilt
static const AVOption overlay_vulkan_options[] = {
{ "x", "Set horizontal offset", OFFSET(overlay_x), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, .flags = FLAGS },
{ "y", "Set vertical offset", OFFSET(overlay_y), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, .flags = FLAGS },
+ { "eof_action", "Action to take when encountering EOF from secondary input ",
+ OFFSET(opt_eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
+ EOF_ACTION_REPEAT, EOF_ACTION_PASS, .flags = FLAGS, "eof_action" },
+ { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
+ { "endall", "End both streams.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
+ { "pass", "Pass through the main input.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_PASS }, .flags = FLAGS, "eof_action" },
+ { "shortest", "force termination when the shortest input terminates", OFFSET(opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "repeatlast", "repeat overlay of the last overlay frame", OFFSET(opt_repeatlast), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
{ NULL },
};
Index: jellyfin-ffmpeg/libavfilter/vf_scale_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_scale_vulkan.c
+++ jellyfin-ffmpeg/libavfilter/vf_scale_vulkan.c
@@ -220,14 +220,17 @@ static av_cold int init_filter(AVFilterC
GLSLC(1, } );
}
} else {
- GLSLC(1, vec4 res = scale_bilinear(0, pos, c_r, c_o); );
- GLSLF(1, res = rgb2yuv(res, %i); ,s->out_range == AVCOL_RANGE_JPEG);
+ GLSLC(1, size = imageSize(output_img[0]); );
+ GLSLC(1, if (IS_WITHIN(pos, size)) { );
+ GLSLC(2, vec4 res = scale_bilinear(0, pos, c_r, c_o); );
+ GLSLF(2, res = rgb2yuv(res, %i);,s->out_range == AVCOL_RANGE_JPEG);
switch (s->vkctx.output_format) {
- case AV_PIX_FMT_NV12: GLSLC(1, write_nv12(res, pos); ); break;
- case AV_PIX_FMT_YUV420P: GLSLC(1, write_420(res, pos); ); break;
- case AV_PIX_FMT_YUV444P: GLSLC(1, write_444(res, pos); ); break;
+ case AV_PIX_FMT_NV12: GLSLC(2, write_nv12(res, pos); ); break;
+ case AV_PIX_FMT_YUV420P: GLSLC(2, write_420(res, pos); ); break;
+ case AV_PIX_FMT_YUV444P: GLSLC(2, write_444(res, pos); ); break;
default: return AVERROR(EINVAL);
}
+ GLSLC(1, } );
}
GLSLC(0, } );
@@ -299,7 +302,8 @@ static int process_frames(AVFilterContex
AVVkFrame *out = (AVVkFrame *)out_f->data[0];
VkImageMemoryBarrier barriers[AV_NUM_DATA_POINTERS*2];
int barrier_count = 0;
- const int planes = av_pix_fmt_count_planes(s->vkctx.input_format);
+ const int in_planes = av_pix_fmt_count_planes(s->vkctx.input_format);
+ const int out_planes = av_pix_fmt_count_planes(s->vkctx.output_format);
const VkFormat *input_formats = av_vkfmt_from_pixfmt(s->vkctx.input_format);
const VkFormat *output_formats = av_vkfmt_from_pixfmt(s->vkctx.output_format);
@@ -307,24 +311,27 @@ static int process_frames(AVFilterContex
ff_vk_start_exec_recording(vkctx, s->exec);
cmd_buf = ff_vk_get_exec_buf(s->exec);
- for (int i = 0; i < planes; i++) {
+ for (int i = 0; i < in_planes; i++) {
RET(ff_vk_create_imageview(vkctx, s->exec,
&s->input_images[i].imageView, in->img[i],
input_formats[i],
ff_comp_identity_map));
+ s->input_images[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ }
+
+ for (int i = 0; i < out_planes; i++) {
RET(ff_vk_create_imageview(vkctx, s->exec,
&s->output_images[i].imageView, out->img[i],
output_formats[i],
ff_comp_identity_map));
- s->input_images[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
s->output_images[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
}
ff_vk_update_descriptor_set(vkctx, s->pl, 0);
- for (int i = 0; i < planes; i++) {
+ for (int i = 0; i < in_planes; i++) {
VkImageMemoryBarrier bar = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
@@ -345,7 +352,7 @@ static int process_frames(AVFilterContex
in->access[i] = bar.dstAccessMask;
}
- for (int i = 0; i < av_pix_fmt_count_planes(s->vkctx.output_format); i++) {
+ for (int i = 0; i < out_planes; i++) {
VkImageMemoryBarrier bar = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,

View File

@ -1,20 +1,20 @@
Index: jellyfin-ffmpeg/fftools/ffmpeg.c
Index: FFmpeg/fftools/ffmpeg.c
===================================================================
--- jellyfin-ffmpeg.orig/fftools/ffmpeg.c
+++ jellyfin-ffmpeg/fftools/ffmpeg.c
@@ -130,6 +130,9 @@ static int trigger_fix_sub_duration_hear
--- FFmpeg.orig/fftools/ffmpeg.c
+++ FFmpeg/fftools/ffmpeg.c
@@ -116,6 +116,9 @@ typedef struct BenchmarkTimeStamps {
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);
+static int64_t gettime_relative_minus_pause(void);
+static void pause_transcoding(void);
+static void unpause_transcoding(void);
static int64_t nb_frames_dup = 0;
static uint64_t dup_warning = 1000;
@@ -149,6 +152,9 @@ int nb_output_files = 0;
FilterGraph **filtergraphs;
int nb_filtergraphs;
atomic_uint nb_output_dumped = 0;
@@ -134,6 +137,9 @@ int nb_filtergraphs;
Decoder **decoders;
int nb_decoders;
+int64_t paused_start = 0;
+int64_t paused_time = 0;
@ -22,7 +22,7 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
#if HAVE_TERMIOS_H
/* init terminal so that we can grab keys */
@@ -3511,12 +3517,28 @@ static void set_tty_echo(int on)
@@ -773,12 +779,28 @@ static void set_tty_echo(int on)
#endif
}
@ -42,7 +42,7 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
+
static int check_keyboard_interaction(int64_t cur_time)
{
int i, ret, key;
int i, key;
static int64_t last_time;
- if (received_nb_signals)
+ if (received_nb_signals) {
@ -52,10 +52,10 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
/* read_key() returns 0 on EOF */
if (cur_time - last_time >= 100000) {
key = read_key();
@@ -3530,6 +3552,11 @@ static int check_keyboard_interaction(in
@@ -791,6 +813,11 @@ static int check_keyboard_interaction(in
}
if (key == '+') av_log_set_level(av_log_get_level()+10);
if (key == '-') av_log_set_level(av_log_get_level()-10);
if (key == 's') qp_hist ^= 1;
+ if (key == 'u' || key != -1) unpause_transcoding();
+ if (key == 'p'){
+ pause_transcoding();
@ -64,9 +64,9 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
if (key == 'c' || key == 'C'){
char buf[4096], target[64], command[256], arg[256] = {0};
double time;
@@ -3613,7 +3640,9 @@ static int check_keyboard_interaction(in
@@ -825,7 +852,9 @@ static int check_keyboard_interaction(in
"c Send command to first matching filter supporting it\n"
"C Send/Queue command to all matching filters\n"
"D cycle through available debug modes\n"
"h dump packets/hex press to cycle through the 3 states\n"
+ "p pause transcoding\n"
"q quit\n"
@ -74,42 +74,32 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
"s Show QP histogram\n"
);
}
@@ -3921,6 +3950,11 @@ static int transcode_step(void)
InputStream *ist = NULL;
int ret;
+ if (paused_start) {
+ av_usleep(10000);
+ return 0;
+ }
+
ost = choose_output();
if (!ost) {
if (got_eagain()) {
@@ -4023,11 +4057,11 @@ static int transcode(void)
@@ -855,12 +884,15 @@ static int transcode(Scheduler *sch)
timer_start = av_gettime_relative();
while (!received_sigterm) {
while (!sch_wait(sch, stats_period, &transcode_ts)) {
- int64_t cur_time= av_gettime_relative();
+ int64_t cur_time= gettime_relative_minus_pause();
/* if 'q' pressed, exits */
if (stdin_interaction)
- if (stdin_interaction)
- if (check_keyboard_interaction(cur_time) < 0)
+ if (check_keyboard_interaction(av_gettime_relative()) < 0)
+ if (stdin_interaction) {
+ if (check_keyboard_interaction(av_gettime_relative()) < 0) {
+ paused_start = 0; // unpausing the input thread on exit
break;
+ }
+ }
/* check if there's any stream where output is still needed */
@@ -4064,7 +4098,7 @@ static int transcode(void)
}
/* dump report by using the output first video and audio streams */
print_report(0, timer_start, cur_time, transcode_ts);
@@ -877,11 +909,17 @@ static int transcode(Scheduler *sch)
term_exit();
/* dump report by using the first video and audio streams */
- print_report(1, timer_start, av_gettime_relative());
+ print_report(1, timer_start, gettime_relative_minus_pause());
- print_report(1, timer_start, av_gettime_relative(), transcode_ts);
+ print_report(1, timer_start, gettime_relative_minus_pause(), transcode_ts);
/* close each encoder */
for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
@@ -4091,6 +4125,12 @@ static int transcode(void)
return ret;
}
@ -122,13 +112,13 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.c
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
{
BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
Index: jellyfin-ffmpeg/fftools/ffmpeg.h
Index: FFmpeg/fftools/ffmpeg.h
===================================================================
--- jellyfin-ffmpeg.orig/fftools/ffmpeg.h
+++ jellyfin-ffmpeg/fftools/ffmpeg.h
@@ -766,6 +766,9 @@ extern int recast_media;
extern int do_psnr;
#endif
--- FFmpeg.orig/fftools/ffmpeg.h
+++ FFmpeg/fftools/ffmpeg.h
@@ -667,6 +667,9 @@ extern int recast_media;
extern FILE *vstats_file;
+extern int64_t paused_start;
+extern int64_t paused_time;
@ -136,19 +126,19 @@ Index: jellyfin-ffmpeg/fftools/ffmpeg.h
void term_init(void);
void term_exit(void);
Index: jellyfin-ffmpeg/fftools/ffmpeg_demux.c
Index: FFmpeg/fftools/ffmpeg_demux.c
===================================================================
--- jellyfin-ffmpeg.orig/fftools/ffmpeg_demux.c
+++ jellyfin-ffmpeg/fftools/ffmpeg_demux.c
@@ -253,6 +253,11 @@ static void *input_thread(void *arg)
while (1) {
DemuxMsg msg = { NULL };
--- FFmpeg.orig/fftools/ffmpeg_demux.c
+++ FFmpeg/fftools/ffmpeg_demux.c
@@ -699,6 +699,11 @@ static int input_thread(void *arg)
DemuxStream *ds;
unsigned send_flags = 0;
+ if (paused_start) {
+ av_usleep(1000); // Be more responsive to unpausing than main thread
+ av_usleep(1000); // pausing the input thread
+ continue;
+ }
+
ret = av_read_frame(f->ctx, pkt);
ret = av_read_frame(f->ctx, dt.pkt_demux);
if (ret == AVERROR(EAGAIN)) {

View File

@ -1,42 +0,0 @@
Index: jellyfin-ffmpeg/libavformat/hlsenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/hlsenc.c
+++ jellyfin-ffmpeg/libavformat/hlsenc.c
@@ -854,7 +854,7 @@ static int hls_mux_init(AVFormatContext
AVFormatContext *vtt_oc = NULL;
int byterange_mode = (hls->flags & HLS_SINGLE_FILE) || (hls->max_seg_size > 0);
int remaining_options;
- int i, ret;
+ int i, j, ret;
ret = avformat_alloc_output_context2(&vs->avf, vs->oformat, NULL, NULL);
if (ret < 0)
@@ -905,6 +905,15 @@ FF_ENABLE_DEPRECATION_WARNINGS
st->codecpar->codec_tag = 0;
}
+ // copy side data
+ for (j = 0; j < vs->streams[i]->nb_side_data; j++) {
+ const AVPacketSideData *sd_src = &vs->streams[i]->side_data[j];
+ uint8_t *dst_data = av_stream_new_side_data(st, sd_src->type, sd_src->size);
+ if (!dst_data)
+ return AVERROR(ENOMEM);
+ memcpy(dst_data, sd_src->data, sd_src->size);
+ }
+
st->sample_aspect_ratio = vs->streams[i]->sample_aspect_ratio;
st->time_base = vs->streams[i]->time_base;
av_dict_copy(&st->metadata, vs->streams[i]->metadata, 0);
Index: jellyfin-ffmpeg/libavformat/movenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/movenc.c
+++ jellyfin-ffmpeg/libavformat/movenc.c
@@ -7714,6 +7714,8 @@ static const AVCodecTag codec_mp4_tags[]
{ AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '3') },
{ AV_CODEC_ID_HEVC, MKTAG('h', 'e', 'v', '1') },
{ AV_CODEC_ID_HEVC, MKTAG('h', 'v', 'c', '1') },
+ { AV_CODEC_ID_HEVC, MKTAG('d', 'v', 'h', '1') },
+ { AV_CODEC_ID_HEVC, MKTAG('d', 'v', 'h', 'e') },
{ AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', '4', 'v') },
{ AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', '4', 'v') },
{ AV_CODEC_ID_MJPEG, MKTAG('m', 'p', '4', 'v') },

View File

@ -0,0 +1,38 @@
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -5558,7 +5558,7 @@ elif enabled x86; then
;;
# everything else should support nopl and conditional mov (cmov)
*)
- cpuflags="-march=$cpu"
+ # there is no -march=generic option
enable i686
enable fast_cmov
;;
@@ -7539,7 +7539,9 @@ if enabled icc; then
disable aligned_stack
fi
elif enabled gcc; then
- check_optflags -fno-tree-vectorize
+ case $gcc_basever in
+ 2|2.*|3.*|4.*|5.*|6.*) check_optflags -fno-tree-vectorize ;;
+ esac
check_cflags -Werror=format-security
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes
Index: FFmpeg/libavcodec/x86/cabac.h
===================================================================
--- FFmpeg.orig/libavcodec/x86/cabac.h
+++ FFmpeg/libavcodec/x86/cabac.h
@@ -183,6 +183,9 @@ av_noinline
#else
av_always_inline
#endif
+#ifdef __GNUC__
+__attribute__((optimize("-fno-tree-vectorize")))
+#endif
int get_cabac_inline_x86(CABACContext *c, uint8_t *const state)
{
int bit, tmp;

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavcodec/dxva2.c
Index: FFmpeg/libavcodec/dxva2.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dxva2.c
+++ jellyfin-ffmpeg/libavcodec/dxva2.c
--- FFmpeg.orig/libavcodec/dxva2.c
+++ FFmpeg/libavcodec/dxva2.c
@@ -615,6 +615,16 @@ int ff_dxva2_common_frame_params(AVCodec
else
surface_alignment = 16;
@ -11,7 +11,7 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2.c
+ if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
+ AVD3D11VADeviceContext *device_hwctx = device_ctx->hwctx;
+ if (device_hwctx->device_desc.VendorId == 0x8086) {
+ av_log(avctx, AV_LOG_DEBUG, "Intel d3d11va found, alignment changed!\n");
+ av_log(avctx, AV_LOG_DEBUG, "Intel DX11 device found, alignment changed!\n");
+ surface_alignment = 32;
+ }
+ }
@ -19,12 +19,12 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2.c
/* 1 base work surface */
num_surfaces = 1;
Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
Index: FFmpeg/libavutil/hwcontext_d3d11va.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
@@ -545,6 +545,35 @@ static void d3d11va_device_uninit(AVHWDe
}
--- FFmpeg.orig/libavutil/hwcontext_d3d11va.c
+++ FFmpeg/libavutil/hwcontext_d3d11va.c
@@ -609,6 +609,35 @@ static int d3d11va_device_find_adapter_b
return -1;
}
+static int d3d11va_check_uma_support(AVHWDeviceContext *ctx)
@ -59,7 +59,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
static int d3d11va_device_create(AVHWDeviceContext *ctx, const char *device,
AVDictionary *opts, int flags)
{
@@ -608,6 +637,9 @@ static int d3d11va_device_create(AVHWDev
@@ -686,6 +715,9 @@ static int d3d11va_device_create(AVHWDev
ID3D10Multithread_Release(pMultithread);
}
@ -69,10 +69,10 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.c
#if !HAVE_UWP && HAVE_DXGIDEBUG_H
if (is_debug) {
HANDLE dxgidebug_dll = LoadLibrary("dxgidebug.dll");
Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.h
Index: FFmpeg/libavutil/hwcontext_d3d11va.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_d3d11va.h
+++ jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.h
--- FFmpeg.orig/libavutil/hwcontext_d3d11va.h
+++ FFmpeg/libavutil/hwcontext_d3d11va.h
@@ -94,6 +94,16 @@ typedef struct AVD3D11VADeviceContext {
void (*lock)(void *lock_ctx);
void (*unlock)(void *lock_ctx);

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
Index: FFmpeg/libavfilter/vf_overlay_cuda.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_cuda.c
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
--- FFmpeg.orig/libavfilter/vf_overlay_cuda.c
+++ FFmpeg/libavfilter/vf_overlay_cuda.c
@@ -50,6 +50,8 @@
static const enum AVPixelFormat supported_main_formats[] = {
AV_PIX_FMT_NV12,
@ -11,7 +11,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
AV_PIX_FMT_NONE,
};
@@ -101,12 +103,15 @@ typedef struct OverlayCUDAContext {
@@ -105,12 +107,15 @@ typedef struct OverlayCUDAContext {
enum AVPixelFormat in_format_overlay;
enum AVPixelFormat in_format_main;
@ -28,7 +28,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
CUstream cu_stream;
FFFrameSync fs;
@@ -181,7 +186,10 @@ static int set_expr(AVExpr **pexpr, cons
@@ -185,7 +190,10 @@ static int set_expr(AVExpr **pexpr, cons
static int formats_match(const enum AVPixelFormat format_main, const enum AVPixelFormat format_overlay) {
switch(format_main) {
case AV_PIX_FMT_NV12:
@ -40,7 +40,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
case AV_PIX_FMT_YUV420P:
return format_overlay == AV_PIX_FMT_YUV420P ||
format_overlay == AV_PIX_FMT_YUVA420P;
@@ -196,11 +204,13 @@ static int formats_match(const enum AVPi
@@ -200,11 +208,13 @@ static int formats_match(const enum AVPi
static int overlay_cuda_call_kernel(
OverlayCUDAContext *ctx,
int x_position, int y_position,
@ -57,7 +57,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
int alpha_adj_x, int alpha_adj_y) {
CudaFunctions *cu = ctx->hwctx->internal->cuda_dl;
@@ -208,14 +218,18 @@ static int overlay_cuda_call_kernel(
@@ -212,14 +222,18 @@ static int overlay_cuda_call_kernel(
void* kernel_args[] = {
&x_position, &y_position,
&main_data, &main_linesize,
@ -77,7 +77,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
DIV_UP(main_width, BLOCK_X), DIV_UP(main_height, BLOCK_Y), 1,
BLOCK_X, BLOCK_Y, 1,
0, ctx->cu_stream, kernel_args, NULL));
@@ -290,11 +304,13 @@ static int overlay_cuda_blend(FFFrameSyn
@@ -300,11 +314,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
overlay_cuda_call_kernel(ctx,
ctx->x_position, ctx->y_position,
@ -94,7 +94,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
// overlay rest planes depending on pixel format
@@ -302,29 +318,42 @@ static int overlay_cuda_blend(FFFrameSyn
@@ -312,29 +328,42 @@ FF_ENABLE_DEPRECATION_WARNINGS
case AV_PIX_FMT_NV12:
overlay_cuda_call_kernel(ctx,
ctx->x_position, ctx->y_position / 2,
@ -154,7 +154,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
break;
default:
av_log(ctx, AV_LOG_ERROR, "Passed unsupported overlay pixel format\n");
@@ -450,6 +479,8 @@ static int overlay_cuda_config_output(AV
@@ -462,6 +491,8 @@ static int overlay_cuda_config_output(AV
return AVERROR(ENOSYS);
}
@ -163,7 +163,7 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
// check overlay input formats
if (!frames_ctx_overlay) {
@@ -503,7 +534,13 @@ static int overlay_cuda_config_output(AV
@@ -515,7 +546,13 @@ static int overlay_cuda_config_output(AV
return err;
}
@ -178,10 +178,10 @@ Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.c
if (err < 0) {
CHECK_CU(cu->cuCtxPopCurrent(&dummy));
return err;
Index: jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.cu
Index: FFmpeg/libavfilter/vf_overlay_cuda.cu
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_overlay_cuda.cu
+++ jellyfin-ffmpeg/libavfilter/vf_overlay_cuda.cu
--- FFmpeg.orig/libavfilter/vf_overlay_cuda.cu
+++ FFmpeg/libavfilter/vf_overlay_cuda.cu
@@ -18,14 +18,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

View File

@ -1,55 +0,0 @@
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -7195,8 +7195,23 @@ check_optflags -fno-signed-zeros
if enabled lto; then
test "$cc_type" != "$ld_type" && die "LTO requires same compiler and linker"
- check_cflags -flto
- check_ldflags -flto $cpuflags
+ if test "$cc_type" = "clang"; then
+ check_cflags -flto=thin
+ check_ldflags -flto=thin $cpuflags
+ # Clang's LTO fails on Windows, when there are references outside
+ # of inline assembly to nonlocal labels defined within inline assembly,
+ # see https://github.com/llvm/llvm-project/issues/76046.
+ case $target_os in
+ mingw*|win*)
+ disable inline_asm_nonlocal_labels
+ ;;
+ esac
+ else
+ check_cflags -flto
+ check_ldflags -flto $cpuflags
+ check_cflags -flto=auto
+ check_ldflags -flto=auto $cpuflags
+ fi
disable inline_asm_direct_symbol_refs
fi
@@ -7235,7 +7250,9 @@ if enabled icc; then
disable aligned_stack
fi
elif enabled gcc; then
- check_optflags -fno-tree-vectorize
+ case $gcc_basever in
+ 2|2.*|3.*|4.*|5.*|6.*) check_optflags -fno-tree-vectorize ;;
+ esac
check_cflags -Werror=format-security
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes
Index: FFmpeg/libavcodec/x86/cabac.h
===================================================================
--- FFmpeg.orig/libavcodec/x86/cabac.h
+++ FFmpeg/libavcodec/x86/cabac.h
@@ -183,6 +183,9 @@ av_noinline
#else
av_always_inline
#endif
+#ifdef __GNUC__
+__attribute__((optimize("-fno-tree-vectorize")))
+#endif
int get_cabac_inline_x86(CABACContext *c, uint8_t *const state)
{
int bit, tmp;

View File

@ -1,13 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vaapi.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
@@ -1711,7 +1711,7 @@ static int vaapi_device_create(AVHWDevic
if (priv->drm_fd < 0) {
av_log(ctx, AV_LOG_VERBOSE, "Cannot open "
"DRM render node for device %d.\n", n);
- break;
+ continue;
}
#if CONFIG_LIBDRM
info = drmGetVersion(priv->drm_fd);

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavcodec/nvenc.c
Index: FFmpeg/libavcodec/nvenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/nvenc.c
+++ jellyfin-ffmpeg/libavcodec/nvenc.c
--- FFmpeg.orig/libavcodec/nvenc.c
+++ FFmpeg/libavcodec/nvenc.c
@@ -24,6 +24,7 @@
#include "nvenc.h"
@ -10,7 +10,7 @@ Index: jellyfin-ffmpeg/libavcodec/nvenc.c
#if CONFIG_AV1_NVENC_ENCODER
#include "av1.h"
#endif
@@ -33,6 +34,7 @@
@@ -32,6 +33,7 @@
#include "libavutil/hwcontext.h"
#include "libavutil/cuda_check.h"
#include "libavutil/imgutils.h"
@ -18,7 +18,7 @@ Index: jellyfin-ffmpeg/libavcodec/nvenc.c
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/mathematics.h"
@@ -2445,6 +2447,80 @@ static int prepare_sei_data_array(AVCode
@@ -2506,6 +2508,80 @@ static int prepare_sei_data_array(AVCode
}
}
}

View File

@ -1,7 +1,35 @@
Index: jellyfin-ffmpeg/libavcodec/dxva2.c
Index: FFmpeg/libavcodec/d3d12va_hevc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dxva2.c
+++ jellyfin-ffmpeg/libavcodec/dxva2.c
--- FFmpeg.orig/libavcodec/d3d12va_hevc.c
+++ FFmpeg/libavcodec/d3d12va_hevc.c
@@ -33,7 +33,7 @@
#define MAX_SLICES 256
typedef struct HEVCDecodePictureContext {
- DXVA_PicParams_HEVC pp;
+ ff_DXVA_PicParams_HEVC_Rext pp;
DXVA_Qmatrix_HEVC qm;
unsigned slice_count;
DXVA_Slice_HEVC_Short slice_short[MAX_SLICES];
@@ -151,12 +151,12 @@ static int d3d12va_hevc_end_frame(AVCode
HEVCContext *h = avctx->priv_data;
HEVCDecodePictureContext *ctx_pic = h->ref->hwaccel_picture_private;
- int scale = ctx_pic->pp.dwCodingParamToolFlags & 1;
+ int scale = ctx_pic->pp.main.dwCodingParamToolFlags & 1;
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;
- return ff_d3d12va_common_end_frame(avctx, h->ref->frame, &ctx_pic->pp, sizeof(ctx_pic->pp),
+ return ff_d3d12va_common_end_frame(avctx, h->ref->frame, &ctx_pic->pp.main, sizeof(ctx_pic->pp.main),
scale ? &ctx_pic->qm : NULL, scale ? sizeof(ctx_pic->qm) : 0, update_input_arguments);
}
Index: FFmpeg/libavcodec/dxva2.c
===================================================================
--- FFmpeg.orig/libavcodec/dxva2.c
+++ FFmpeg/libavcodec/dxva2.c
@@ -43,6 +43,12 @@ DEFINE_GUID(ff_DXVA2_ModeVC1_D,
DEFINE_GUID(ff_DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(ff_DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
@ -16,14 +44,14 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2.c
DEFINE_GUID(ff_DXVA2_ModeVP9_VLD_10bit_Profile2,0xa4c749ef,0x6ecf,0x48aa,0x84,0x48,0x50,0xa7,0xa1,0x16,0x5f,0xf7);
DEFINE_GUID(ff_DXVA2_ModeAV1_VLD_Profile0,0xb8be4ccb,0xcf53,0x46ba,0x8d,0x59,0xd6,0xb8,0xa6,0xda,0x5d,0x2a);
@@ -69,6 +75,8 @@ static const int prof_hevc_main[] = {
FF_PROFILE_UNKNOWN};
static const int prof_hevc_main10[] = {FF_PROFILE_HEVC_MAIN_10,
FF_PROFILE_UNKNOWN};
+static const int prof_hevc_main_rext[] = {FF_PROFILE_HEVC_REXT,
+ FF_PROFILE_UNKNOWN};
static const int prof_vp9_profile0[] = {FF_PROFILE_VP9_0,
FF_PROFILE_UNKNOWN};
static const int prof_vp9_profile2[] = {FF_PROFILE_VP9_2,
AV_PROFILE_UNKNOWN};
static const int prof_hevc_main10[] = {AV_PROFILE_HEVC_MAIN_10,
AV_PROFILE_UNKNOWN};
+static const int prof_hevc_main_rext[] = {AV_PROFILE_HEVC_REXT,
+ AV_PROFILE_UNKNOWN};
static const int prof_vp9_profile0[] = {AV_PROFILE_VP9_0,
AV_PROFILE_UNKNOWN};
static const int prof_vp9_profile2[] = {AV_PROFILE_VP9_2,
@@ -97,6 +105,14 @@ static const dxva_mode dxva_modes[] = {
{ &ff_DXVA2_ModeHEVC_VLD_Main10, AV_CODEC_ID_HEVC, prof_hevc_main10 },
{ &ff_DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC, prof_hevc_main },
@ -149,79 +177,27 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2.c
frames_ctx->width = FFALIGN(avctx->coded_width, surface_alignment);
frames_ctx->height = FFALIGN(avctx->coded_height, surface_alignment);
frames_ctx->initial_pool_size = num_surfaces;
Index: jellyfin-ffmpeg/libavcodec/dxva2_hevc.c
Index: FFmpeg/libavcodec/dxva2_hevc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dxva2_hevc.c
+++ jellyfin-ffmpeg/libavcodec/dxva2_hevc.c
@@ -28,10 +28,60 @@
#include "hevc_data.h"
#include "hevcdec.h"
+/**
+ * Picture Parameters DXVA buffer struct for Rext is not specified in DXVA
+ * spec. The below structures come from Intel platform DDI definition, so they
+ * are currently Intel specific.
+ *
+ * For Nvidia and AMD platforms supporting HEVC Rext, it is expected
+ * the picture param information included in below structures is sufficient
+ * for underlying drivers supporting range extension.
+ */
+#pragma pack(push, 1)
+typedef struct
+{
+ DXVA_PicParams_HEVC main;
+
+ // HEVC Range Extension. Fields are named the same as in HEVC spec.
+ __C89_NAMELESS union {
+ __C89_NAMELESS struct {
+ UINT32 transform_skip_rotation_enabled_flag : 1;
+ UINT32 transform_skip_context_enabled_flag : 1;
+ UINT32 implicit_rdpcm_enabled_flag : 1;
+ UINT32 explicit_rdpcm_enabled_flag : 1;
+ UINT32 extended_precision_processing_flag : 1;
+ UINT32 intra_smoothing_disabled_flag : 1;
+ UINT32 high_precision_offsets_enabled_flag : 1;
+ UINT32 persistent_rice_adaptation_enabled_flag : 1;
+ UINT32 cabac_bypass_alignment_enabled_flag : 1;
+ UINT32 cross_component_prediction_enabled_flag : 1;
+ UINT32 chroma_qp_offset_list_enabled_flag : 1;
+ // Indicates if luma bit depth equals to 16. If its value is 1, the
+ // corresponding bit_depth_luma_minus8 must be set to 0.
+ UINT32 BitDepthLuma16 : 1;
+ // Indicates if chroma bit depth equals to 16. If its value is 1, the
+ // corresponding bit_depth_chroma_minus8 must be set to 0.
+ UINT32 BitDepthChroma16 : 1;
+ UINT32 ReservedBits8 : 19;
+ };
+ UINT32 dwRangeExtensionFlags;
+ };
+
+ UCHAR diff_cu_chroma_qp_offset_depth; // [0..3]
+ UCHAR chroma_qp_offset_list_len_minus1; // [0..5]
+ UCHAR log2_sao_offset_scale_luma; // [0..6]
+ UCHAR log2_sao_offset_scale_chroma; // [0..6]
+ UCHAR log2_max_transform_skip_block_size_minus2;
+ CHAR cb_qp_offset_list[6]; // [-12..12]
+ CHAR cr_qp_offset_list[6]; // [-12..12]
+
+} DXVA_PicParams_HEVC_Rext;
+#pragma pack(pop)
+
--- FFmpeg.orig/libavcodec/dxva2_hevc.c
+++ FFmpeg/libavcodec/dxva2_hevc.c
@@ -32,7 +32,7 @@
#define MAX_SLICES 256
struct hevc_dxva2_picture_context {
- DXVA_PicParams_HEVC pp;
+ DXVA_PicParams_HEVC_Rext pp;
+ ff_DXVA_PicParams_HEVC_Rext pp;
DXVA_Qmatrix_HEVC qm;
unsigned slice_count;
DXVA_Slice_HEVC_Short slice_short[MAX_SLICES];
@@ -57,18 +107,48 @@ static int get_refpic_index(const DXVA_P
@@ -58,19 +58,49 @@ static int get_refpic_index(const DXVA_P
}
static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const HEVCContext *h,
void ff_dxva2_hevc_fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx,
- DXVA_PicParams_HEVC *pp)
+ DXVA_PicParams_HEVC_Rext *ppext)
+ ff_DXVA_PicParams_HEVC_Rext *ppext)
{
const HEVCContext *h = avctx->priv_data;
const HEVCFrame *current_picture = h->ref;
const HEVCSPS *sps = h->ps.sps;
const HEVCPPS *pps = h->ps.pps;
@ -266,16 +242,13 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2_hevc.c
pp->wFormatAndSequenceInfoFlags = (sps->chroma_format_idc << 0) |
(sps->separate_colour_plane_flag << 2) |
((sps->bit_depth - 8) << 3) |
@@ -404,16 +484,18 @@ static int dxva2_hevc_decode_slice(AVCod
static int dxva2_hevc_end_frame(AVCodecContext *avctx)
@@ -409,14 +439,15 @@ static int dxva2_hevc_end_frame(AVCodecC
{
+ AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
HEVCContext *h = avctx->priv_data;
struct hevc_dxva2_picture_context *ctx_pic = h->ref->hwaccel_picture_private;
- int scale = ctx_pic->pp.dwCodingParamToolFlags & 1;
+ int scale = ctx_pic->pp.main.dwCodingParamToolFlags & 1;
+ int rext = avctx->profile == FF_PROFILE_HEVC_REXT;
+ int rext = avctx->profile == AV_PROFILE_HEVC_REXT;
int ret;
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
@ -287,11 +260,85 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2_hevc.c
scale ? &ctx_pic->qm : NULL, scale ? sizeof(ctx_pic->qm) : 0,
commit_bitstream_and_slice_buffer);
return ret;
Index: jellyfin-ffmpeg/libavcodec/hevcdec.c
Index: FFmpeg/libavcodec/dxva2_internal.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/hevcdec.c
+++ jellyfin-ffmpeg/libavcodec/hevcdec.c
@@ -453,6 +453,13 @@ static enum AVPixelFormat get_format(HEV
--- FFmpeg.orig/libavcodec/dxva2_internal.h
+++ FFmpeg/libavcodec/dxva2_internal.h
@@ -134,6 +134,58 @@ typedef struct FFDXVASharedContext {
DXVA_CONTEXT_CFG(avctx, ctx) && \
(ff_dxva2_is_d3d11(avctx) || DXVA2_VAR(ctx, surface_count)))
+#if CONFIG_HEVC_D3D12VA_HWACCEL || CONFIG_HEVC_D3D11VA_HWACCEL || CONFIG_HEVC_D3D11VA2_HWACCEL || CONFIG_HEVC_DXVA2_HWACCEL
+/**
++ * Picture Parameters DXVA buffer struct for Rext is not specified in DXVA
++ * spec. The below structures come from Intel platform DDI definition, so they
++ * are currently Intel specific.
++ *
++ * For Nvidia and AMD platforms supporting HEVC Rext, it is expected
++ * the picture param information included in below structures is sufficient
++ * for underlying drivers supporting range extension.
++ */
+#pragma pack(push, 1)
+typedef struct
+{
+ DXVA_PicParams_HEVC main;
+
+ // HEVC Range Extension. Fields are named the same as in HEVC spec.
+ __C89_NAMELESS union {
+ __C89_NAMELESS struct {
+ UINT32 transform_skip_rotation_enabled_flag : 1;
+ UINT32 transform_skip_context_enabled_flag : 1;
+ UINT32 implicit_rdpcm_enabled_flag : 1;
+ UINT32 explicit_rdpcm_enabled_flag : 1;
+ UINT32 extended_precision_processing_flag : 1;
+ UINT32 intra_smoothing_disabled_flag : 1;
+ UINT32 high_precision_offsets_enabled_flag : 1;
+ UINT32 persistent_rice_adaptation_enabled_flag : 1;
+ UINT32 cabac_bypass_alignment_enabled_flag : 1;
+ UINT32 cross_component_prediction_enabled_flag : 1;
+ UINT32 chroma_qp_offset_list_enabled_flag : 1;
+ // Indicates if luma bit depth equals to 16. If its value is 1, the
+ // corresponding bit_depth_luma_minus8 must be set to 0.
+ UINT32 BitDepthLuma16 : 1;
+ // Indicates if chroma bit depth equals to 16. If its value is 1, the
+ // corresponding bit_depth_chroma_minus8 must be set to 0.
+ UINT32 BitDepthChroma16 : 1;
+ UINT32 ReservedBits8 : 19;
+ };
+ UINT32 dwRangeExtensionFlags;
+ };
+
+ UCHAR diff_cu_chroma_qp_offset_depth; // [0..3]
+ UCHAR chroma_qp_offset_list_len_minus1; // [0..5]
+ UCHAR log2_sao_offset_scale_luma; // [0..6]
+ UCHAR log2_sao_offset_scale_chroma; // [0..6]
+ UCHAR log2_max_transform_skip_block_size_minus2;
+ CHAR cb_qp_offset_list[6]; // [-12..12]
+ CHAR cr_qp_offset_list[6]; // [-12..12]
+
+} ff_DXVA_PicParams_HEVC_Rext;
+#pragma pack(pop)
+#endif
+
#if CONFIG_D3D12VA
unsigned ff_d3d12va_get_surface_index(const AVCodecContext *avctx,
D3D12VADecodeContext *ctx, const AVFrame *frame,
@@ -171,7 +223,9 @@ void ff_dxva2_h264_fill_picture_paramete
void ff_dxva2_h264_fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, DXVA_Qmatrix_H264 *qm);
-void ff_dxva2_hevc_fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, DXVA_PicParams_HEVC *pp);
+#if CONFIG_HEVC_D3D12VA_HWACCEL || CONFIG_HEVC_D3D11VA_HWACCEL || CONFIG_HEVC_D3D11VA2_HWACCEL || CONFIG_HEVC_DXVA2_HWACCEL
+void ff_dxva2_hevc_fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, ff_DXVA_PicParams_HEVC_Rext *ppext);
+#endif
void ff_dxva2_hevc_fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, DXVA_Qmatrix_HEVC *qm);
Index: FFmpeg/libavcodec/hevcdec.c
===================================================================
--- FFmpeg.orig/libavcodec/hevcdec.c
+++ FFmpeg/libavcodec/hevcdec.c
@@ -465,6 +465,13 @@ static enum AVPixelFormat get_format(HEV
#endif
break;
case AV_PIX_FMT_YUV444P:
@ -305,7 +352,7 @@ Index: jellyfin-ffmpeg/libavcodec/hevcdec.c
#if CONFIG_HEVC_VAAPI_HWACCEL
*fmt++ = AV_PIX_FMT_VAAPI;
#endif
@@ -468,6 +475,13 @@ static enum AVPixelFormat get_format(HEV
@@ -483,6 +490,13 @@ static enum AVPixelFormat get_format(HEV
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV422P10LE:
@ -319,8 +366,8 @@ Index: jellyfin-ffmpeg/libavcodec/hevcdec.c
#if CONFIG_HEVC_VAAPI_HWACCEL
*fmt++ = AV_PIX_FMT_VAAPI;
#endif
@@ -481,6 +495,13 @@ static enum AVPixelFormat get_format(HEV
#endif
@@ -500,6 +514,13 @@ static enum AVPixelFormat get_format(HEV
/* NOTE: fallthrough */
case AV_PIX_FMT_YUV420P12:
case AV_PIX_FMT_YUV444P12:
+#if CONFIG_HEVC_DXVA2_HWACCEL
@ -333,7 +380,7 @@ Index: jellyfin-ffmpeg/libavcodec/hevcdec.c
#if CONFIG_HEVC_VAAPI_HWACCEL
*fmt++ = AV_PIX_FMT_VAAPI;
#endif
@@ -492,6 +513,13 @@ static enum AVPixelFormat get_format(HEV
@@ -514,6 +535,13 @@ static enum AVPixelFormat get_format(HEV
#endif
break;
case AV_PIX_FMT_YUV422P12:

View File

@ -1,13 +1,13 @@
Index: jellyfin-ffmpeg/libavfilter/vf_scale_vaapi.c
Index: FFmpeg/libavfilter/vf_scale_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_scale_vaapi.c
+++ jellyfin-ffmpeg/libavfilter/vf_scale_vaapi.c
@@ -212,7 +212,7 @@ static const AVOption scale_vaapi_option
--- FFmpeg.orig/libavfilter/vf_scale_vaapi.c
+++ FFmpeg/libavfilter/vf_scale_vaapi.c
@@ -224,7 +224,7 @@ static const AVOption scale_vaapi_option
{ "format", "Output video format (software format of hardware frames)",
OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
{ "mode", "Scaling mode",
- OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VA_FILTER_SCALING_HQ },
+ OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VA_FILTER_SCALING_FAST },
0, VA_FILTER_SCALING_NL_ANAMORPHIC, FLAGS, "mode" },
0, VA_FILTER_SCALING_NL_ANAMORPHIC, FLAGS, .unit = "mode" },
{ "default", "Use the default (depend on the driver) scaling algorithm",
0, AV_OPT_TYPE_CONST, { .i64 = VA_FILTER_SCALING_DEFAULT }, 0, 0, FLAGS, "mode" },
0, AV_OPT_TYPE_CONST, { .i64 = VA_FILTER_SCALING_DEFAULT }, 0, 0, FLAGS, .unit = "mode" },

View File

@ -0,0 +1,15 @@
Index: FFmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_vaapi.c
+++ FFmpeg/libavutil/hwcontext_vaapi.c
@@ -1358,9 +1358,8 @@ static int vaapi_map_to_drm_esh(AVHWFram
vas = vaSyncSurface(hwctx->display, surface_id);
if (vas != VA_STATUS_SUCCESS) {
- av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
+ av_log(hwfc, AV_LOG_WARNING, "Failed to sync surface "
"%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
- return AVERROR(EIO);
}
}

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavcodec/qsv.c
Index: FFmpeg/libavcodec/qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsv.c
+++ jellyfin-ffmpeg/libavcodec/qsv.c
@@ -409,9 +409,17 @@ static int qsv_load_plugins(mfxSession s
--- FFmpeg.orig/libavcodec/qsv.c
+++ FFmpeg/libavcodec/qsv.c
@@ -406,9 +406,17 @@ static int qsv_load_plugins(mfxSession s
void *logctx)
{
#if QSV_HAVE_USER_PLUGIN

View File

@ -1,11 +1,11 @@
Index: jellyfin-ffmpeg/libavcodec/qsvenc_av1.c
Index: FFmpeg/libavcodec/qsvenc_av1.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_av1.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_av1.c
@@ -130,8 +130,8 @@ static const AVClass class = {
--- FFmpeg.orig/libavcodec/qsvenc_av1.c
+++ FFmpeg/libavcodec/qsvenc_av1.c
@@ -203,8 +203,8 @@ static const AVClass class = {
static const FFCodecDefault qsv_enc_defaults[] = {
{ "b", "1M" },
{ "b", "0" },
- { "g", "-1" },
- { "bf", "-1" },
+ { "g", "250" },
@ -13,13 +13,13 @@ Index: jellyfin-ffmpeg/libavcodec/qsvenc_av1.c
{ "refs", "0" },
{ NULL },
};
Index: jellyfin-ffmpeg/libavcodec/qsvenc_h264.c
Index: FFmpeg/libavcodec/qsvenc_h264.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_h264.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_h264.c
--- FFmpeg.orig/libavcodec/qsvenc_h264.c
+++ FFmpeg/libavcodec/qsvenc_h264.c
@@ -180,8 +180,8 @@ static const AVClass class = {
static const FFCodecDefault qsv_enc_defaults[] = {
{ "b", "1M" },
{ "b", "0" },
{ "refs", "0" },
- { "g", "-1" },
- { "bf", "-1" },
@ -28,15 +28,15 @@ Index: jellyfin-ffmpeg/libavcodec/qsvenc_h264.c
{ "qmin", "-1" },
{ "qmax", "-1" },
{ "trellis", "-1" },
Index: jellyfin-ffmpeg/libavcodec/qsvenc_hevc.c
Index: FFmpeg/libavcodec/qsvenc_hevc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc_hevc.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc_hevc.c
@@ -376,8 +376,8 @@ static const AVClass class = {
--- FFmpeg.orig/libavcodec/qsvenc_hevc.c
+++ FFmpeg/libavcodec/qsvenc_hevc.c
@@ -379,8 +379,8 @@ static const AVClass class = {
static const FFCodecDefault qsv_enc_defaults[] = {
{ "b", "1M" },
{ "b", "0" },
{ "refs", "0" },
- { "g", "-1" },
- { "g", "248" },
- { "bf", "-1" },
+ { "g", "250" },
+ { "bf", "4" },

View File

@ -1,16 +1,18 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
Index: FFmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vaapi.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
@@ -150,6 +150,7 @@ static const VAAPIFormatDescriptor vaapi
MAP(XRGB, RGB32, 0RGB, 0),
--- FFmpeg.orig/libavutil/hwcontext_vaapi.c
+++ FFmpeg/libavutil/hwcontext_vaapi.c
@@ -170,6 +170,9 @@ static const VAAPIFormatDescriptor vaapi
#ifdef VA_FOURCC_X2R10G10B10
MAP(X2R10G10B10, RGB32_10, X2RGB10, 0),
+ MAP(X2B10G10R10, RGB32_10, X2BGR10, 0),
#endif
+#ifdef VA_FOURCC_X2B10G10R10
+ MAP(X2B10G10R10, RGB32_10, X2BGR10, 0),
+#endif
#ifdef VA_FOURCC_Y410
// libva doesn't include a fourcc for XV30 and the driver only declares
@@ -1016,9 +1017,11 @@ static const struct {
// support for Y410, so we must fudge the mapping here.
@@ -1047,9 +1050,11 @@ static const struct {
DRM_MAP(NV12, 1, DRM_FORMAT_NV12),
#if defined(VA_FOURCC_P010) && defined(DRM_FORMAT_R16)
DRM_MAP(P010, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
@ -22,18 +24,17 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
#endif
DRM_MAP(BGRA, 1, DRM_FORMAT_ARGB8888),
DRM_MAP(BGRX, 1, DRM_FORMAT_XRGB8888),
@@ -1030,6 +1033,10 @@ static const struct {
@@ -1073,6 +1078,9 @@ static const struct {
#if defined(VA_FOURCC_X2R10G10B10) && defined(DRM_FORMAT_XRGB2101010)
DRM_MAP(X2R10G10B10, 1, DRM_FORMAT_XRGB2101010),
#endif
DRM_MAP(ARGB, 1, DRM_FORMAT_BGRA8888),
DRM_MAP(XRGB, 1, DRM_FORMAT_BGRX8888),
+#ifdef VA_FOURCC_X2R10G10B10
+ DRM_MAP(X2R10G10B10, 1, DRM_FORMAT_XRGB2101010),
+#if defined(VA_FOURCC_X2B10G10R10) && defined(DRM_FORMAT_XBGR2101010)
+ DRM_MAP(X2B10G10R10, 1, DRM_FORMAT_XBGR2101010),
+#endif
#if defined(VA_FOURCC_XYUV) && defined(DRM_FORMAT_XYUV8888)
DRM_MAP(XYUV, 1, DRM_FORMAT_XYUV8888),
#endif
@@ -1094,12 +1101,6 @@ static int vaapi_map_from_drm(AVHWFrames
};
#undef DRM_MAP
@@ -1128,12 +1136,6 @@ static int vaapi_map_from_drm(AVHWFrames
desc = (AVDRMFrameDescriptor*)src->data[0];
@ -46,7 +47,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
va_fourcc = 0;
for (i = 0; i < FF_ARRAY_ELEMS(vaapi_drm_format_map); i++) {
if (desc->nb_layers != vaapi_drm_format_map[i].nb_layer_formats)
@@ -1239,6 +1240,12 @@ static int vaapi_map_from_drm(AVHWFrames
@@ -1273,6 +1275,12 @@ static int vaapi_map_from_drm(AVHWFrames
buffer_attrs, FF_ARRAY_ELEMS(buffer_attrs));
}
#else

View File

@ -1,161 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vulkan.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
@@ -2273,13 +2273,7 @@ static int vulkan_frames_init(AVHWFrames
AVVulkanDeviceContext *dev_hwctx = hwfc->device_ctx->hwctx;
VulkanDevicePriv *p = hwfc->device_ctx->internal->priv;
const VkImageDrmFormatModifierListCreateInfoEXT *modifier_info;
- const int has_modifiers = !!(p->extensions & FF_VK_EXT_DRM_MODIFIER_FLAGS);
-
- /* Default tiling flags */
- hwctx->tiling = hwctx->tiling ? hwctx->tiling :
- has_modifiers ? VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT :
- p->use_linear_images ? VK_IMAGE_TILING_LINEAR :
- VK_IMAGE_TILING_OPTIMAL;
+ int has_modifiers = !!(p->extensions & FF_VK_EXT_DRM_MODIFIER_FLAGS);
if (!hwctx->usage)
hwctx->usage = FF_VK_DEFAULT_USAGE_FLAGS;
@@ -2298,9 +2292,6 @@ static int vulkan_frames_init(AVHWFrames
const VkFormat *fmt = av_vkfmt_from_pixfmt(hwfc->sw_format);
VkImageDrmFormatModifierListCreateInfoEXT *modifier_info;
FFVulkanFunctions *vk = &p->vkfn;
- VkDrmFormatModifierPropertiesEXT *mod_props;
- uint64_t *modifiers;
- int modifier_count = 0;
VkDrmFormatModifierPropertiesListEXT mod_props_list = {
.sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
@@ -2316,66 +2307,76 @@ static int vulkan_frames_init(AVHWFrames
/* Get all supported modifiers */
vk->GetPhysicalDeviceFormatProperties2(dev_hwctx->phys_dev, fmt[0], &prop);
- if (!mod_props_list.drmFormatModifierCount) {
- av_log(hwfc, AV_LOG_ERROR, "There are no supported modifiers for the given sw_format\n");
- return AVERROR(EINVAL);
- }
-
- /* Createa structure to hold the modifier list info */
- modifier_info = av_mallocz(sizeof(*modifier_info));
- if (!modifier_info)
- return AVERROR(ENOMEM);
-
- modifier_info->pNext = NULL;
- modifier_info->sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
-
- /* Add structure to the image creation pNext chain */
- if (!hwctx->create_pnext)
- hwctx->create_pnext = modifier_info;
- else
- vk_link_struct(hwctx->create_pnext, (void *)modifier_info);
-
- /* Backup the allocated struct to be freed later */
- fp->modifier_info = modifier_info;
-
- /* Allocate list of modifiers */
- modifiers = av_mallocz(mod_props_list.drmFormatModifierCount *
- sizeof(*modifiers));
- if (!modifiers)
- return AVERROR(ENOMEM);
-
- modifier_info->pDrmFormatModifiers = modifiers;
-
- /* Allocate a temporary list to hold all modifiers supported */
- mod_props = av_mallocz(mod_props_list.drmFormatModifierCount *
- sizeof(*mod_props));
- if (!mod_props)
- return AVERROR(ENOMEM);
-
- mod_props_list.pDrmFormatModifierProperties = mod_props;
-
- /* Finally get all modifiers from the device */
- vk->GetPhysicalDeviceFormatProperties2(dev_hwctx->phys_dev, fmt[0], &prop);
+ if (mod_props_list.drmFormatModifierCount) {
+ VkDrmFormatModifierPropertiesEXT *mod_props;
+ uint64_t *modifiers;
+ int modifier_count = 0;
+
+ /* Createa structure to hold the modifier list info */
+ modifier_info = av_mallocz(sizeof(*modifier_info));
+ if (!modifier_info)
+ return AVERROR(ENOMEM);
+
+ modifier_info->pNext = NULL;
+ modifier_info->sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
+
+ /* Add structure to the image creation pNext chain */
+ if (!hwctx->create_pnext)
+ hwctx->create_pnext = modifier_info;
+ else
+ vk_link_struct(hwctx->create_pnext, (void *)modifier_info);
+
+ /* Backup the allocated struct to be freed later */
+ fp->modifier_info = modifier_info;
+
+ /* Allocate list of modifiers */
+ modifiers = av_mallocz(mod_props_list.drmFormatModifierCount *
+ sizeof(*modifiers));
+ if (!modifiers)
+ return AVERROR(ENOMEM);
+
+ modifier_info->pDrmFormatModifiers = modifiers;
+
+ /* Allocate a temporary list to hold all modifiers supported */
+ mod_props = av_mallocz(mod_props_list.drmFormatModifierCount *
+ sizeof(*mod_props));
+ if (!mod_props)
+ return AVERROR(ENOMEM);
+
+ mod_props_list.pDrmFormatModifierProperties = mod_props;
+
+ /* Finally get all modifiers from the device */
+ vk->GetPhysicalDeviceFormatProperties2(dev_hwctx->phys_dev, fmt[0], &prop);
+
+ /* Reject any modifiers that don't match our requirements */
+ for (int i = 0; i < mod_props_list.drmFormatModifierCount; i++) {
+ if (!(mod_props[i].drmFormatModifierTilingFeatures & hwctx->usage))
+ continue;
+
+ modifiers[modifier_count++] = mod_props[i].drmFormatModifier;
+ }
+
+ if (!modifier_count) {
+ av_log(hwfc, AV_LOG_ERROR, "None of the given modifiers supports"
+ " the usage flags!\n");
+ av_freep(&mod_props);
+ return AVERROR(EINVAL);
+ }
- /* Reject any modifiers that don't match our requirements */
- for (int i = 0; i < mod_props_list.drmFormatModifierCount; i++) {
- if (!(mod_props[i].drmFormatModifierTilingFeatures & hwctx->usage))
- continue;
-
- modifiers[modifier_count++] = mod_props[i].drmFormatModifier;
- }
-
- if (!modifier_count) {
- av_log(hwfc, AV_LOG_ERROR, "None of the given modifiers supports"
- " the usage flags!\n");
+ modifier_info->drmFormatModifierCount = modifier_count;
av_freep(&mod_props);
- return AVERROR(EINVAL);
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "There are no supported modifiers for the given sw_format\n");
+ has_modifiers = 0;
}
-
- modifier_info->drmFormatModifierCount = modifier_count;
- av_freep(&mod_props);
}
+ /* Default tiling flags */
+ hwctx->tiling = hwctx->tiling ? hwctx->tiling :
+ has_modifiers ? VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT :
+ p->use_linear_images ? VK_IMAGE_TILING_LINEAR :
+ VK_IMAGE_TILING_OPTIMAL;
+
err = create_exec_ctx(hwfc, &fp->conv_ctx,
dev_hwctx->queue_family_comp_index,
dev_hwctx->nb_comp_queues);

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavcodec/vaapi_encode_h265.c
Index: FFmpeg/libavcodec/vaapi_encode_h265.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/vaapi_encode_h265.c
+++ jellyfin-ffmpeg/libavcodec/vaapi_encode_h265.c
--- FFmpeg.orig/libavcodec/vaapi_encode_h265.c
+++ FFmpeg/libavcodec/vaapi_encode_h265.c
@@ -691,7 +691,25 @@ static int vaapi_encode_h265_init_sequen
sps->log2_min_pcm_luma_coding_block_size_minus3 +
sps->log2_diff_max_min_pcm_luma_coding_block_size,

View File

@ -1,96 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vulkan.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
@@ -774,7 +774,7 @@ static const char *vk_dev_type(enum VkPh
static int find_device(AVHWDeviceContext *ctx, VulkanDeviceSelection *select)
{
int err = 0, choice = -1;
- uint32_t num;
+ uint32_t num, api = 0;
VkResult ret;
VulkanDevicePriv *p = ctx->internal->priv;
FFVulkanFunctions *vk = &p->vkfn;
@@ -828,49 +828,61 @@ static int find_device(AVHWDeviceContext
if (select->has_uuid) {
for (int i = 0; i < num; i++) {
- if (!strncmp(idp[i].deviceUUID, select->uuid, VK_UUID_SIZE)) {
+ if (!strncmp(idp[i].deviceUUID, select->uuid, VK_UUID_SIZE) &&
+ prop[i].properties.apiVersion > api) {
choice = i;
- goto end;
- }
+ api = prop[i].properties.apiVersion;
+ }
+ }
+ if (choice == -1) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to find device by given UUID!\n");
+ err = AVERROR(ENODEV);
}
- av_log(ctx, AV_LOG_ERROR, "Unable to find device by given UUID!\n");
- err = AVERROR(ENODEV);
goto end;
} else if (select->name) {
av_log(ctx, AV_LOG_VERBOSE, "Requested device: %s\n", select->name);
for (int i = 0; i < num; i++) {
- if (strstr(prop[i].properties.deviceName, select->name)) {
+ if (strstr(prop[i].properties.deviceName, select->name) &&
+ prop[i].properties.apiVersion > api) {
choice = i;
- goto end;
- }
+ api = prop[i].properties.apiVersion;
+ }
+ }
+ if (choice == -1) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to find device \"%s\"!\n",
+ select->name);
+ err = AVERROR(ENODEV);
}
- av_log(ctx, AV_LOG_ERROR, "Unable to find device \"%s\"!\n",
- select->name);
- err = AVERROR(ENODEV);
goto end;
} else if (select->pci_device) {
av_log(ctx, AV_LOG_VERBOSE, "Requested device: 0x%x\n", select->pci_device);
for (int i = 0; i < num; i++) {
- if (select->pci_device == prop[i].properties.deviceID) {
+ if (select->pci_device == prop[i].properties.deviceID &&
+ prop[i].properties.apiVersion > api) {
choice = i;
- goto end;
+ api = prop[i].properties.apiVersion;
}
}
- av_log(ctx, AV_LOG_ERROR, "Unable to find device with PCI ID 0x%x!\n",
- select->pci_device);
- err = AVERROR(EINVAL);
+ if (choice == -1) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to find device with PCI ID 0x%x!\n",
+ select->pci_device);
+ err = AVERROR(EINVAL);
+ }
goto end;
} else if (select->vendor_id) {
av_log(ctx, AV_LOG_VERBOSE, "Requested vendor: 0x%x\n", select->vendor_id);
for (int i = 0; i < num; i++) {
- if (select->vendor_id == prop[i].properties.vendorID) {
+ if (select->vendor_id == prop[i].properties.vendorID &&
+ prop[i].properties.apiVersion > api) {
choice = i;
- goto end;
+ api = prop[i].properties.apiVersion;
}
}
- av_log(ctx, AV_LOG_ERROR, "Unable to find device with Vendor ID 0x%x!\n",
- select->vendor_id);
- err = AVERROR(ENODEV);
+ if (choice == -1) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to find device with Vendor ID 0x%x!\n",
+ select->vendor_id);
+ err = AVERROR(ENODEV);
+ }
goto end;
} else {
if (select->index < num) {

View File

@ -1,22 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vaapi.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
@@ -1319,8 +1319,16 @@ static int vaapi_map_to_drm_esh(AVHWFram
surface_id = (VASurfaceID)(uintptr_t)src->data[3];
export_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
- if (flags & AV_HWFRAME_MAP_READ)
+ if (flags & AV_HWFRAME_MAP_READ) {
export_flags |= VA_EXPORT_SURFACE_READ_ONLY;
+
+ vas = vaSyncSurface(hwctx->display, surface_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_WARNING, "Failed to sync surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+ }
+
if (flags & AV_HWFRAME_MAP_WRITE)
export_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavcodec/dxva2.c
Index: FFmpeg/libavcodec/dxva2.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dxva2.c
+++ jellyfin-ffmpeg/libavcodec/dxva2.c
--- FFmpeg.orig/libavcodec/dxva2.c
+++ FFmpeg/libavcodec/dxva2.c
@@ -714,8 +714,10 @@ int ff_dxva2_common_frame_params(AVCodec
#if CONFIG_D3D11VA
if (frames_ctx->format == AV_PIX_FMT_D3D11) {
@ -13,12 +13,12 @@ Index: jellyfin-ffmpeg/libavcodec/dxva2.c
}
#endif
Index: jellyfin-ffmpeg/libavfilter/qsvvpp.c
Index: FFmpeg/libavfilter/qsvvpp.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/qsvvpp.c
+++ jellyfin-ffmpeg/libavfilter/qsvvpp.c
@@ -598,6 +598,9 @@ static int init_vpp_session(AVFilterCont
out_frames_ctx->initial_pool_size += avctx->extra_hw_frames;
--- FFmpeg.orig/libavfilter/qsvvpp.c
+++ FFmpeg/libavfilter/qsvvpp.c
@@ -641,6 +641,9 @@ static int init_vpp_session(AVFilterCont
out_frames_hwctx->frame_type = s->out_mem_mode;
+ if (in_frames_hwctx)
@ -27,10 +27,10 @@ Index: jellyfin-ffmpeg/libavfilter/qsvvpp.c
ret = av_hwframe_ctx_init(out_frames_ref);
if (ret < 0) {
av_buffer_unref(&out_frames_ref);
Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.h
Index: FFmpeg/libavutil/hwcontext_d3d11va.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_d3d11va.h
+++ jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.h
--- FFmpeg.orig/libavutil/hwcontext_d3d11va.h
+++ FFmpeg/libavutil/hwcontext_d3d11va.h
@@ -183,6 +183,11 @@ typedef struct AVD3D11VAFramesContext {
* This field is ignored/invalid if a user-allocated texture is provided.
*/
@ -43,36 +43,40 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_d3d11va.h
} AVD3D11VAFramesContext;
#endif /* AVUTIL_HWCONTEXT_D3D11VA_H */
Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
Index: FFmpeg/libavutil/hwcontext_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_opencl.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
@@ -167,6 +167,10 @@ typedef struct OpenCLFramesContext {
--- FFmpeg.orig/libavutil/hwcontext_opencl.c
+++ FFmpeg/libavutil/hwcontext_opencl.c
@@ -181,6 +181,10 @@ typedef struct OpenCLFramesContext {
int nb_mapped_frames;
AVOpenCLFrameDescriptor *mapped_frames;
#endif
+#if HAVE_OPENCL_D3D11
+ ID3D11Asynchronous *sync_point;
+ ID3D11Texture2D *sync_tex_2x2;
+ ID3D11Asynchronous *sync_point;
+#endif
} OpenCLFramesContext;
static void CL_CALLBACK opencl_error_callback(const char *errinfo,
@@ -1788,7 +1792,12 @@ static void opencl_frames_uninit(AVHWFra
@@ -1809,7 +1813,16 @@ static void opencl_frames_uninit(AVHWFra
av_freep(&priv->mapped_frames);
}
#endif
-
+#if HAVE_OPENCL_D3D11
+ if (priv->sync_point)
+ ID3D11Asynchronous_Release(priv->sync_point);
+ if (priv->sync_tex_2x2)
+ if (priv->sync_tex_2x2) {
+ ID3D11Texture2D_Release(priv->sync_tex_2x2);
+ priv->sync_tex_2x2 = NULL;
+ }
+ if (priv->sync_point) {
+ ID3D11Asynchronous_Release(priv->sync_point);
+ priv->sync_point = NULL;
+ }
+#endif
if (priv->command_queue) {
cle = clReleaseCommandQueue(priv->command_queue);
if (cle != CL_SUCCESS) {
@@ -2563,6 +2572,82 @@ fail:
@@ -2583,6 +2596,98 @@ fail:
#if HAVE_OPENCL_D3D11
@ -83,6 +87,7 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+{
+ HRESULT hr;
+ D3D11_QUERY_DESC query = { D3D11_QUERY_EVENT, 0 };
+ D3D11_TEXTURE2D_DESC cur_desc = { 0 };
+ D3D11_TEXTURE2D_DESC src_desc = { 0 };
+ D3D11_TEXTURE2D_DESC dst_desc = {
+ .Width = 2,
@ -96,28 +101,43 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ if (!priv || !device_hwctx || !src_texture)
+ return AVERROR(EINVAL);
+
+ hr = ID3D11Device_CreateQuery(device_hwctx->device, &query,
+ (ID3D11Query **)&priv->sync_point);
+ if (FAILED(hr)) {
+ av_log(logctx, AV_LOG_ERROR, "Could not create the sync point (%lx)\n", (long)hr);
+ goto fail;
+ ID3D11Texture2D_GetDesc(src_texture, &src_desc);
+ if (priv->sync_tex_2x2) {
+ ID3D11Texture2D_GetDesc(priv->sync_tex_2x2, &cur_desc);
+ if (src_desc.Format != cur_desc.Format) {
+ ID3D11Texture2D_Release(priv->sync_tex_2x2);
+ priv->sync_tex_2x2 = NULL;
+ }
+ }
+ if (!priv->sync_tex_2x2) {
+ dst_desc.Format = src_desc.Format;
+ hr = ID3D11Device_CreateTexture2D(device_hwctx->device,
+ &dst_desc, NULL, &priv->sync_tex_2x2);
+ if (FAILED(hr)) {
+ av_log(logctx, AV_LOG_ERROR, "Could not create the sync texture (%lx)\n", (long)hr);
+ goto fail;
+ }
+ }
+
+ ID3D11Texture2D_GetDesc(src_texture, &src_desc);
+ dst_desc.Format = src_desc.Format;
+ hr = ID3D11Device_CreateTexture2D(device_hwctx->device,
+ &dst_desc, NULL, &priv->sync_tex_2x2);
+ if (FAILED(hr)) {
+ av_log(logctx, AV_LOG_ERROR, "Could not create the sync texture (%lx)\n", (long)hr);
+ goto fail;
+ if (!priv->sync_point) {
+ hr = ID3D11Device_CreateQuery(device_hwctx->device, &query,
+ (ID3D11Query **)&priv->sync_point);
+ if (FAILED(hr)) {
+ av_log(logctx, AV_LOG_ERROR, "Could not create the sync point (%lx)\n", (long)hr);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ if (priv->sync_point)
+ ID3D11Asynchronous_Release(priv->sync_point);
+ if (priv->sync_tex_2x2)
+ if (priv->sync_tex_2x2) {
+ ID3D11Texture2D_Release(priv->sync_tex_2x2);
+ priv->sync_tex_2x2 = NULL;
+ }
+ if (priv->sync_point) {
+ ID3D11Asynchronous_Release(priv->sync_point);
+ priv->sync_point = NULL;
+ }
+ return AVERROR_UNKNOWN;
+}
+
@ -155,59 +175,23 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
#if CONFIG_LIBMFX
static void opencl_unmap_from_d3d11_qsv(AVHWFramesContext *dst_fc,
@@ -2603,6 +2688,13 @@ static void opencl_unmap_from_d3d11_qsv(
@@ -2623,6 +2728,14 @@ static void opencl_unmap_from_d3d11_qsv(
static int opencl_map_from_d3d11_qsv(AVHWFramesContext *dst_fc, AVFrame *dst,
const AVFrame *src, int flags)
{
+ AVHWFramesContext *src_fc =
+ (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVHWDeviceContext *src_dev = src_fc->device_ctx;
+ FFHWDeviceContext *fsrc_dev = (FFHWDeviceContext*)src_dev;
+ AVHWDeviceContext *src_subdev =
+ (AVHWDeviceContext*)src_dev->internal->source_device->data;
+ (AVHWDeviceContext*)fsrc_dev->source_device->data;
+ AVD3D11VADeviceContext *device_hwctx = src_subdev->hwctx;
+ AVQSVFramesContext *src_hwctx = src_fc->hwctx;
AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
@@ -2630,6 +2722,14 @@ static int opencl_map_from_d3d11_qsv(AVH
return AVERROR(EINVAL);
}
+ if (src_hwctx->require_sync &&
+ frames_priv->sync_point && frames_priv->sync_tex_2x2) {
+ opencl_sync_d3d11_texture(frames_priv,
+ device_hwctx,
+ tex, (decoder_target ? index : 0),
+ dst_fc);
+ }
+
if (decoder_target) {
desc = &frames_priv->mapped_frames[index];
} else {
@@ -2701,6 +2801,10 @@ fail2:
static int opencl_frames_derive_from_d3d11_qsv(AVHWFramesContext *dst_fc,
AVHWFramesContext *src_fc, int flags)
{
+ AVHWDeviceContext *src_dev = src_fc->device_ctx;
+ AVHWDeviceContext *src_subdev =
+ (AVHWDeviceContext*)src_dev->internal->source_device->data;
+ AVD3D11VADeviceContext *device_hwctx = src_subdev->hwctx;
AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
AVQSVFramesContext *src_hwctx = src_fc->hwctx;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
@@ -2709,8 +2813,8 @@ static int opencl_frames_derive_from_d3d
cl_int cle;
int err, i, p, nb_planes = 2;
- mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
- ID3D11Texture2D *tex = (ID3D11Texture2D*)pair->first;
+ mfxHDLPair *pair = (mfxHDLPair *)src_hwctx->surfaces[0].Data.MemId;
+ ID3D11Texture2D *tex = (ID3D11Texture2D *)pair->first;
if (src_fc->sw_format != AV_PIX_FMT_NV12 &&
src_fc->sw_format != AV_PIX_FMT_P010) {
@@ -2725,6 +2829,14 @@ static int opencl_frames_derive_from_d3d
return AVERROR(EINVAL);
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
OpenCLFramesContext *frames_priv = dst_fc->hwctx;
AVOpenCLDeviceContext *dst_dev = &device_priv->p;
@@ -2652,6 +2765,21 @@ static int opencl_map_from_d3d11_qsv(AVH
}
}
+ if (src_hwctx->require_sync) {
@ -216,21 +200,19 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ tex, dst_fc);
+ if (err < 0)
+ return err;
+
+ if (frames_priv->sync_point || frames_priv->sync_tex_2x2) {
+ opencl_sync_d3d11_texture(frames_priv,
+ device_hwctx,
+ tex, (derived_frames ? index : 0),
+ dst_fc);
+ }
+ }
+
if (!(src_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET) ||
(src_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
(src_hwctx->frame_type & MFX_MEMTYPE_FROM_VPPOUT)) {
@@ -2748,6 +2860,8 @@ static int opencl_frames_derive_from_d3d
for (i = 0; i < frames_priv->nb_mapped_frames; i++) {
AVOpenCLFrameDescriptor *desc = &frames_priv->mapped_frames[i];
desc->nb_planes = nb_planes;
+ pair = (mfxHDLPair *)src_hwctx->surfaces[i].Data.MemId;
+ tex = (ID3D11Texture2D *)pair->first;
for (p = 0; p < nb_planes; p++) {
UINT subresource = 2 * i + p;
@@ -2816,6 +2930,10 @@ static void opencl_unmap_from_d3d11(AVHW
if (derived_frames) {
desc = &frames_priv->mapped_frames[index];
} else {
@@ -2843,6 +2971,10 @@ static void opencl_unmap_from_d3d11(AVHW
static int opencl_map_from_d3d11(AVHWFramesContext *dst_fc, AVFrame *dst,
const AVFrame *src, int flags)
{
@ -238,10 +220,10 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVD3D11VAFramesContext *src_hwctx = src_fc->hwctx;
+ AVD3D11VADeviceContext *device_hwctx = src_fc->device_ctx->hwctx;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
OpenCLFramesContext *frames_priv = dst_fc->internal->priv;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
OpenCLFramesContext *frames_priv = dst_fc->hwctx;
AVOpenCLFrameDescriptor *desc;
@@ -2846,6 +2964,14 @@ static int opencl_map_from_d3d11(AVHWFra
@@ -2873,6 +3005,14 @@ static int opencl_map_from_d3d11(AVHWFra
mem_objs = device_priv->d3d11_map_amd ? &desc->planes[nb_planes]
: desc->planes;
@ -249,22 +231,22 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
+ frames_priv->sync_point && frames_priv->sync_tex_2x2) {
+ opencl_sync_d3d11_texture(frames_priv,
+ device_hwctx,
+ (ID3D11Texture2D *)src->data[0], index,
+ (ID3D11Texture2D*)src->data[0], index,
+ dst_fc);
+ }
+
cle = device_priv->clEnqueueAcquireD3D11ObjectsKHR(
frames_priv->command_queue, num_objs, mem_objs,
0, NULL, &event);
@@ -2885,6 +3011,7 @@ fail:
@@ -2912,6 +3052,7 @@ fail:
static int opencl_frames_derive_from_d3d11(AVHWFramesContext *dst_fc,
AVHWFramesContext *src_fc, int flags)
{
+ AVD3D11VADeviceContext *device_hwctx = src_fc->device_ctx->hwctx;
AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
AVD3D11VAFramesContext *src_hwctx = src_fc->hwctx;
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->internal->priv;
@@ -2928,6 +3055,14 @@ static int opencl_frames_derive_from_d3d
OpenCLDeviceContext *device_priv = dst_fc->device_ctx->hwctx;
AVOpenCLDeviceContext *dst_dev = &device_priv->p;
@@ -2954,6 +3095,14 @@ static int opencl_frames_derive_from_d3d
if (!frames_priv->mapped_frames)
return AVERROR(ENOMEM);
@ -279,11 +261,19 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_opencl.c
for (i = 0; i < frames_priv->nb_mapped_frames; i++) {
AVOpenCLFrameDescriptor *desc = &frames_priv->mapped_frames[i];
desc->nb_planes = nb_planes;
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
Index: FFmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -1868,6 +1868,7 @@ static int qsv_frames_derive_to(AVHWFram
--- FFmpeg.orig/libavutil/hwcontext_qsv.c
+++ FFmpeg/libavutil/hwcontext_qsv.c
@@ -2016,6 +2016,7 @@ static int qsv_dynamic_frames_derive_to(
} else {
dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
}
+ dst_hwctx->require_sync = src_hwctx->require_sync;
}
break;
#endif
@@ -2091,6 +2092,7 @@ static int qsv_fixed_frames_derive_to(AV
} else {
dst_hwctx->frame_type |= MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
}
@ -291,14 +281,14 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
}
break;
#endif
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.h
Index: FFmpeg/libavutil/hwcontext_qsv.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.h
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.h
@@ -64,6 +64,11 @@ typedef struct AVQSVFramesContext {
* A combination of MFX_MEMTYPE_* describing the frame pool.
--- FFmpeg.orig/libavutil/hwcontext_qsv.h
+++ FFmpeg/libavutil/hwcontext_qsv.h
@@ -81,6 +81,11 @@ typedef struct AVQSVFramesContext {
* pool have the same mfxFrameInfo.
*/
int frame_type;
mfxFrameInfo *info;
+
+ /**
+ * Whether the frames require extra sync when exporting as external memory.

View File

@ -1,44 +0,0 @@
Index: jellyfin-ffmpeg/libavformat/assenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/assenc.c
+++ jellyfin-ffmpeg/libavformat/assenc.c
@@ -25,6 +25,7 @@
#include "mux.h"
#include "libavutil/opt.h"
+#include "libavutil/mem.h"
typedef struct DialogueLine {
int readorder;
@@ -56,6 +57,7 @@ static int write_header(AVFormatContext
avpriv_set_pts_info(s->streams[0], 64, 1, 100);
if (par->extradata_size > 0) {
size_t header_size = par->extradata_size;
+ char *header_string = NULL;
uint8_t *trailer = strstr(par->extradata, "\n[Events]");
if (trailer)
@@ -70,9 +72,20 @@ static int write_header(AVFormatContext
ass->trailer = trailer;
}
- avio_write(s->pb, par->extradata, header_size);
- if (par->extradata[header_size - 1] != '\n')
- avio_write(s->pb, "\r\n", 2);
+ header_string = av_malloc(header_size + 1);
+ if (!header_string)
+ return AVERROR(ENOMEM);
+
+ memcpy(header_string, par->extradata, header_size);
+ header_string[header_size] = 0;
+
+ avio_printf(s->pb, "%s", header_string);
+
+ if (header_string[strlen(header_string) - 1] != '\n')
+ avio_printf(s->pb, "\r\n");
+
+ av_free(header_string);
+
ass->ssa_mode = !strstr(par->extradata, "\n[V4+ Styles]");
if (!strstr(par->extradata, "\n[Events]"))
avio_printf(s->pb, "[Events]\r\nFormat: %s, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n",

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/fftools/Makefile
Index: FFmpeg/fftools/Makefile
===================================================================
--- jellyfin-ffmpeg.orig/fftools/Makefile
+++ jellyfin-ffmpeg/fftools/Makefile
@@ -35,6 +35,12 @@ endef
--- FFmpeg.orig/fftools/Makefile
+++ FFmpeg/fftools/Makefile
@@ -40,6 +40,12 @@ endef
$(foreach P,$(AVPROGS-yes),$(eval $(call DOFFTOOL,$(P))))
@ -15,10 +15,10 @@ Index: jellyfin-ffmpeg/fftools/Makefile
all: $(AVPROGS)
fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools
Index: jellyfin-ffmpeg/fftools/fftoolsres.rc
Index: FFmpeg/fftools/fftoolsres.rc
===================================================================
--- jellyfin-ffmpeg.orig/fftools/fftoolsres.rc
+++ jellyfin-ffmpeg/fftools/fftoolsres.rc
--- FFmpeg.orig/fftools/fftoolsres.rc
+++ FFmpeg/fftools/fftoolsres.rc
@@ -1,2 +1,34 @@
#include <windows.h>
+#include "libavutil/version.h"

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavformat/movenc.c
Index: FFmpeg/libavformat/movenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/movenc.c
+++ jellyfin-ffmpeg/libavformat/movenc.c
@@ -2716,7 +2716,7 @@ static int mov_write_stbl_tag(AVFormatCo
--- FFmpeg.orig/libavformat/movenc.c
+++ FFmpeg/libavformat/movenc.c
@@ -2908,7 +2908,7 @@ static int mov_write_stbl_tag(AVFormatCo
track->par->codec_tag == MKTAG('r','t','p',' ')) &&
track->has_keyframes && track->has_keyframes < track->entry)
mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE);

View File

@ -0,0 +1,68 @@
Index: FFmpeg/libavfilter/vf_scale_vt.c
===================================================================
--- FFmpeg.orig/libavfilter/vf_scale_vt.c
+++ FFmpeg/libavfilter/vf_scale_vt.c
@@ -40,11 +40,26 @@ typedef struct ScaleVtContext {
enum AVColorPrimaries colour_primaries;
enum AVColorTransferCharacteristic colour_transfer;
enum AVColorSpace colour_matrix;
+ enum AVPixelFormat format;
char *colour_primaries_string;
char *colour_transfer_string;
char *colour_matrix_string;
} ScaleVtContext;
+static const enum AVPixelFormat supported_formats[] = {
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_NONE,
+};
+
+static int format_is_supported(enum AVPixelFormat fmt)
+{
+ for (int i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
+ if (supported_formats[i] == fmt)
+ return 1;
+ return 0;
+}
+
static av_cold int scale_vt_init(AVFilterContext *avctx)
{
ScaleVtContext *s = avctx->priv;
@@ -179,6 +194,7 @@ static int scale_vt_config_output(AVFilt
AVFilterLink *inlink = outlink->src->inputs[0];
AVHWFramesContext *hw_frame_ctx_in;
AVHWFramesContext *hw_frame_ctx_out;
+ enum AVPixelFormat out_format;
err = ff_scale_eval_dimensions(s, s->w_expr, s->h_expr, inlink, outlink,
&s->output_width,
@@ -198,11 +214,18 @@ static int scale_vt_config_output(AVFilt
hw_frame_ctx_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+ out_format = (s->format == AV_PIX_FMT_NONE) ? hw_frame_ctx_in->sw_format : s->format;
+ if (!format_is_supported(s->format)) {
+ av_log(s, AV_LOG_ERROR, "Unsupported output format: %s\n",
+ av_get_pix_fmt_name(out_format));
+ return AVERROR(ENOSYS);
+ }
+
av_buffer_unref(&outlink->hw_frames_ctx);
outlink->hw_frames_ctx = av_hwframe_ctx_alloc(hw_frame_ctx_in->device_ref);
hw_frame_ctx_out = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
hw_frame_ctx_out->format = AV_PIX_FMT_VIDEOTOOLBOX;
- hw_frame_ctx_out->sw_format = hw_frame_ctx_in->sw_format;
+ hw_frame_ctx_out->sw_format = out_format;
hw_frame_ctx_out->width = outlink->w;
hw_frame_ctx_out->height = outlink->h;
@@ -234,6 +257,8 @@ static const AVOption scale_vt_options[]
OFFSET(colour_primaries_string), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
{ "color_transfer", "Output colour transfer characteristics",
OFFSET(colour_transfer_string), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "format", "Output pixel format",
+ OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, .flags = FLAGS },
{ NULL },
};

View File

@ -1,80 +0,0 @@
Index: jellyfin-ffmpeg/libavformat/webvttdec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/webvttdec.c
+++ jellyfin-ffmpeg/libavformat/webvttdec.c
@@ -60,7 +60,7 @@ static int64_t read_ts(const char *s)
static int webvtt_read_header(AVFormatContext *s)
{
WebVTTContext *webvtt = s->priv_data;
- AVBPrint cue;
+ AVBPrint cue, header;
int res = 0;
AVStream *st = avformat_new_stream(s, NULL);
@@ -72,6 +72,7 @@ static int webvtt_read_header(AVFormatCo
st->disposition |= webvtt->kind;
av_bprint_init(&cue, 0, AV_BPRINT_SIZE_UNLIMITED);
+ av_bprint_init(&header, 0, AV_BPRINT_SIZE_UNLIMITED);
for (;;) {
int i;
@@ -89,12 +90,18 @@ static int webvtt_read_header(AVFormatCo
p = identifier = cue.str;
pos = avio_tell(s->pb);
- /* ignore header chunk */
+ /* ignore the magic word and any comments */
if (!strncmp(p, "\xEF\xBB\xBFWEBVTT", 9) ||
!strncmp(p, "WEBVTT", 6) ||
!strncmp(p, "NOTE", 4))
continue;
+ /* store the style and region blocks from the header */
+ if (!strncmp(p, "STYLE", 5) || !strncmp(p, "REGION", 6)) {
+ av_bprintf(&header, "%s%s", header.len ? "\n\n" : "", p);
+ continue;
+ }
+
/* optional cue identifier (can be a number like in SRT or some kind of
* chaptering id) */
for (i = 0; p[i] && p[i] != '\n' && p[i] != '\r'; i++) {
@@ -161,10 +168,15 @@ static int webvtt_read_header(AVFormatCo
SET_SIDE_DATA(settings, AV_PKT_DATA_WEBVTT_SETTINGS);
}
+ res = ff_bprint_to_codecpar_extradata(st->codecpar, &header);
+ if (res < 0)
+ goto end;
+
ff_subtitles_queue_finalize(s, &webvtt->q);
end:
av_bprint_finalize(&cue, NULL);
+ av_bprint_finalize(&header, NULL);
return res;
}
Index: jellyfin-ffmpeg/libavformat/webvttenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/webvttenc.c
+++ jellyfin-ffmpeg/libavformat/webvttenc.c
@@ -59,6 +59,18 @@ static int webvtt_write_header(AVFormatC
avio_printf(pb, "WEBVTT\n");
+ if (par->extradata_size > 0) {
+ size_t header_size = par->extradata_size;
+
+ if (par->extradata[0] != '\n')
+ avio_printf(pb, "\n");
+
+ avio_write(pb, par->extradata, header_size);
+
+ if (par->extradata[header_size - 1] != '\n')
+ avio_printf(pb, "\n");
+ }
+
return 0;
}

View File

@ -0,0 +1,513 @@
Index: FFmpeg/libavcodec/videotoolboxenc.c
===================================================================
--- FFmpeg.orig/libavcodec/videotoolboxenc.c
+++ FFmpeg/libavcodec/videotoolboxenc.c
@@ -226,9 +226,9 @@ typedef struct ExtraSEI {
typedef struct BufNode {
CMSampleBufferRef cm_buffer;
- ExtraSEI *sei;
+ ExtraSEI sei;
+ AVBufferRef *frame_buf;
struct BufNode* next;
- int error;
} BufNode;
typedef struct VTEncContext {
@@ -261,7 +261,7 @@ typedef struct VTEncContext {
int realtime;
int frames_before;
int frames_after;
- bool constant_bit_rate;
+ int constant_bit_rate;
int allow_sw;
int require_sw;
@@ -280,6 +280,18 @@ typedef struct VTEncContext {
int max_ref_frames;
} VTEncContext;
+static void vtenc_free_buf_node(BufNode *info)
+{
+ if (!info)
+ return;
+
+ av_free(info->sei.data);
+ if (info->cm_buffer)
+ CFRelease(info->cm_buffer);
+ av_buffer_unref(&info->frame_buf);
+ av_free(info);
+}
+
static int vt_dump_encoder(AVCodecContext *avctx)
{
VTEncContext *vtctx = avctx->priv_data;
@@ -347,8 +359,7 @@ static void set_async_error(VTEncContext
while (info) {
BufNode *next = info->next;
- CFRelease(info->cm_buffer);
- av_free(info);
+ vtenc_free_buf_node(info);
info = next;
}
@@ -388,7 +399,7 @@ static void vtenc_reset(VTEncContext *vt
}
}
-static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
+static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI *sei)
{
BufNode *info;
@@ -426,31 +437,18 @@ static int vtenc_q_pop(VTEncContext *vtc
pthread_mutex_unlock(&vtctx->lock);
*buf = info->cm_buffer;
+ info->cm_buffer = NULL;
if (sei && *buf) {
*sei = info->sei;
- } else if (info->sei) {
- if (info->sei->data) av_free(info->sei->data);
- av_free(info->sei);
+ info->sei = (ExtraSEI) {0};
}
- av_free(info);
-
+ vtenc_free_buf_node(info);
return 0;
}
-static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
+static void vtenc_q_push(VTEncContext *vtctx, BufNode *info)
{
- BufNode *info = av_malloc(sizeof(BufNode));
- if (!info) {
- set_async_error(vtctx, AVERROR(ENOMEM));
- return;
- }
-
- CFRetain(buffer);
- info->cm_buffer = buffer;
- info->sei = sei;
- info->next = NULL;
-
pthread_mutex_lock(&vtctx->lock);
if (!vtctx->q_head) {
@@ -735,13 +733,16 @@ static void vtenc_output_callback(
{
AVCodecContext *avctx = ctx;
VTEncContext *vtctx = avctx->priv_data;
- ExtraSEI *sei = sourceFrameCtx;
+ BufNode *info = sourceFrameCtx;
+ av_buffer_unref(&info->frame_buf);
if (vtctx->async_error) {
+ vtenc_free_buf_node(info);
return;
}
if (status) {
+ vtenc_free_buf_node(info);
av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
set_async_error(vtctx, AVERROR_EXTERNAL);
return;
@@ -751,15 +752,19 @@ static void vtenc_output_callback(
return;
}
+ CFRetain(sample_buffer);
+ info->cm_buffer = sample_buffer;
+
if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
int set_status = set_extradata(avctx, sample_buffer);
if (set_status) {
+ vtenc_free_buf_node(info);
set_async_error(vtctx, set_status);
return;
}
}
- vtenc_q_push(vtctx, sample_buffer, sei);
+ vtenc_q_push(vtctx, info);
}
static int get_length_code_size(
@@ -2449,7 +2454,8 @@ static int copy_avframe_to_pixel_buffer(
static int create_cv_pixel_buffer(AVCodecContext *avctx,
const AVFrame *frame,
- CVPixelBufferRef *cv_img)
+ CVPixelBufferRef *cv_img,
+ BufNode *node)
{
int plane_count;
int color;
@@ -2468,6 +2474,12 @@ static int create_cv_pixel_buffer(AVCode
av_assert0(*cv_img);
CFRetain(*cv_img);
+ if (frame->buf[0]) {
+ node->frame_buf = av_buffer_ref(frame->buf[0]);
+ if (!node->frame_buf)
+ return AVERROR(ENOMEM);
+ }
+
return 0;
}
@@ -2565,33 +2577,29 @@ static int vtenc_send_frame(AVCodecConte
const AVFrame *frame)
{
CMTime time;
- CFDictionaryRef frame_dict;
+ CFDictionaryRef frame_dict = NULL;
CVPixelBufferRef cv_img = NULL;
AVFrameSideData *side_data = NULL;
- ExtraSEI *sei = NULL;
- int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
+ BufNode *node = av_mallocz(sizeof(*node));
+ int status;
- if (status) return status;
+ if (!node)
+ return AVERROR(ENOMEM);
+
+ status = create_cv_pixel_buffer(avctx, frame, &cv_img, node);
+ if (status)
+ goto out;
status = create_encoder_dict_h264(frame, &frame_dict);
- if (status) {
- CFRelease(cv_img);
- return status;
- }
+ if (status)
+ goto out;
#if CONFIG_ATSC_A53
side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
if (vtctx->a53_cc && side_data && side_data->size) {
- sei = av_mallocz(sizeof(*sei));
- if (!sei) {
- av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
- } else {
- int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
- av_free(sei);
- sei = NULL;
- }
+ status = ff_alloc_a53_sei(frame, 0, &node->sei.data, &node->sei.size);
+ if (status < 0) {
+ goto out;
}
}
#endif
@@ -2603,19 +2611,26 @@ static int vtenc_send_frame(AVCodecConte
time,
kCMTimeInvalid,
frame_dict,
- sei,
+ node,
NULL
);
- if (frame_dict) CFRelease(frame_dict);
- CFRelease(cv_img);
-
if (status) {
av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
- return AVERROR_EXTERNAL;
+ status = AVERROR_EXTERNAL;
+ // Not necessary, just in case new code put after here
+ goto out;
}
- return 0;
+out:
+ if (frame_dict)
+ CFRelease(frame_dict);
+ if (cv_img)
+ CFRelease(cv_img);
+ if (status)
+ vtenc_free_buf_node(node);
+
+ return status;
}
static av_cold int vtenc_frame(
@@ -2628,7 +2643,7 @@ static av_cold int vtenc_frame(
bool get_frame;
int status;
CMSampleBufferRef buf = NULL;
- ExtraSEI *sei = NULL;
+ ExtraSEI sei = {0};
if (frame) {
status = vtenc_send_frame(avctx, vtctx, frame);
@@ -2669,11 +2684,8 @@ static av_cold int vtenc_frame(
if (status) goto end_nopkt;
if (!buf) goto end_nopkt;
- status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
- if (sei) {
- if (sei->data) av_free(sei->data);
- av_free(sei);
- }
+ status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei.data ? &sei : NULL);
+ av_free(sei.data);
CFRelease(buf);
if (status) goto end_nopkt;
@@ -2698,6 +2710,10 @@ static int vtenc_populate_extradata(AVCo
CVPixelBufferRef pix_buf = NULL;
CMTime time;
CMSampleBufferRef buf = NULL;
+ BufNode *node = av_mallocz(sizeof(*node));
+
+ if (!node)
+ return AVERROR(ENOMEM);
status = vtenc_create_encoder(avctx,
codec_type,
@@ -2733,7 +2749,7 @@ static int vtenc_populate_extradata(AVCo
time,
kCMTimeInvalid,
NULL,
- NULL,
+ node,
NULL);
if (status) {
@@ -2744,6 +2760,7 @@ static int vtenc_populate_extradata(AVCo
status = AVERROR_EXTERNAL;
goto pe_cleanup;
}
+ node = NULL;
//Populates extradata - output frames are flushed and param sets are available.
status = VTCompressionSessionCompleteFrames(vtctx->session,
@@ -2766,10 +2783,19 @@ static int vtenc_populate_extradata(AVCo
pe_cleanup:
CVPixelBufferRelease(pix_buf);
- vtenc_reset(vtctx);
+
+ if (status) {
+ vtenc_reset(vtctx);
+ } else if (vtctx->session) {
+ CFRelease(vtctx->session);
+ vtctx->session = NULL;
+ }
+
vtctx->frame_ct_out = 0;
av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
+ if (!status)
+ vtenc_free_buf_node(node);
return status;
}
Index: FFmpeg/libavutil/hwcontext_videotoolbox.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_videotoolbox.c
+++ FFmpeg/libavutil/hwcontext_videotoolbox.c
@@ -342,8 +342,10 @@ static int vt_pixbuf_set_par(void *log_c
CFNumberRef num = NULL, den = NULL;
AVRational avpar = src->sample_aspect_ratio;
- if (avpar.num == 0)
+ if (avpar.num == 0) {
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferPixelAspectRatioKey);
return 0;
+ }
av_reduce(&avpar.num, &avpar.den,
avpar.num, avpar.den,
@@ -423,7 +425,10 @@ static int vt_pixbuf_set_chromaloc(void
kCVImageBufferChromaLocationTopFieldKey,
loc,
kCVAttachmentMode_ShouldPropagate);
- }
+ } else
+ CVBufferRemoveAttachment(
+ pixbuf,
+ kCVImageBufferChromaLocationTopFieldKey);
return 0;
}
@@ -527,59 +532,116 @@ CFStringRef av_map_videotoolbox_color_tr
}
}
+/**
+ * Copy all attachments for the specified mode from the given buffer.
+ */
+static CFDictionaryRef vt_cv_buffer_copy_attachments(CVBufferRef buffer,
+ CVAttachmentMode attachment_mode)
+{
+ CFDictionaryRef dict;
+
+ // Check that our SDK is at least macOS 12 / iOS 15 / tvOS 15
+ #if (TARGET_OS_OSX && defined(__MAC_12_0) && __MAC_OS_X_VERSION_MAX_ALLOWED >= __MAC_12_0) || \
+ (TARGET_OS_IOS && defined(__IPHONE_15_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_15_0) || \
+ (TARGET_OS_TV && defined(__TVOS_15_0) && __TV_OS_VERSION_MAX_ALLOWED >= __TVOS_15_0)
+ // On recent enough versions, just use the respective API
+ if (__builtin_available(macOS 12.0, iOS 15.0, tvOS 15.0, *))
+ return CVBufferCopyAttachments(buffer, attachment_mode);
+ #endif
+
+ // Check that the target is lower than macOS 12 / iOS 15 / tvOS 15
+ // else this would generate a deprecation warning and anyway never run because
+ // the runtime availability check above would be always true.
+ #if (TARGET_OS_OSX && (!defined(__MAC_12_0) || __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_12_0)) || \
+ (TARGET_OS_IOS && (!defined(__IPHONE_15_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_15_0)) || \
+ (TARGET_OS_TV && (!defined(__TVOS_15_0) || __TV_OS_VERSION_MIN_REQUIRED < __TVOS_15_0))
+ // Fallback on SDKs or runtime versions < macOS 12 / iOS 15 / tvOS 15
+ dict = CVBufferGetAttachments(buffer, attachment_mode);
+ return (dict) ? CFDictionaryCreateCopy(NULL, dict) : NULL;
+ #else
+ return NULL; // Impossible, just make the compiler happy
+ #endif
+}
+
static int vt_pixbuf_set_colorspace(void *log_ctx,
CVPixelBufferRef pixbuf, const AVFrame *src)
{
+ CGColorSpaceRef colorspace = NULL;
CFStringRef colormatrix = NULL, colorpri = NULL, colortrc = NULL;
Float32 gamma = 0;
colormatrix = av_map_videotoolbox_color_matrix_from_av(src->colorspace);
- if (!colormatrix && src->colorspace != AVCOL_SPC_UNSPECIFIED)
- av_log(log_ctx, AV_LOG_WARNING, "Color space %s is not supported.\n", av_color_space_name(src->colorspace));
+ if (colormatrix)
+ CVBufferSetAttachment(pixbuf, kCVImageBufferYCbCrMatrixKey,
+ colormatrix, kCVAttachmentMode_ShouldPropagate);
+ else {
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferYCbCrMatrixKey);
+ if (src->colorspace != AVCOL_SPC_UNSPECIFIED)
+ av_log(log_ctx, AV_LOG_WARNING,
+ "Color space %s is not supported.\n",
+ av_color_space_name(src->colorspace));
+ }
colorpri = av_map_videotoolbox_color_primaries_from_av(src->color_primaries);
- if (!colorpri && src->color_primaries != AVCOL_PRI_UNSPECIFIED)
- av_log(log_ctx, AV_LOG_WARNING, "Color primaries %s is not supported.\n", av_color_primaries_name(src->color_primaries));
+ if (colorpri)
+ CVBufferSetAttachment(pixbuf, kCVImageBufferColorPrimariesKey,
+ colorpri, kCVAttachmentMode_ShouldPropagate);
+ else {
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferColorPrimariesKey);
+ if (src->color_primaries != AVCOL_SPC_UNSPECIFIED)
+ av_log(log_ctx, AV_LOG_WARNING,
+ "Color primaries %s is not supported.\n",
+ av_color_primaries_name(src->color_primaries));
+ }
colortrc = av_map_videotoolbox_color_trc_from_av(src->color_trc);
- if (!colortrc && src->color_trc != AVCOL_TRC_UNSPECIFIED)
- av_log(log_ctx, AV_LOG_WARNING, "Color transfer function %s is not supported.\n", av_color_transfer_name(src->color_trc));
+ if (colortrc)
+ CVBufferSetAttachment(pixbuf, kCVImageBufferTransferFunctionKey,
+ colortrc, kCVAttachmentMode_ShouldPropagate);
+ else {
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferTransferFunctionKey);
+ if (src->color_trc != AVCOL_TRC_UNSPECIFIED)
+ av_log(log_ctx, AV_LOG_WARNING,
+ "Color transfer function %s is not supported.\n",
+ av_color_transfer_name(src->color_trc));
+ }
if (src->color_trc == AVCOL_TRC_GAMMA22)
gamma = 2.2;
else if (src->color_trc == AVCOL_TRC_GAMMA28)
gamma = 2.8;
- if (colormatrix) {
- CVBufferSetAttachment(
- pixbuf,
- kCVImageBufferYCbCrMatrixKey,
- colormatrix,
- kCVAttachmentMode_ShouldPropagate);
- }
- if (colorpri) {
- CVBufferSetAttachment(
- pixbuf,
- kCVImageBufferColorPrimariesKey,
- colorpri,
- kCVAttachmentMode_ShouldPropagate);
- }
- if (colortrc) {
- CVBufferSetAttachment(
- pixbuf,
- kCVImageBufferTransferFunctionKey,
- colortrc,
- kCVAttachmentMode_ShouldPropagate);
- }
if (gamma != 0) {
CFNumberRef gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
- CVBufferSetAttachment(
- pixbuf,
- kCVImageBufferGammaLevelKey,
- gamma_level,
- kCVAttachmentMode_ShouldPropagate);
+ CVBufferSetAttachment(pixbuf, kCVImageBufferGammaLevelKey,
+ gamma_level, kCVAttachmentMode_ShouldPropagate);
CFRelease(gamma_level);
+ } else
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferGammaLevelKey);
+
+#if (TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 100800) || \
+ (TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 100000)
+ if (__builtin_available(macOS 10.8, iOS 10, *)) {
+ CFDictionaryRef attachments =
+ vt_cv_buffer_copy_attachments(pixbuf, kCVAttachmentMode_ShouldPropagate);
+
+ if (attachments) {
+ colorspace =
+ CVImageBufferCreateColorSpaceFromAttachments(attachments);
+ CFRelease(attachments);
+ }
}
+#endif
+
+ // Done outside the above preprocessor code and if's so that
+ // in any case a wrong kCVImageBufferCGColorSpaceKey is removed
+ // if the above code is not used or fails.
+ if (colorspace) {
+ CVBufferSetAttachment(pixbuf, kCVImageBufferCGColorSpaceKey,
+ colorspace, kCVAttachmentMode_ShouldPropagate);
+ CFRelease(colorspace);
+ } else
+ CVBufferRemoveAttachment(pixbuf, kCVImageBufferCGColorSpaceKey);
return 0;
}
Index: FFmpeg/libavutil/hwcontext_videotoolbox.h
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_videotoolbox.h
+++ FFmpeg/libavutil/hwcontext_videotoolbox.h
@@ -90,8 +90,15 @@ CFStringRef av_map_videotoolbox_color_pr
CFStringRef av_map_videotoolbox_color_trc_from_av(enum AVColorTransferCharacteristic trc);
/**
- * Update a CVPixelBufferRef's metadata to based on an AVFrame.
- * Returns 0 if no known equivalent was found.
+ * Set CVPixelBufferRef's metadata based on an AVFrame.
+ *
+ * Sets/unsets the CVPixelBuffer attachments to match as closely as possible the
+ * AVFrame metadata. To prevent inconsistent attachments, the attachments for properties
+ * that could not be matched or are unspecified in the given AVFrame are unset. So if
+ * any attachments already covered by AVFrame metadata need to be set to a specific
+ * value, this should happen after calling this function.
+ *
+ * Returns < 0 in case of an error.
*/
int av_vt_pixbuf_set_attachments(void *log_ctx,
CVPixelBufferRef pixbuf, const struct AVFrame *src);

View File

@ -1,143 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/vf_libplacebo.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_libplacebo.c
+++ jellyfin-ffmpeg/libavfilter/vf_libplacebo.c
@@ -62,6 +62,7 @@ static const struct pl_tone_map_function
typedef struct LibplaceboContext {
/* lavfi vulkan*/
FFVulkanContext vkctx;
+ int initialized;
/* libplacebo */
pl_log log;
@@ -258,25 +259,10 @@ static int init_vulkan(AVFilterContext *
{
int err = 0;
LibplaceboContext *s = avctx->priv;
- const AVHWDeviceContext *avhwctx;
- const AVVulkanDeviceContext *hwctx;
+ const AVVulkanDeviceContext *hwctx = s->vkctx.hwctx;
uint8_t *buf = NULL;
size_t buf_len;
- if (!avctx->hw_device_ctx) {
- av_log(s, AV_LOG_ERROR, "Missing vulkan hwdevice for vf_libplacebo.\n");
- return AVERROR(EINVAL);
- }
-
- avhwctx = (AVHWDeviceContext *) avctx->hw_device_ctx->data;
- if (avhwctx->type != AV_HWDEVICE_TYPE_VULKAN) {
- av_log(s, AV_LOG_ERROR, "Expected vulkan hwdevice for vf_libplacebo, got %s.\n",
- av_hwdevice_get_type_name(avhwctx->type));
- return AVERROR(EINVAL);
- }
-
- hwctx = avhwctx->hwctx;
-
/* Import libavfilter vulkan context into libplacebo */
s->vulkan = pl_vulkan_import(s->log, pl_vulkan_import_params(
.instance = hwctx->inst,
@@ -325,6 +311,7 @@ static int init_vulkan(AVFilterContext *
fail:
if (buf)
av_file_unmap(buf, buf_len);
+ s->initialized = 1;
return err;
}
@@ -340,6 +327,7 @@ static void libplacebo_uninit(AVFilterCo
pl_vulkan_destroy(&s->vulkan);
pl_log_destroy(&s->log);
ff_vk_uninit(&s->vkctx);
+ s->initialized = 0;
s->gpu = NULL;
}
@@ -500,6 +488,8 @@ static int filter_frame(AVFilterLink *li
}
pl_log_level_update(s->log, get_log_level());
+ if (!s->initialized)
+ RET(init_vulkan(ctx));
RET(av_frame_copy_props(out, in));
out->width = outlink->w;
@@ -551,69 +541,6 @@ fail:
return err;
}
-static int libplacebo_query_format(AVFilterContext *ctx)
-{
- int err;
- LibplaceboContext *s = ctx->priv;
- const AVPixFmtDescriptor *desc = NULL;
- AVFilterFormats *infmts = NULL, *outfmts = NULL;
-
- RET(init_vulkan(ctx));
-
- while ((desc = av_pix_fmt_desc_next(desc))) {
- enum AVPixelFormat pixfmt = av_pix_fmt_desc_get_id(desc);
-
-#if PL_API_VER < 232
- // Older libplacebo can't handle >64-bit pixel formats, so safe-guard
- // this to prevent triggering an assertion
- if (av_get_bits_per_pixel(desc) > 64)
- continue;
-#endif
-
- if (!pl_test_pixfmt(s->gpu, pixfmt))
- continue;
-
- RET(ff_add_format(&infmts, pixfmt));
-
- /* Filter for supported output pixel formats */
- if (desc->flags & AV_PIX_FMT_FLAG_BE)
- continue; /* BE formats are not supported by pl_download_avframe */
-
- /* Mask based on user specified format */
- if (s->out_format != AV_PIX_FMT_NONE) {
- if (pixfmt == AV_PIX_FMT_VULKAN && av_vkfmt_from_pixfmt(s->out_format)) {
- /* OK */
- } else if (pixfmt == s->out_format) {
- /* OK */
- } else {
- continue; /* Not OK */
- }
- }
-
- RET(ff_add_format(&outfmts, pixfmt));
- }
-
- if (!infmts || !outfmts) {
- if (s->out_format) {
- av_log(s, AV_LOG_ERROR, "Invalid output format '%s'!\n",
- av_get_pix_fmt_name(s->out_format));
- }
- err = AVERROR(EINVAL);
- goto fail;
- }
-
- RET(ff_formats_ref(infmts, &ctx->inputs[0]->outcfg.formats));
- RET(ff_formats_ref(outfmts, &ctx->outputs[0]->incfg.formats));
- return 0;
-
-fail:
- if (infmts && !infmts->refcount)
- ff_formats_unref(&infmts);
- if (outfmts && !outfmts->refcount)
- ff_formats_unref(&outfmts);
- return err;
-}
-
static int libplacebo_config_input(AVFilterLink *inlink)
{
AVFilterContext *avctx = inlink->dst;
@@ -881,7 +808,7 @@ const AVFilter ff_vf_libplacebo = {
.process_command = &ff_filter_process_command,
FILTER_INPUTS(libplacebo_inputs),
FILTER_OUTPUTS(libplacebo_outputs),
- FILTER_QUERY_FUNC(libplacebo_query_format),
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VULKAN),
.priv_class = &libplacebo_class,
.flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};

File diff suppressed because it is too large Load Diff

View File

@ -1,142 +1,20 @@
From patchwork Wed Feb 21 01:53:42 2024
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Gnattu OC <gnattuoc@me.com>
X-Patchwork-Id: 46405
Delivered-To: ffmpegpatchwork2@gmail.com
Received: by 2002:a05:6a20:1b29:b0:19e:cdac:8cce with SMTP id ch41csp44637pzb;
Tue, 20 Feb 2024 17:54:51 -0800 (PST)
X-Forwarded-Encrypted: i=2;
AJvYcCWVmg9uSOgXDSptUDycJzvuzMfBW0nwtmNODnO5Os0h0qImKu6Joi8mZvHLrSdqYuv4qliU+iuam5MemURDp4D/gHKPhZypuqWRnQ==
X-Google-Smtp-Source:
AGHT+IEnKKUHOZtH/lX3bS/zOhFfTlOltIDmAde6mFLv6NfQLCC1aaS/PPVbCiTfCg8miXQS/JkZ
X-Received: by 2002:a17:906:cd1a:b0:a3e:4093:89df with SMTP id
oz26-20020a170906cd1a00b00a3e409389dfmr6408355ejb.74.1708480491363;
Tue, 20 Feb 2024 17:54:51 -0800 (PST)
ARC-Seal: i=1; a=rsa-sha256; t=1708480491; cv=none;
d=google.com; s=arc-20160816;
b=kJWQpkwS2M3eutHiGxO5RWdw0OXgXeYIQBqag0jK22IcBmYGEw25YfNIZgNKz2xAIK
ZQV6aN+cDYX52RJt+GT5SFHUjK32leIwWlfh0SsOQxiv6yWD3WDCFZnFnmRA1prFU+5R
Sf0IL8IVE275Jh7r3mm0t/ov4KH6Am6AB4NjZk9dTWqN4kB/5K7n/U/qITukrTOj2Ed2
fFM47Gld65XO18c9cp9v3N2OtepCv2+/395Lm3/YBo415ZndWGOrq+YE0E5p1T8aojli
E76HCYiDS5cbpF5tFxx6wG37/CbmndhdJmUsREMX6OcdxJhzqoE4wtg3VtHdjmnJZFl7
y/AA==
ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com;
s=arc-20160816;
h=sender:errors-to:content-transfer-encoding:cc:reply-to:from
:list-subscribe:list-help:list-post:list-archive:list-unsubscribe
:list-id:precedence:subject:mime-version:message-id:date:to
:delivered-to;
bh=omrfyeNpcLW9uawHsYmpqZqpdEwsP7g9Flsbi9eSxXA=;
fh=2gWgGvVN03792RqC4MXCXsB/4cbAJUKJ5Jr7thIsQ3A=;
b=qs0c2W8oNWcsdzgZKs/yyA96g899aSzc+0cPDDl56XEy6wnk9nOSyCc/yIQxqeDSV9
f3ilmLM4/jxwh3RL8n0agvvsAYuWGAZ8hDhlwfVXiYoy0OKI5eQrKr5i4IaolOiiYpKc
i4pLbn2oM5Nm8F+pS1TRUFm01V2X1hDnumYVNc7oJGUM5X4F0jzIGAiN6KJaL4WGIVBr
85KWfcjcx7VkeCUU/+ogzZoIR8GFNoAgFvbwRYITrEZhDoUIEN6tTHE/JyhPAF0KbFPM
hZmlcLFo9WxlIXDKq13Lzdf7XY1c29fUkXtsLq5kvVpV6jgoMHJWU9sx7j3/y3ASc3Mm
hI7A==;
dara=google.com
ARC-Authentication-Results: i=1; mx.google.com;
spf=pass (google.com: domain of ffmpeg-devel-bounces@ffmpeg.org
designates 79.124.17.100 as permitted sender)
smtp.mailfrom=ffmpeg-devel-bounces@ffmpeg.org
Return-Path: <ffmpeg-devel-bounces@ffmpeg.org>
Received: from ffbox0-bg.mplayerhq.hu (ffbox0-bg.ffmpeg.org. [79.124.17.100])
by mx.google.com with ESMTP id
nb18-20020a1709071c9200b00a3ef1214742si1392017ejc.85.2024.02.20.17.54.50;
Tue, 20 Feb 2024 17:54:51 -0800 (PST)
Received-SPF: pass (google.com: domain of ffmpeg-devel-bounces@ffmpeg.org
designates 79.124.17.100 as permitted sender) client-ip=79.124.17.100;
Authentication-Results: mx.google.com;
spf=pass (google.com: domain of ffmpeg-devel-bounces@ffmpeg.org
designates 79.124.17.100 as permitted sender)
smtp.mailfrom=ffmpeg-devel-bounces@ffmpeg.org
Received: from [127.0.1.1] (localhost [127.0.0.1])
by ffbox0-bg.mplayerhq.hu (Postfix) with ESMTP id 3F09568D085;
Wed, 21 Feb 2024 03:54:47 +0200 (EET)
X-Original-To: ffmpeg-devel@ffmpeg.org
Delivered-To: ffmpeg-devel@ffmpeg.org
Received: from mr85p00im-zteg06021501.me.com (mr85p00im-zteg06021501.me.com
[17.58.23.183])
by ffbox0-bg.mplayerhq.hu (Postfix) with ESMTPS id B898368C74E
for <ffmpeg-devel@ffmpeg.org>; Wed, 21 Feb 2024 03:54:40 +0200 (EET)
Received: from Yakumo-Yukari.lan.lan (mr38p00im-dlb-asmtp-mailmevip.me.com
[17.57.152.18])
by mr85p00im-zteg06021501.me.com (Postfix) with ESMTPSA id B44942794090;
Wed, 21 Feb 2024 01:54:37 +0000 (UTC)
To: ffmpeg-devel@ffmpeg.org
Date: Wed, 21 Feb 2024 09:53:42 +0800
Message-Id: <20240221015342.5450-1-gnattuoc@me.com>
X-Mailer: git-send-email 2.39.3 (Apple Git-145)
MIME-Version: 1.0
X-Proofpoint-ORIG-GUID: 0zEgxigYbdxf8WO7rCxYHRqRobHUPHuf
X-Proofpoint-GUID: 0zEgxigYbdxf8WO7rCxYHRqRobHUPHuf
X-Proofpoint-Virus-Version: vendor=baseguard
engine=ICAP:2.0.272,Aquarius:18.0.1011,Hydra:6.0.619,FMLib:17.11.176.26
definitions=2024-02-20_06,2024-02-20_01,2023-05-22_02
X-Proofpoint-Spam-Details: rule=notspam policy=default score=0 adultscore=0
bulkscore=0 spamscore=0
phishscore=0 mlxlogscore=999 suspectscore=0 mlxscore=0 clxscore=1015
malwarescore=0 classifier=spam adjust=0 reason=mlx scancount=1
engine=8.19.0-2308100000 definitions=main-2402210013
Subject: [FFmpeg-devel] [PATCH v3] avfilter: add vf_overlay_videotoolbox
X-BeenThere: ffmpeg-devel@ffmpeg.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: FFmpeg development discussions and patches <ffmpeg-devel.ffmpeg.org>
List-Unsubscribe: <https://ffmpeg.org/mailman/options/ffmpeg-devel>,
<mailto:ffmpeg-devel-request@ffmpeg.org?subject=unsubscribe>
List-Archive: <https://ffmpeg.org/pipermail/ffmpeg-devel>
List-Post: <mailto:ffmpeg-devel@ffmpeg.org>
List-Help: <mailto:ffmpeg-devel-request@ffmpeg.org?subject=help>
List-Subscribe: <https://ffmpeg.org/mailman/listinfo/ffmpeg-devel>,
<mailto:ffmpeg-devel-request@ffmpeg.org?subject=subscribe>
X-Patchwork-Original-From: gnattu via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
From: Gnattu OC <gnattuoc@me.com>
Reply-To: FFmpeg development discussions and patches <ffmpeg-devel@ffmpeg.org>
Cc: gnattu <gnattuoc@me.com>
Errors-To: ffmpeg-devel-bounces@ffmpeg.org
Sender: "ffmpeg-devel" <ffmpeg-devel-bounces@ffmpeg.org>
X-TUID: 0+JgYL+d/2FH
Overlay filter for VideoToolbox hwframes. Unlike most hardware
overlay filters, this filter does not require the two inputs to
have the same pixel format; instead, it will perform format
conversion automatically with hardware accelerated methods.
Signed-off-by: Gnattu OC <gnattuoc@me.com>
---
Changelog | 1 +
configure | 1 +
doc/filters.texi | 52 ++
libavfilter/Makefile | 3 +
libavfilter/allfilters.c | 1 +
libavfilter/metal/utils.h | 1 -
libavfilter/metal/utils.m | 7 +-
.../metal/vf_overlay_videotoolbox.metal | 58 ++
libavfilter/vf_overlay_videotoolbox.m | 551 ++++++++++++++++++
9 files changed, 672 insertions(+), 3 deletions(-)
create mode 100644 libavfilter/metal/vf_overlay_videotoolbox.metal
create mode 100644 libavfilter/vf_overlay_videotoolbox.m
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3722,6 +3722,7 @@ overlay_qsv_filter_select="qsvvpp"
@@ -3883,6 +3883,7 @@ overlay_opencl_filter_deps="opencl"
overlay_qsv_filter_deps="libmfx"
overlay_qsv_filter_select="qsvvpp"
overlay_vaapi_filter_deps="vaapi VAProcPipelineCaps_blend_flags"
+overlay_videotoolbox_filter_deps="metal corevideo coreimage videotoolbox"
overlay_vulkan_filter_deps="vulkan spirv_compiler"
overlay_rkrga_filter_deps="rkrga"
+overlay_videotoolbox_filter_deps="metal corevideo coreimage videotoolbox"
owdenoise_filter_deps="gpl"
pad_opencl_filter_deps="opencl"
pan_filter_deps="swresample"
Index: FFmpeg/doc/filters.texi
===================================================================
--- FFmpeg.orig/doc/filters.texi
+++ FFmpeg/doc/filters.texi
@@ -18351,6 +18351,58 @@ See @ref{framesync}.
@@ -19047,6 +19047,58 @@ See @ref{framesync}.
This filter also supports the @ref{framesync} options.
@ -199,38 +77,28 @@ Index: FFmpeg/libavfilter/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -403,6 +403,9 @@ OBJS-$(CONFIG_OVERLAY_OPENCL_FILTER)
@@ -414,6 +414,9 @@ OBJS-$(CONFIG_OVERLAY_OPENCL_FILTER)
opencl/overlay.o framesync.o
OBJS-$(CONFIG_OVERLAY_QSV_FILTER) += vf_overlay_qsv.o framesync.o
OBJS-$(CONFIG_OVERLAY_VAAPI_FILTER) += vf_overlay_vaapi.o framesync.o vaapi_vpp.o
OBJS-$(CONFIG_OVERLAY_VULKAN_FILTER) += vf_overlay_vulkan.o vulkan.o vulkan_filter.o
+OBJS-$(CONFIG_OVERLAY_VIDEOTOOLBOX_FILTER) += vf_overlay_videotoolbox.o framesync.o \
+OBJS-$(CONFIG_OVERLAY_VIDEOTOOLBOX_FILTER) += vf_overlay_videotoolbox.o framesync.o \
+ metal/vf_overlay_videotoolbox.metallib.o \
+ metal/utils.o
OBJS-$(CONFIG_OVERLAY_VULKAN_FILTER) += vf_overlay_vulkan.o vulkan.o vulkan_filter.o
OBJS-$(CONFIG_OVERLAY_RKRGA_FILTER) += vf_overlay_rkrga.o framesync.o
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -380,6 +380,7 @@ extern const AVFilter ff_vf_overlay_vaap
@@ -388,6 +388,7 @@ extern const AVFilter ff_vf_overlay;
extern const AVFilter ff_vf_overlay_opencl;
extern const AVFilter ff_vf_overlay_qsv;
extern const AVFilter ff_vf_overlay_vaapi;
+extern const AVFilter ff_vf_overlay_videotoolbox;
extern const AVFilter ff_vf_overlay_vulkan;
extern const AVFilter ff_vf_overlay_cuda;
extern const AVFilter ff_vf_overlay_rkrga;
+extern const AVFilter ff_vf_overlay_videotoolbox;
extern const AVFilter ff_vf_owdenoise;
extern const AVFilter ff_vf_pad;
extern const AVFilter ff_vf_pad_opencl;
Index: FFmpeg/libavfilter/metal/utils.h
===================================================================
--- FFmpeg.orig/libavfilter/metal/utils.h
+++ FFmpeg/libavfilter/metal/utils.h
@@ -55,5 +55,4 @@ CVMetalTextureRef ff_metal_texture_from_
int plane,
MTLPixelFormat format)
API_AVAILABLE(macos(10.11), ios(8.0));
-
#endif /* AVFILTER_METAL_UTILS_H */
Index: FFmpeg/libavfilter/metal/utils.m
===================================================================
--- FFmpeg.orig/libavfilter/metal/utils.m

View File

@ -0,0 +1,573 @@
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3934,7 +3934,7 @@ tonemap_vaapi_filter_deps="vaapi VAProcF
tonemap_opencl_filter_deps="opencl const_nan"
transpose_opencl_filter_deps="opencl"
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
-transpose_vt_filter_deps="videotoolbox VTPixelRotationSessionCreate"
+transpose_vt_filter_deps="coreimage videotoolbox"
transpose_vulkan_filter_deps="vulkan spirv_compiler"
unsharp_opencl_filter_deps="opencl"
uspp_filter_deps="gpl avcodec"
Index: FFmpeg/libavfilter/vf_transpose_vt.c
===================================================================
--- FFmpeg.orig/libavfilter/vf_transpose_vt.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Copyright (c) 2023 Zhao Zhili <zhilizhao@tencent.com>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <VideoToolbox/VideoToolbox.h>
-
-#include "libavutil/hwcontext.h"
-#include "libavutil/hwcontext_videotoolbox.h"
-#include "libavutil/opt.h"
-#include "libavutil/pixdesc.h"
-#include "internal.h"
-#include "transpose.h"
-#include "video.h"
-
-typedef struct TransposeVtContext {
- AVClass *class;
-
- VTPixelRotationSessionRef session;
- int dir;
- int passthrough;
-} TransposeVtContext;
-
-static av_cold int transpose_vt_init(AVFilterContext *avctx)
-{
- TransposeVtContext *s = avctx->priv;
- int ret;
-
- ret = VTPixelRotationSessionCreate(kCFAllocatorDefault, &s->session);
- if (ret != noErr) {
- av_log(avctx, AV_LOG_ERROR, "Rotation session create failed, %d\n", ret);
- return AVERROR_EXTERNAL;
- }
-
- return 0;
-}
-
-static av_cold void transpose_vt_uninit(AVFilterContext *avctx)
-{
- TransposeVtContext *s = avctx->priv;
-
- if (s->session) {
- VTPixelRotationSessionInvalidate(s->session);
- CFRelease(s->session);
- s->session = NULL;
- }
-}
-
-static int transpose_vt_filter_frame(AVFilterLink *link, AVFrame *in)
-{
- int ret;
- AVFilterContext *ctx = link->dst;
- TransposeVtContext *s = ctx->priv;
- AVFilterLink *outlink = ctx->outputs[0];
- CVPixelBufferRef src;
- CVPixelBufferRef dst;
- AVFrame *out;
-
- if (s->passthrough)
- return ff_filter_frame(outlink, in);
-
- out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
- if (!out) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- ret = av_frame_copy_props(out, in);
- if (ret < 0)
- goto fail;
-
- src = (CVPixelBufferRef)in->data[3];
- dst = (CVPixelBufferRef)out->data[3];
- ret = VTPixelRotationSessionRotateImage(s->session, src, dst);
- if (ret != noErr) {
- av_log(ctx, AV_LOG_ERROR, "transfer image failed, %d\n", ret);
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
- av_frame_free(&in);
-
- return ff_filter_frame(outlink, out);
-
-fail:
- av_frame_free(&in);
- av_frame_free(&out);
- return ret;
-}
-
-static int transpose_vt_recreate_hw_ctx(AVFilterLink *outlink)
-{
- AVFilterContext *avctx = outlink->src;
- AVFilterLink *inlink = outlink->src->inputs[0];
- AVHWFramesContext *hw_frame_ctx_in;
- AVHWFramesContext *hw_frame_ctx_out;
- int err;
-
- av_buffer_unref(&outlink->hw_frames_ctx);
-
- hw_frame_ctx_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
- outlink->hw_frames_ctx = av_hwframe_ctx_alloc(hw_frame_ctx_in->device_ref);
- hw_frame_ctx_out = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
- hw_frame_ctx_out->format = AV_PIX_FMT_VIDEOTOOLBOX;
- hw_frame_ctx_out->sw_format = hw_frame_ctx_in->sw_format;
- hw_frame_ctx_out->width = outlink->w;
- hw_frame_ctx_out->height = outlink->h;
-
- err = ff_filter_init_hw_frames(avctx, outlink, 1);
- if (err < 0)
- return err;
-
- err = av_hwframe_ctx_init(outlink->hw_frames_ctx);
- if (err < 0) {
- av_log(avctx, AV_LOG_ERROR,
- "Failed to init videotoolbox frame context, %s\n",
- av_err2str(err));
- return err;
- }
-
- return 0;
-}
-
-static int transpose_vt_config_output(AVFilterLink *outlink)
-{
- int err;
- AVFilterContext *avctx = outlink->src;
- TransposeVtContext *s = avctx->priv;
- AVFilterLink *inlink = outlink->src->inputs[0];
- CFStringRef rotation = kVTRotation_0;
- CFBooleanRef vflip = kCFBooleanFalse;
- CFBooleanRef hflip = kCFBooleanFalse;
- int swap_w_h = 0;
-
- av_buffer_unref(&outlink->hw_frames_ctx);
- outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
-
- if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
- (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
- av_log(avctx, AV_LOG_VERBOSE,
- "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
- inlink->w, inlink->h, inlink->w, inlink->h);
- return 0;
- }
-
- s->passthrough = TRANSPOSE_PT_TYPE_NONE;
-
- switch (s->dir) {
- case TRANSPOSE_CCLOCK_FLIP:
- rotation = kVTRotation_CCW90;
- vflip = kCFBooleanTrue;
- swap_w_h = 1;
- break;
- case TRANSPOSE_CCLOCK:
- rotation = kVTRotation_CCW90;
- swap_w_h = 1;
- break;
- case TRANSPOSE_CLOCK:
- rotation = kVTRotation_CW90;
- swap_w_h = 1;
- break;
- case TRANSPOSE_CLOCK_FLIP:
- rotation = kVTRotation_CW90;
- vflip = kCFBooleanTrue;
- swap_w_h = 1;
- break;
- case TRANSPOSE_REVERSAL:
- rotation = kVTRotation_180;
- break;
- case TRANSPOSE_HFLIP:
- hflip = kCFBooleanTrue;
- break;
- case TRANSPOSE_VFLIP:
- vflip = kCFBooleanTrue;
- break;
- default:
- av_log(avctx, AV_LOG_ERROR, "Failed to set direction to %d\n", s->dir);
- return AVERROR(EINVAL);
- }
-
- err = VTSessionSetProperty(s->session, kVTPixelRotationPropertyKey_Rotation,
- rotation);
- if (err != noErr) {
- av_log(avctx, AV_LOG_ERROR, "Set rotation property failed, %d\n", err);
- return AVERROR_EXTERNAL;
- }
- err = VTSessionSetProperty(s->session, kVTPixelRotationPropertyKey_FlipVerticalOrientation,
- vflip);
- if (err != noErr) {
- av_log(avctx, AV_LOG_ERROR, "Set vertical flip property failed, %d\n", err);
- return AVERROR_EXTERNAL;
- }
- err = VTSessionSetProperty(s->session, kVTPixelRotationPropertyKey_FlipHorizontalOrientation,
- hflip);
- if (err != noErr) {
- av_log(avctx, AV_LOG_ERROR, "Set horizontal flip property failed, %d\n", err);
- return AVERROR_EXTERNAL;
- }
-
- if (!swap_w_h)
- return 0;
-
- outlink->w = inlink->h;
- outlink->h = inlink->w;
- return transpose_vt_recreate_hw_ctx(outlink);
-}
-
-#define OFFSET(x) offsetof(TransposeVtContext, x)
-#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
-static const AVOption transpose_vt_options[] = {
- { "dir", "set transpose direction",
- OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, .unit = "dir" },
- { "cclock_flip", "rotate counter-clockwise with vertical flip",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
- { "clock", "rotate clockwise",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .flags=FLAGS, .unit = "dir" },
- { "cclock", "rotate counter-clockwise",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .flags=FLAGS, .unit = "dir" },
- { "clock_flip", "rotate clockwise with vertical flip",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
- { "reversal", "rotate by half-turn",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, .flags=FLAGS, .unit = "dir" },
- { "hflip", "flip horizontally",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, .flags=FLAGS, .unit = "dir" },
- { "vflip", "flip vertically",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, .flags=FLAGS, .unit = "dir" },
-
- { "passthrough", "do not apply transposition if the input matches the specified geometry",
- OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64=TRANSPOSE_PT_TYPE_NONE }, 0, INT_MAX, FLAGS, .unit = "passthrough" },
- { "none", "always apply transposition",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
- { "portrait", "preserve portrait geometry",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
- { "landscape", "preserve landscape geometry",
- 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
-
- { NULL }
-};
-
-AVFILTER_DEFINE_CLASS(transpose_vt);
-
-static const AVFilterPad transpose_vt_inputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .filter_frame = &transpose_vt_filter_frame,
- },
-};
-
-static const AVFilterPad transpose_vt_outputs[] = {
- {
- .name = "default",
- .type = AVMEDIA_TYPE_VIDEO,
- .config_props = &transpose_vt_config_output,
- },
-};
-
-const AVFilter ff_vf_transpose_vt = {
- .name = "transpose_vt",
- .description = NULL_IF_CONFIG_SMALL("Transpose Videotoolbox frames"),
- .priv_size = sizeof(TransposeVtContext),
- .init = transpose_vt_init,
- .uninit = transpose_vt_uninit,
- FILTER_INPUTS(transpose_vt_inputs),
- FILTER_OUTPUTS(transpose_vt_outputs),
- FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VIDEOTOOLBOX),
- .priv_class = &transpose_vt_class,
- .flags = AVFILTER_FLAG_HWDEVICE,
- .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
-};
Index: FFmpeg/libavfilter/vf_transpose_vt.m
===================================================================
--- /dev/null
+++ FFmpeg/libavfilter/vf_transpose_vt.m
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2024 Gnattu OC <gnattuoc@me.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <CoreImage/CoreImage.h>
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+#include "transpose.h"
+#include "video.h"
+
+typedef struct TransposeVtContext {
+ AVClass *class;
+ CIContext *ci_ctx;
+ CGImagePropertyOrientation orientation;
+
+ int dir;
+ int passthrough;
+} TransposeVtContext;
+
+static av_cold int transpose_vt_init(AVFilterContext *avctx)
+{
+ TransposeVtContext *s = avctx->priv;
+ s->ci_ctx = CFBridgingRetain([CIContext context]);
+ if (!s->ci_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "CoreImage Context create failed\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ return 0;
+}
+
+static av_cold void transpose_vt_uninit(AVFilterContext *avctx)
+{
+ TransposeVtContext *s = avctx->priv;
+ if (s->ci_ctx) {
+ CFRelease(s->ci_ctx);
+ s->ci_ctx = NULL;
+ }
+}
+
+static int transpose_vt_filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ int ret;
+ AVFilterContext *ctx = link->dst;
+ TransposeVtContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ CVPixelBufferRef src;
+ CVPixelBufferRef dst;
+ AVFrame *out;
+ CIImage *source_image = NULL;
+ CIImage *transposed_image = NULL;
+
+ if (s->passthrough)
+ return ff_filter_frame(outlink, in);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = av_frame_copy_props(out, in);
+ if (ret < 0)
+ goto fail;
+
+ src = (CVPixelBufferRef)in->data[3];
+ dst = (CVPixelBufferRef)out->data[3];
+
+ source_image = CFBridgingRetain([CIImage imageWithCVPixelBuffer: src]);
+ transposed_image = CFBridgingRetain([source_image imageByApplyingCGOrientation: s->orientation]);
+ if (!transposed_image) {
+        if (source_image)
+            CFRelease(source_image);
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ [(__bridge CIContext*)s->ci_ctx render: (__bridge CIImage*)transposed_image toCVPixelBuffer: dst];
+ CFRelease(source_image);
+ CFRelease(transposed_image);
+ CVBufferPropagateAttachments(src, dst);
+
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+
+ fail:
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return ret;
+}
+
+static int transpose_vt_recreate_hw_ctx(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ AVHWFramesContext *hw_frame_ctx_in;
+ AVHWFramesContext *hw_frame_ctx_out;
+ int err;
+
+ av_buffer_unref(&outlink->hw_frames_ctx);
+
+ hw_frame_ctx_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+ outlink->hw_frames_ctx = av_hwframe_ctx_alloc(hw_frame_ctx_in->device_ref);
+ hw_frame_ctx_out = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
+ hw_frame_ctx_out->format = AV_PIX_FMT_VIDEOTOOLBOX;
+ hw_frame_ctx_out->sw_format = hw_frame_ctx_in->sw_format;
+ hw_frame_ctx_out->width = outlink->w;
+ hw_frame_ctx_out->height = outlink->h;
+
+ err = ff_filter_init_hw_frames(avctx, outlink, 1);
+ if (err < 0)
+ return err;
+
+ err = av_hwframe_ctx_init(outlink->hw_frames_ctx);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to init videotoolbox frame context, %s\n",
+ av_err2str(err));
+ return err;
+ }
+
+ return 0;
+}
+
+static int transpose_vt_config_output(AVFilterLink *outlink)
+{
+ int err;
+ AVFilterContext *avctx = outlink->src;
+ TransposeVtContext *s = avctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int swap_w_h = 0;
+
+ av_buffer_unref(&outlink->hw_frames_ctx);
+ outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
+
+ if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
+ (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
+ inlink->w, inlink->h, inlink->w, inlink->h);
+ s->orientation = kCGImagePropertyOrientationUp;
+ return 0;
+ }
+
+ s->passthrough = TRANSPOSE_PT_TYPE_NONE;
+
+ switch (s->dir) {
+ case TRANSPOSE_CCLOCK_FLIP:
+ s->orientation = kCGImagePropertyOrientationLeftMirrored;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CCLOCK:
+ s->orientation = kCGImagePropertyOrientationLeft;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CLOCK:
+ s->orientation = kCGImagePropertyOrientationRight;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CLOCK_FLIP:
+ s->orientation = kCGImagePropertyOrientationRightMirrored;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_REVERSAL:
+ s->orientation = kCGImagePropertyOrientationDown;
+ break;
+ case TRANSPOSE_HFLIP:
+ s->orientation = kCGImagePropertyOrientationUpMirrored;
+ break;
+ case TRANSPOSE_VFLIP:
+ s->orientation = kCGImagePropertyOrientationDownMirrored;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Failed to set direction to %d\n", s->dir);
+ return AVERROR(EINVAL);
+ }
+
+ if (!swap_w_h)
+ return 0;
+
+ outlink->w = inlink->h;
+ outlink->h = inlink->w;
+ return transpose_vt_recreate_hw_ctx(outlink);
+}
+
+#define OFFSET(x) offsetof(TransposeVtContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption transpose_vt_options[] = {
+ { "dir", "set transpose direction",
+ OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, .unit = "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
+ { "clock", "rotate clockwise",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
+ { "reversal", "rotate by half-turn",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, .flags=FLAGS, .unit = "dir" },
+ { "hflip", "flip horizontally",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, .flags=FLAGS, .unit = "dir" },
+ { "vflip", "flip vertically",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, .flags=FLAGS, .unit = "dir" },
+
+ { "passthrough", "do not apply transposition if the input matches the specified geometry",
+ OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64=TRANSPOSE_PT_TYPE_NONE }, 0, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "none", "always apply transposition",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "portrait", "preserve portrait geometry",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "landscape", "preserve landscape geometry",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(transpose_vt);
+
+static const AVFilterPad transpose_vt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = &transpose_vt_filter_frame,
+ },
+};
+
+static const AVFilterPad transpose_vt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = &transpose_vt_config_output,
+ },
+};
+
+const AVFilter ff_vf_transpose_vt = {
+ .name = "transpose_vt",
+ .description = NULL_IF_CONFIG_SMALL("Transpose Videotoolbox frames"),
+ .priv_size = sizeof(TransposeVtContext),
+ .init = transpose_vt_init,
+ .uninit = transpose_vt_uninit,
+ FILTER_INPUTS(transpose_vt_inputs),
+ FILTER_OUTPUTS(transpose_vt_outputs),
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VIDEOTOOLBOX),
+ .priv_class = &transpose_vt_class,
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};

View File

@ -1,272 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsv.c
+++ jellyfin-ffmpeg/libavcodec/qsv.c
@@ -685,18 +685,31 @@ static int qsv_create_mfx_session(AVCode
int ff_qsv_init_internal_session(AVCodecContext *avctx, QSVSession *qs,
const char *load_plugins, int gpu_copy)
{
+ mfxIMPL impls[] = {
#if CONFIG_D3D11VA
- mfxIMPL impl = MFX_IMPL_AUTO_ANY | MFX_IMPL_VIA_D3D11;
-#else
- mfxIMPL impl = MFX_IMPL_AUTO_ANY;
+ MFX_IMPL_AUTO_ANY | MFX_IMPL_VIA_D3D11,
#endif
+ MFX_IMPL_AUTO_ANY
+ };
+ mfxIMPL impl;
mfxVersion ver = { { QSV_VERSION_MINOR, QSV_VERSION_MAJOR } };
const char *desc;
- int ret = qsv_create_mfx_session(avctx, impl, &ver, gpu_copy, &qs->session,
+ int ret;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(impls); i++) {
+ ret = qsv_create_mfx_session(avctx, impls[i], &ver, gpu_copy, &qs->session,
&qs->loader);
- if (ret)
- return ret;
+
+ if (ret == 0)
+ break;
+
+ if (i == FF_ARRAY_ELEMS(impls) - 1)
+ return ret;
+ else
+            av_log(avctx, AV_LOG_WARNING, "The current mfx implementation is not "
+                   "supported, trying the next mfx implementation.\n");
+ }
#ifdef AVCODEC_QSV_LINUX_SESSION_HANDLE
ret = ff_qsv_set_display_handle(avctx, qs);
Index: jellyfin-ffmpeg/libavcodec/qsvenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/qsvenc.c
+++ jellyfin-ffmpeg/libavcodec/qsvenc.c
@@ -838,7 +838,9 @@ static int init_video_param(AVCodecConte
// for progressive video, the height should be aligned to 16 for
// H.264. For HEVC, depending on the version of MFX, it should be
// either 32 or 16. The lower number is better if possible.
- q->height_align = avctx->codec_id == AV_CODEC_ID_HEVC ? 32 : 16;
+ // For AV1, it is 32
+ q->height_align = (avctx->codec_id == AV_CODEC_ID_HEVC ||
+ avctx->codec_id == AV_CODEC_ID_AV1) ? 32 : 16;
}
q->param.mfx.FrameInfo.Height = FFALIGN(avctx->height, q->height_align);
@@ -1913,6 +1915,62 @@ static int qsvenc_fill_padding_area(AVFr
return 0;
}
+/* frame width / height have been aligned with the alignment */
+static int qsvenc_get_continuous_buffer(AVFrame *frame)
+{
+ int total_size;
+
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ frame->linesize[0] = frame->width;
+ frame->linesize[1] = frame->linesize[0];
+ total_size = frame->linesize[0] * frame->height + frame->linesize[1] * frame->height / 2;
+ break;
+
+ case AV_PIX_FMT_P010:
+ case AV_PIX_FMT_P012:
+ frame->linesize[0] = 2 * frame->width;
+ frame->linesize[1] = frame->linesize[0];
+ total_size = frame->linesize[0] * frame->height + frame->linesize[1] * frame->height / 2;
+ break;
+
+ case AV_PIX_FMT_YUYV422:
+ frame->linesize[0] = 2 * frame->width;
+ frame->linesize[1] = 0;
+ total_size = frame->linesize[0] * frame->height;
+ break;
+
+ case AV_PIX_FMT_Y210:
+ case AV_PIX_FMT_VUYX:
+ case AV_PIX_FMT_XV30:
+ case AV_PIX_FMT_BGRA:
+ case AV_PIX_FMT_X2RGB10:
+ frame->linesize[0] = 4 * frame->width;
+ frame->linesize[1] = 0;
+ total_size = frame->linesize[0] * frame->height;
+ break;
+
+ default:
+ // This should never be reached
+ av_assert0(0);
+ return AVERROR(EINVAL);
+ }
+
+ frame->buf[0] = av_buffer_alloc(total_size);
+ if (!frame->buf[0])
+ return AVERROR(ENOMEM);
+
+ frame->data[0] = frame->buf[0]->data;
+ frame->extended_data = frame->data;
+
+ if (frame->format == AV_PIX_FMT_NV12 ||
+ frame->format == AV_PIX_FMT_P010 ||
+ frame->format == AV_PIX_FMT_P012)
+ frame->data[1] = frame->data[0] + frame->linesize[0] * frame->height;
+
+ return 0;
+}
+
static int submit_frame(QSVEncContext *q, const AVFrame *frame,
QSVFrame **new_frame)
{
@@ -1963,8 +2021,9 @@ static int submit_frame(QSVEncContext *q
} else {
/* make a copy if the input is not padded as libmfx requires */
/* and to make allocation continious for data[0]/data[1] */
- if ((frame->height & 31 || frame->linesize[0] & (q->width_align - 1)) ||
- (frame->data[1] - frame->data[0] != frame->linesize[0] * FFALIGN(qf->frame->height, q->height_align))) {
+ if ((frame->height & (q->height_align - 1) || frame->linesize[0] & (q->width_align - 1)) ||
+ ((frame->format == AV_PIX_FMT_NV12 || frame->format == AV_PIX_FMT_P010 || frame->format == AV_PIX_FMT_P012) &&
+ (frame->data[1] - frame->data[0] != frame->linesize[0] * FFALIGN(qf->frame->height, q->height_align)))) {
int tmp_w, tmp_h;
qf->frame->height = tmp_h = FFALIGN(frame->height, q->height_align);
qf->frame->width = tmp_w = FFALIGN(frame->width, q->width_align);
@@ -1972,7 +2031,7 @@ static int submit_frame(QSVEncContext *q
qf->frame->format = frame->format;
if (!qf->frame->data[0]) {
- ret = av_frame_get_buffer(qf->frame, q->width_align);
+ ret = qsvenc_get_continuous_buffer(qf->frame);
if (ret < 0)
return ret;
}
@@ -2579,7 +2638,7 @@ int ff_qsv_encode(AVCodecContext *avctx,
pict_type = AV_PICTURE_TYPE_P;
else if (qpkt.bs->FrameType & MFX_FRAMETYPE_B || qpkt.bs->FrameType & MFX_FRAMETYPE_xB)
pict_type = AV_PICTURE_TYPE_B;
- else if (qpkt.bs->FrameType == MFX_FRAMETYPE_UNKNOWN) {
+ else if (qpkt.bs->FrameType == MFX_FRAMETYPE_UNKNOWN && qpkt.bs->DataLength) {
pict_type = AV_PICTURE_TYPE_NONE;
av_log(avctx, AV_LOG_WARNING, "Unknown FrameType, set pict_type to AV_PICTURE_TYPE_NONE.\n");
} else {
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -663,6 +663,7 @@ static mfxStatus frame_get_hdl(mfxHDL pt
static int qsv_d3d11_update_config(void *ctx, mfxHDL handle, mfxConfig cfg)
{
+ int ret = AVERROR_UNKNOWN;
#if CONFIG_D3D11VA
mfxStatus sts;
IDXGIAdapter *pDXGIAdapter;
@@ -677,7 +678,8 @@ static int qsv_d3d11_update_config(void
hr = IDXGIDevice_GetAdapter(pDXGIDevice, &pDXGIAdapter);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error IDXGIDevice_GetAdapter %d\n", hr);
- goto fail;
+ IDXGIDevice_Release(pDXGIDevice);
+ return ret;
}
hr = IDXGIAdapter_GetDesc(pDXGIAdapter, &adapterDesc);
@@ -687,7 +689,7 @@ static int qsv_d3d11_update_config(void
}
} else {
av_log(ctx, AV_LOG_ERROR, "Error ID3D11Device_QueryInterface %d\n", hr);
- goto fail;
+ return ret;
}
impl_value.Type = MFX_VARIANT_TYPE_U16;
@@ -720,11 +722,13 @@ static int qsv_d3d11_update_config(void
goto fail;
}
- return 0;
+ ret = 0;
fail:
+ IDXGIAdapter_Release(pDXGIAdapter);
+ IDXGIDevice_Release(pDXGIDevice);
#endif
- return AVERROR_UNKNOWN;
+ return ret;
}
static int qsv_d3d9_update_config(void *ctx, mfxHDL handle, mfxConfig cfg)
@@ -750,25 +754,28 @@ static int qsv_d3d9_update_config(void *
hr = IDirect3DDeviceManager9_LockDevice(devmgr, device_handle, &device, TRUE);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error LockDevice %d\n", hr);
+ IDirect3DDeviceManager9_CloseDeviceHandle(devmgr, device_handle);
goto fail;
}
hr = IDirect3DDevice9Ex_GetCreationParameters(device, &params);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9_GetCreationParameters %d\n", hr);
+ IDirect3DDevice9Ex_Release(device);
goto unlock;
}
hr = IDirect3DDevice9Ex_GetDirect3D(device, &d3d9ex);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetAdapterLUID %d\n", hr);
+ IDirect3DDevice9Ex_Release(device);
goto unlock;
}
hr = IDirect3D9Ex_GetAdapterLUID(d3d9ex, params.AdapterOrdinal, &luid);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetAdapterLUID %d\n", hr);
- goto unlock;
+ goto release;
}
impl_value.Type = MFX_VARIANT_TYPE_PTR;
@@ -778,13 +785,18 @@ static int qsv_d3d9_update_config(void *
if (sts != MFX_ERR_NONE) {
av_log(ctx, AV_LOG_ERROR, "Error adding a MFX configuration"
"DeviceLUID property: %d.\n", sts);
- goto unlock;
+ goto release;
}
ret = 0;
+release:
+ IDirect3D9Ex_Release(d3d9ex);
+ IDirect3DDevice9Ex_Release(device);
+
unlock:
IDirect3DDeviceManager9_UnlockDevice(devmgr, device_handle, FALSE);
+ IDirect3DDeviceManager9_CloseDeviceHandle(devmgr, device_handle);
fail:
#endif
return ret;
@@ -1927,7 +1939,7 @@ static int qsv_map_to(AVHWFramesContext
case AV_PIX_FMT_VAAPI:
{
mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
- if (*(VASurfaceID*)pair->first == (VASurfaceID)src->data[3]) {
+ if (*(VASurfaceID*)pair->first == (VASurfaceID)(uintptr_t)src->data[3]) {
index = i;
break;
}
@@ -2187,6 +2199,15 @@ static int qsv_device_derive(AVHWDeviceC
AVDictionary *opts, int flags)
{
mfxIMPL impl;
+ QSVDevicePriv *priv;
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv)
+ return AVERROR(ENOMEM);
+
+ ctx->user_opaque = priv;
+ ctx->free = qsv_device_free;
+
impl = choose_implementation("hw_any", child_device_ctx->type);
return qsv_device_derive_from_child(ctx, impl,
child_device_ctx, flags);

View File

@ -1,43 +1,41 @@
Subject: [PATCH] avfilter: add vf_tonemap_videotoolbox
---
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3772,6 +3772,7 @@ tinterlace_pad_test_deps="tinterlace_fil
@@ -3931,6 +3931,7 @@ tinterlace_merge_test_deps="tinterlace_f
tinterlace_pad_test_deps="tinterlace_filter"
tonemap_filter_deps="const_nan"
tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
tonemap_opencl_filter_deps="opencl const_nan"
+tonemap_videotoolbox_filter_deps="metal corevideo videotoolbox const_nan"
tonemap_opencl_filter_deps="opencl const_nan"
transpose_opencl_filter_deps="opencl"
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
transpose_vulkan_filter_deps="vulkan spirv_compiler"
Index: FFmpeg/libavfilter/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -521,6 +521,9 @@ OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER)
OBJS-$(CONFIG_TONEMAP_CUDA_FILTER) += vf_tonemap_cuda.o cuda/tonemap.ptx.o \
cuda/host_util.o
@@ -535,6 +535,9 @@ OBJS-$(CONFIG_TONEMAP_CUDA_FILTER)
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o opencl.o \
opencl/tonemap.o opencl/colorspace_common.o
OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER) += vf_tonemap_vaapi.o vaapi_vpp.o
+OBJS-$(CONFIG_TONEMAP_VIDEOTOOLBOX_FILTER) += vf_tonemap_videotoolbox.o \
+ metal/vf_tonemap_videotoolbox.metallib.o \
+ metal/utils.o
OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
OBJS-$(CONFIG_TRANSPOSE_CUDA_FILTER) += vf_transpose_cuda.o vf_transpose_cuda.ptx.o \
OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += vf_transpose_npp.o
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -487,6 +487,7 @@ extern const AVFilter ff_vf_tonemap;
@@ -501,6 +501,7 @@ extern const AVFilter ff_vf_tonemap;
extern const AVFilter ff_vf_tonemap_cuda;
extern const AVFilter ff_vf_tonemap_opencl;
extern const AVFilter ff_vf_tonemap_vaapi;
+extern const AVFilter ff_vf_tonemap_videotoolbox;
extern const AVFilter ff_vf_tpad;
extern const AVFilter ff_vf_transpose;
extern const AVFilter ff_vf_transpose_cuda;
extern const AVFilter ff_vf_transpose_npp;
Index: FFmpeg/libavfilter/metal/vf_tonemap_videotoolbox.metal
===================================================================
--- /dev/null
@ -1838,39 +1836,39 @@ Index: FFmpeg/libavfilter/vf_tonemap_videotoolbox.m
+#define OFFSET(x) offsetof(TonemapVideoToolboxContext, x)
+
+static const AVOption tonemap_videotoolbox_options[] = {
+ { "tonemap", "Tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, { .i64 = TONEMAP_NONE }, TONEMAP_NONE, TONEMAP_COUNT - 1, FLAGS, "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_NONE }, 0, 0, FLAGS, "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_LINEAR }, 0, 0, FLAGS, "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_GAMMA }, 0, 0, FLAGS, "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_CLIP }, 0, 0, FLAGS, "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_REINHARD }, 0, 0, FLAGS, "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_HABLE }, 0, 0, FLAGS, "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MOBIUS }, 0, 0, FLAGS, "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_BT2390 }, 0, 0, FLAGS, "tonemap" },
+ { "tonemap_mode", "Tonemap mode selection", OFFSET(tonemap_mode), AV_OPT_TYPE_INT, { .i64 = TONEMAP_MODE_MAX }, TONEMAP_MODE_MAX, TONEMAP_MODE_COUNT - 1, FLAGS, "tonemap_mode" },
+ { "max", "Brightest channel based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_MAX }, 0, 0, FLAGS, "tonemap_mode" },
+ { "rgb", "Per-channel based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_RGB }, 0, 0, FLAGS, "tonemap_mode" },
+ { "lum", "Relative luminance based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_LUM }, 0, 0, FLAGS, "tonemap_mode" },
+ { "transfer", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_BT709 }, -1, INT_MAX, FLAGS, "transfer" },
+ { "t", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_BT709 }, -1, INT_MAX, FLAGS, "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_BT709 }, 0, 0, FLAGS, "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_BT2020_10 }, 0, 0, FLAGS, "transfer" },
+ { "smpte2084", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_SMPTE2084 }, 0, 0, FLAGS, "transfer" },
+ { "matrix", "Set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_BT709 }, -1, INT_MAX, FLAGS, "matrix" },
+ { "m", "Set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_BT709 }, -1, INT_MAX, FLAGS, "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 }, 0, 0, FLAGS, "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, "matrix" },
+ { "primaries", "Set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_BT709 }, -1, INT_MAX, FLAGS, "primaries" },
+ { "p", "Set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_BT709 }, -1, INT_MAX, FLAGS, "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT709 }, 0, 0, FLAGS, "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT2020 }, 0, 0, FLAGS, "primaries" },
+ { "range", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_MPEG }, -1, INT_MAX, FLAGS, "range" },
+ { "r", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_MPEG }, -1, INT_MAX, FLAGS, "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, "range" },
+ { "format", "Output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, FLAGS, "fmt" },
+ { "tonemap", "Tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, { .i64 = TONEMAP_NONE }, TONEMAP_NONE, TONEMAP_COUNT - 1, FLAGS, .unit = "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_NONE }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_LINEAR }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_GAMMA }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_CLIP }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_REINHARD }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_HABLE }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MOBIUS }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_BT2390 }, 0, 0, FLAGS, .unit = "tonemap" },
+ { "tonemap_mode", "Tonemap mode selection", OFFSET(tonemap_mode), AV_OPT_TYPE_INT, { .i64 = TONEMAP_MODE_MAX }, TONEMAP_MODE_MAX, TONEMAP_MODE_COUNT - 1, FLAGS, .unit = "tonemap_mode" },
+ { "max", "Brightest channel based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_MAX }, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "rgb", "Per-channel based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_RGB }, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "lum", "Relative luminance based tonemap", 0, AV_OPT_TYPE_CONST, { .i64 = TONEMAP_MODE_LUM }, 0, 0, FLAGS, .unit = "tonemap_mode" },
+ { "transfer", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_BT709 }, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "t", "Set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_BT709 }, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_BT709 }, 0, 0, FLAGS, .unit = "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_BT2020_10 }, 0, 0, FLAGS, .unit = "transfer" },
+ { "smpte2084", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_TRC_SMPTE2084 }, 0, 0, FLAGS, .unit = "transfer" },
+ { "matrix", "Set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_BT709 }, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "m", "Set colorspace matrix", OFFSET(colorspace), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_BT709 }, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT709 }, 0, 0, FLAGS, .unit = "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_SPC_BT2020_NCL }, 0, 0, FLAGS, .unit = "matrix" },
+ { "primaries", "Set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_BT709 }, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "p", "Set color primaries", OFFSET(primaries), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_BT709 }, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT709 }, 0, 0, FLAGS, .unit = "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_PRI_BT2020 }, 0, 0, FLAGS, .unit = "primaries" },
+ { "range", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_MPEG }, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "r", "Set color range", OFFSET(range), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_MPEG }, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, .unit = "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, .unit = "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_MPEG }, 0, 0, FLAGS, .unit = "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, { .i64 = AVCOL_RANGE_JPEG }, 0, 0, FLAGS, .unit = "range" },
+ { "format", "Output pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, FLAGS },
+ { "apply_dovi", "Apply Dolby Vision metadata if possible", OFFSET(apply_dovi), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
+ { "peak", "Signal peak override", OFFSET(peak), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
+ { "param", "Tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, { .dbl = NAN }, DBL_MIN, DBL_MAX, FLAGS },

View File

@ -1,23 +1,21 @@
Subject: [PATCH] lavc/videotoolboxenc: add MJPEG support
---
Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3347,6 +3347,8 @@ hevc_videotoolbox_encoder_deps="pthreads
@@ -3488,6 +3488,8 @@ h264_videotoolbox_encoder_deps="pthreads
h264_videotoolbox_encoder_select="atsc_a53 videotoolbox_encoder"
hevc_videotoolbox_encoder_deps="pthreads"
hevc_videotoolbox_encoder_select="atsc_a53 videotoolbox_encoder"
prores_videotoolbox_encoder_deps="pthreads"
prores_videotoolbox_encoder_select="videotoolbox_encoder"
+mjpeg_videotoolbox_encoder_deps="pthreads"
+mjpeg_videotoolbox_encoder_select="videotoolbox_encoder"
prores_videotoolbox_encoder_deps="pthreads"
prores_videotoolbox_encoder_select="videotoolbox_encoder"
libaom_av1_decoder_deps="libaom"
libaom_av1_encoder_deps="libaom"
libaom_av1_encoder_select="extract_extradata_bsf"
Index: FFmpeg/libavcodec/Makefile
===================================================================
--- FFmpeg.orig/libavcodec/Makefile
+++ FFmpeg/libavcodec/Makefile
@@ -504,6 +504,7 @@ OBJS-$(CONFIG_MJPEG_CUVID_DECODER) +
@@ -508,6 +508,7 @@ OBJS-$(CONFIG_MJPEG_CUVID_DECODER) +
OBJS-$(CONFIG_MJPEG_QSV_ENCODER) += qsvenc_jpeg.o
OBJS-$(CONFIG_MJPEG_RKMPP_ENCODER) += rkmppenc.o
OBJS-$(CONFIG_MJPEG_VAAPI_ENCODER) += vaapi_encode_mjpeg.o
@ -29,27 +27,27 @@ Index: FFmpeg/libavcodec/allcodecs.c
===================================================================
--- FFmpeg.orig/libavcodec/allcodecs.c
+++ FFmpeg/libavcodec/allcodecs.c
@@ -883,6 +883,7 @@ extern const FFCodec ff_mpeg4_mediacodec
extern const FFCodec ff_mpeg4_omx_encoder;
extern const FFCodec ff_mpeg4_v4l2m2m_encoder;
extern const FFCodec ff_prores_videotoolbox_encoder;
@@ -876,6 +876,7 @@ extern const FFCodec ff_mjpeg_qsv_encode
extern const FFCodec ff_mjpeg_qsv_decoder;
extern const FFCodec ff_mjpeg_rkmpp_encoder;
extern const FFCodec ff_mjpeg_vaapi_encoder;
+extern const FFCodec ff_mjpeg_videotoolbox_encoder;
extern const FFCodec ff_vc1_cuvid_decoder;
extern const FFCodec ff_vp8_cuvid_decoder;
extern const FFCodec ff_vp8_mediacodec_decoder;
extern const FFCodec ff_mp3_mf_encoder;
extern const FFCodec ff_mpeg1_cuvid_decoder;
extern const FFCodec ff_mpeg2_cuvid_decoder;
Index: FFmpeg/libavcodec/videotoolboxenc.c
===================================================================
--- FFmpeg.orig/libavcodec/videotoolboxenc.c
+++ FFmpeg/libavcodec/videotoolboxenc.c
@@ -547,6 +547,7 @@ static CMVideoCodecType get_cm_codec_type(AVCodecContext *avctx,
else
return MKBETAG('a','p','c','n'); // kCMVideoCodecType_AppleProRes422
}
+ case AV_CODEC_ID_MJPEG: return kCMVideoCodecType_JPEG;
default: return 0;
@@ -545,6 +545,7 @@ static CMVideoCodecType get_cm_codec_typ
else
return MKBETAG('a','p','c','n'); // kCMVideoCodecType_AppleProRes422
}
+ case AV_CODEC_ID_MJPEG: return kCMVideoCodecType_JPEG;
default: return 0;
}
}
@@ -1233,7 +1234,7 @@ static int vtenc_create_encoder(AVCodecContext *avctx,
@@ -1238,7 +1239,7 @@ static int vtenc_create_encoder(AVCodecC
kVTCompressionPropertyKey_Quality,
quality_num);
CFRelease(quality_num);
@ -58,28 +56,28 @@ Index: FFmpeg/libavcodec/videotoolboxenc.c
bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
kCFNumberSInt32Type,
&bit_rate);
@@ -1347,7 +1348,7 @@ static int vtenc_create_encoder(AVCodecContext *avctx,
@@ -1352,7 +1353,7 @@ static int vtenc_create_encoder(AVCodecC
}
}
- if (avctx->gop_size > 0 && avctx->codec_id != AV_CODEC_ID_PRORES) {
+ if (avctx->gop_size > 0 && avctx->codec_id != AV_CODEC_ID_PRORES && avctx->codec_id != AV_CODEC_ID_MJPEG) {
CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
kCFNumberIntType,
&avctx->gop_size);
@@ -1496,7 +1497,7 @@ static int vtenc_create_encoder(AVCodecContext *avctx,
@@ -1501,7 +1502,7 @@ static int vtenc_create_encoder(AVCodecC
}
}
- if (!vtctx->has_b_frames && avctx->codec_id != AV_CODEC_ID_PRORES) {
+ if (!vtctx->has_b_frames && avctx->codec_id != AV_CODEC_ID_PRORES && avctx->codec_id != AV_CODEC_ID_MJPEG) {
status = VTSessionSetProperty(vtctx->session,
kVTCompressionPropertyKey_AllowFrameReordering,
kCFBooleanFalse);
@@ -2844,6 +2845,13 @@ static const enum AVPixelFormat prores_pix_fmts[] = {
@@ -2870,6 +2871,13 @@ static const enum AVPixelFormat prores_p
AV_PIX_FMT_NONE
};
+static const enum AVPixelFormat mjpeg_pix_fmts[] = {
+ AV_PIX_FMT_VIDEOTOOLBOX,
+ AV_PIX_FMT_NV12,
@ -90,10 +88,11 @@ Index: FFmpeg/libavcodec/videotoolboxenc.c
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define COMMON_OPTIONS \
{ "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
@@ -3004,3 +3012,33 @@ const FFCodec ff_prores_videotoolbox_encoder = {
@@ -3039,4 +3047,35 @@ const FFCodec ff_prores_videotoolbox_enc
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.p.wrapper_name = "videotoolbox",
.hw_configs = vt_encode_hw_configs,
};
+};
+
+static const AVOption mjpeg_options[] = {
+ { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE },
@ -116,6 +115,7 @@ Index: FFmpeg/libavcodec/videotoolboxenc.c
+ AV_CODEC_CAP_HARDWARE,
+ .priv_data_size = sizeof(VTEncContext),
+ .p.pix_fmts = mjpeg_pix_fmts,
+ .defaults = vt_defaults,
+ .init = vtenc_init,
+ FF_CODEC_ENCODE_CB(vtenc_frame),
+ .close = vtenc_close,
@ -123,4 +123,4 @@ Index: FFmpeg/libavcodec/videotoolboxenc.c
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
+ .p.wrapper_name = "videotoolbox",
+ .hw_configs = vt_encode_hw_configs,
+};
};

View File

@ -0,0 +1,58 @@
Index: FFmpeg/libavcodec/avcodec.h
===================================================================
--- FFmpeg.orig/libavcodec/avcodec.h
+++ FFmpeg/libavcodec/avcodec.h
@@ -2174,6 +2174,13 @@ typedef struct AVHWAccel {
#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
/**
+ * Some hardware decoders (like VideoToolbox) support a decode session priority
+ * that runs the decode pipeline at a lower priority than is used for realtime decoding.
+ * This is useful for background processing without interrupting normal playback.
+ */
+#define AV_HWACCEL_FLAG_LOW_PRIORITY (1 << 4)
+
+/**
* @}
*/
Index: FFmpeg/libavcodec/options_table.h
===================================================================
--- FFmpeg.orig/libavcodec/options_table.h
+++ FFmpeg/libavcodec/options_table.h
@@ -407,6 +407,7 @@ static const AVOption avcodec_options[]
{"mastering_display_metadata", .default_val.i64 = AV_PKT_DATA_MASTERING_DISPLAY_METADATA, .type = AV_OPT_TYPE_CONST, .flags = A|D, .unit = "side_data_pkt" },
{"content_light_level", .default_val.i64 = AV_PKT_DATA_CONTENT_LIGHT_LEVEL, .type = AV_OPT_TYPE_CONST, .flags = A|D, .unit = "side_data_pkt" },
{"icc_profile", .default_val.i64 = AV_PKT_DATA_ICC_PROFILE, .type = AV_OPT_TYPE_CONST, .flags = A|D, .unit = "side_data_pkt" },
+{"low_priority", "attempt to run decode pipeline at a lower priority than is used for realtime decoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_LOW_PRIORITY }, INT_MIN, INT_MAX, V | D, .unit = "hwaccel_flags"},
{NULL},
};
Index: FFmpeg/libavcodec/videotoolbox.c
===================================================================
--- FFmpeg.orig/libavcodec/videotoolbox.c
+++ FFmpeg/libavcodec/videotoolbox.c
@@ -984,6 +984,23 @@ static int videotoolbox_start(AVCodecCon
av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
return AVERROR_INVALIDDATA;
case 0:
+ if (avctx->skip_frame >= AVDISCARD_NONKEY) {
+ status = VTSessionSetProperty(videotoolbox->session,
+ kVTDecompressionPropertyKey_OnlyTheseFrames,
+ kVTDecompressionProperty_OnlyTheseFrames_KeyFrames);
+ if (status) {
+ av_log(avctx, AV_LOG_WARNING, "kVTDecompressionProperty_OnlyTheseFrames_KeyFrames is not supported on this device. Ignoring.\n");
+ }
+ }
+ if (avctx->hwaccel_flags & AV_HWACCEL_FLAG_LOW_PRIORITY) {
+ status = VTSessionSetProperty(videotoolbox->session,
+ kVTDecompressionPropertyKey_RealTime,
+ kCFBooleanFalse);
+ av_log(avctx, AV_LOG_INFO, "Decoder running at lower priority.\n");
+ if (status) {
+ av_log(avctx, AV_LOG_WARNING, "kVTDecompressionPropertyKey_RealTime is not supported on this device. Ignoring.\n");
+ }
+ }
return 0;
default:
av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);

View File

@ -1,203 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vulkan.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
@@ -111,6 +111,9 @@ typedef struct VulkanDevicePriv {
/* Intel */
int dev_is_intel;
+
+ /* Amd */
+ int dev_is_amd;
} VulkanDevicePriv;
typedef struct VulkanFramesPriv {
@@ -231,6 +234,7 @@ static const struct {
{ AV_PIX_FMT_BGR32, { VK_FORMAT_A8B8G8R8_UNORM_PACK32 } },
{ AV_PIX_FMT_0BGR32, { VK_FORMAT_A8B8G8R8_UNORM_PACK32 } },
+ { AV_PIX_FMT_X2BGR10, { VK_FORMAT_A2B10G10R10_UNORM_PACK32 } },
{ AV_PIX_FMT_X2RGB10, { VK_FORMAT_A2R10G10B10_UNORM_PACK32 } },
{ AV_PIX_FMT_GBRAP, { VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UNORM } },
@@ -1482,6 +1486,13 @@ static int vulkan_device_init(AVHWDevice
p->dev_is_nvidia = (p->props.properties.vendorID == 0x10de);
p->dev_is_intel = (p->props.properties.vendorID == 0x8086);
+ p->dev_is_amd = (p->props.properties.vendorID == 0x1002);
+
+#if CONFIG_LIBDRM
+ /* AMD encoder requires contiguous and linear images */
+ if (p->dev_is_amd)
+ p->use_linear_images = 1;
+#endif
vk->GetPhysicalDeviceQueueFamilyProperties(hwctx->phys_dev, &queue_num, NULL);
if (!queue_num) {
@@ -2292,7 +2303,7 @@ static int vulkan_frames_init(AVHWFrames
if (!(hwctx->flags & AV_VK_FRAME_FLAG_NONE)) {
if (p->contiguous_planes == 1 ||
- ((p->contiguous_planes == -1) && p->dev_is_intel))
+ (p->contiguous_planes == -1 && (p->dev_is_intel || p->dev_is_amd)))
hwctx->flags |= AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY;
}
@@ -2360,19 +2371,23 @@ static int vulkan_frames_init(AVHWFrames
/* Finally get all modifiers from the device */
vk->GetPhysicalDeviceFormatProperties2(dev_hwctx->phys_dev, fmt[0], &prop);
- /* Reject any modifiers that don't match our requirements */
- for (int i = 0; i < mod_props_list.drmFormatModifierCount; i++) {
- if (!(mod_props[i].drmFormatModifierTilingFeatures & hwctx->usage))
- continue;
-
- modifiers[modifier_count++] = mod_props[i].drmFormatModifier;
- }
+ if (p->use_linear_images) {
+ has_modifiers = 0;
+ modifiers[modifier_count++] = 0x0;
+ } else {
+ /* Reject any modifiers that don't match our requirements */
+ for (int i = 0; i < mod_props_list.drmFormatModifierCount; i++) {
+ if (!(mod_props[i].drmFormatModifierTilingFeatures & hwctx->usage))
+ continue;
+ modifiers[modifier_count++] = mod_props[i].drmFormatModifier;
+ }
- if (!modifier_count) {
- av_log(hwfc, AV_LOG_ERROR, "None of the given modifiers supports"
- " the usage flags!\n");
- av_freep(&mod_props);
- return AVERROR(EINVAL);
+ if (!modifier_count) {
+ av_log(hwfc, AV_LOG_ERROR, "None of the given modifiers supports"
+ " the usage flags!\n");
+ av_freep(&mod_props);
+ return AVERROR(EINVAL);
+ }
}
modifier_info->drmFormatModifierCount = modifier_count;
@@ -2465,9 +2480,11 @@ static void vulkan_unmap_frame(AVHWFrame
{
VulkanMapping *map = hwmap->priv;
AVVulkanDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ AVVulkanFramesContext *hwfctx = hwfc->hwctx;
const int planes = av_pix_fmt_count_planes(hwfc->sw_format);
VulkanDevicePriv *p = hwfc->device_ctx->internal->priv;
FFVulkanFunctions *vk = &p->vkfn;
+ int mem_planes = 0;
/* Check if buffer needs flushing */
if ((map->flags & AV_HWFRAME_MAP_WRITE) &&
@@ -2489,7 +2506,8 @@ static void vulkan_unmap_frame(AVHWFrame
}
}
- for (int i = 0; i < planes; i++)
+ mem_planes = hwfctx->flags & AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY ? 1 : planes;
+ for (int i = 0; i < mem_planes; i++)
vk->UnmapMemory(hwctx->act_dev, map->frame->mem[i]);
av_free(map);
@@ -2638,6 +2656,10 @@ static const struct {
{ DRM_FORMAT_XRGB8888, VK_FORMAT_B8G8R8A8_UNORM },
{ DRM_FORMAT_ABGR8888, VK_FORMAT_R8G8B8A8_UNORM },
{ DRM_FORMAT_XBGR8888, VK_FORMAT_R8G8B8A8_UNORM },
+ { DRM_FORMAT_ARGB2101010, VK_FORMAT_A2R10G10B10_UNORM_PACK32 },
+ { DRM_FORMAT_XRGB2101010, VK_FORMAT_A2R10G10B10_UNORM_PACK32 },
+ { DRM_FORMAT_ABGR2101010, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
+ { DRM_FORMAT_XBGR2101010, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
// All these DRM_FORMATs were added in the same libdrm commit.
#ifdef DRM_FORMAT_XYUV8888
@@ -2672,6 +2694,7 @@ static int vulkan_map_from_drm_frame_des
const AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)src->data[0];
VkBindImageMemoryInfo bind_info[AV_DRM_MAX_PLANES];
VkBindImagePlaneMemoryInfo plane_info[AV_DRM_MAX_PLANES];
+ const int has_modifiers = !!(p->extensions & FF_VK_EXT_DRM_MODIFIER_FLAGS);
for (int i = 0; i < desc->nb_layers; i++) {
if (drm_to_vulkan_fmt(desc->layers[i].format) == VK_FORMAT_UNDEFINED) {
@@ -2681,13 +2704,22 @@ static int vulkan_map_from_drm_frame_des
}
}
+ if (!has_modifiers &&
+ desc->objects[0].format_modifier != DRM_FORMAT_MOD_INVALID &&
+ desc->objects[0].format_modifier != DRM_FORMAT_MOD_LINEAR) {
+ av_log(ctx, AV_LOG_ERROR, "The driver can only import DRM frame with invalid/linear modifier!\n");
+ err = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
if (!(f = av_vk_frame_alloc())) {
av_log(ctx, AV_LOG_ERROR, "Unable to allocate memory for AVVkFrame!\n");
err = AVERROR(ENOMEM);
goto fail;
}
- f->tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ f->tiling = has_modifiers ? VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT :
+ VK_IMAGE_TILING_LINEAR;
for (int i = 0; i < desc->nb_layers; i++) {
const int planes = desc->layers[i].nb_planes;
@@ -2758,7 +2790,7 @@ static int vulkan_map_from_drm_frame_des
};
VkPhysicalDeviceImageFormatInfo2 fmt_props = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
- .pNext = &props_ext,
+ .pNext = has_modifiers ? &props_ext : NULL,
.format = create_info.format,
.type = create_info.imageType,
.tiling = create_info.tiling,
@@ -2776,6 +2808,10 @@ static int vulkan_map_from_drm_frame_des
goto fail;
}
+ /* Skip checking if the driver has no support for the DRM modifier extension */
+ if (!has_modifiers && !fmt_props.pNext)
+ fmt_props.pNext = &props_ext;
+
/* Set the image width/height */
get_plane_wh(&create_info.extent.width, &create_info.extent.height,
hwfc->sw_format, src->width, src->height, i);
@@ -3328,6 +3364,7 @@ static int vulkan_map_to_drm(AVHWFramesC
AVVulkanDeviceContext *hwctx = hwfc->device_ctx->hwctx;
AVVulkanFramesContext *hwfctx = hwfc->hwctx;
const int planes = av_pix_fmt_count_planes(hwfc->sw_format);
+ const int has_modifiers = !!(p->extensions & FF_VK_EXT_DRM_MODIFIER_FLAGS);
VkImageDrmFormatModifierPropertiesEXT drm_mod = {
.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
};
@@ -3355,10 +3392,16 @@ static int vulkan_map_to_drm(AVHWFramesC
if (err < 0)
goto end;
- ret = vk->GetImageDrmFormatModifierPropertiesEXT(hwctx->act_dev, f->img[0],
+ if (has_modifiers) {
+ ret = vk->GetImageDrmFormatModifierPropertiesEXT(hwctx->act_dev, f->img[0],
&drm_mod);
- if (ret != VK_SUCCESS) {
- av_log(hwfc, AV_LOG_ERROR, "Failed to retrieve DRM format modifier!\n");
+ if (ret != VK_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to retrieve DRM format modifier!\n");
+ err = AVERROR_EXTERNAL;
+ goto end;
+ }
+ } else if (f->tiling != VK_IMAGE_TILING_LINEAR) {
+ av_log(hwfc, AV_LOG_ERROR, "The driver can only export linear images to DRM frame!\n");
err = AVERROR_EXTERNAL;
goto end;
}
@@ -3380,7 +3423,7 @@ static int vulkan_map_to_drm(AVHWFramesC
drm_desc->nb_objects++;
drm_desc->objects[i].size = f->size[i];
- drm_desc->objects[i].format_modifier = drm_mod.drmFormatModifier;
+ drm_desc->objects[i].format_modifier = has_modifiers ? drm_mod.drmFormatModifier : 0x0;
}
drm_desc->nb_layers = planes;

View File

@ -0,0 +1,27 @@
Index: FFmpeg/libavutil/hwcontext_videotoolbox.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_videotoolbox.c
+++ FFmpeg/libavutil/hwcontext_videotoolbox.c
@@ -825,6 +825,14 @@ static int vt_device_create(AVHWDeviceCo
return 0;
}
+static int vt_device_derive(AVHWDeviceContext *device_ctx,
+ AVHWDeviceContext *src_ctx, AVDictionary *opts,
+ int flags)
+{
+ // There is no context to be set up with VT, just return.
+ return 0;
+}
+
const HWContextType ff_hwcontext_type_videotoolbox = {
.type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
.name = "videotoolbox",
@@ -832,6 +840,7 @@ const HWContextType ff_hwcontext_type_vi
.frames_hwctx_size = sizeof(VTFramesContext),
.device_create = vt_device_create,
+ .device_derive = vt_device_derive,
.frames_init = vt_frames_init,
.frames_get_buffer = vt_get_buffer,
.frames_get_constraints = vt_frames_get_constraints,

View File

@ -1,59 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/vf_libplacebo.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_libplacebo.c
+++ jellyfin-ffmpeg/libavfilter/vf_libplacebo.c
@@ -98,7 +98,6 @@ typedef struct LibplaceboContext {
float polar_cutoff;
int disable_linear;
int disable_builtin;
- int force_icc_lut;
int force_dither;
int disable_fbos;
@@ -138,6 +137,7 @@ typedef struct LibplaceboContext {
float desat_exp;
int gamut_warning;
int gamut_clipping;
+ int force_icc_lut;
/* pl_dither_params */
int dithering;
@@ -414,7 +414,9 @@ static int process_frames(AVFilterContex
.minimum_peak = s->min_peak,
.scene_threshold_low = s->scene_low,
.scene_threshold_high = s->scene_high,
+#if PL_API_VER < 256
.overshoot_margin = s->overshoot,
+#endif
),
.color_map_params = pl_color_map_params(
@@ -446,7 +448,6 @@ static int process_frames(AVFilterContex
.polar_cutoff = s->polar_cutoff,
.disable_linear_scaling = s->disable_linear,
.disable_builtin_scalers = s->disable_builtin,
- .force_icc_lut = s->force_icc_lut,
.force_dither = s->force_dither,
.disable_fbos = s->disable_fbos,
};
@@ -774,7 +775,7 @@ static const AVOption libplacebo_options
{ "polar_cutoff", "Polar LUT cutoff", OFFSET(polar_cutoff), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0.0, 1.0, DYNAMIC },
{ "disable_linear", "Disable linear scaling", OFFSET(disable_linear), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
{ "disable_builtin", "Disable built-in scalers", OFFSET(disable_builtin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
- { "force_icc_lut", "Force the use of a full ICC 3DLUT for color mapping", OFFSET(force_icc_lut), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
+ { "force_icc_lut", "Deprecated, does nothing", OFFSET(force_icc_lut), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC | AV_OPT_FLAG_DEPRECATED },
{ "force_dither", "Force dithering", OFFSET(force_dither), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
{ "disable_fbos", "Force-disable FBOs", OFFSET(disable_fbos), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DYNAMIC },
{ NULL },
Index: jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vulkan.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
@@ -1394,6 +1394,7 @@ static int vulkan_device_create_internal
goto end;
}
p->device_features_1_2.timelineSemaphore = 1;
+ p->device_features_1_2.hostQueryReset = dev_features_1_2.hostQueryReset;
/* Setup queue family */
if ((err = setup_queue_families(ctx, &dev_info)))

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/configure
Index: FFmpeg/configure
===================================================================
--- jellyfin-ffmpeg.orig/configure
+++ jellyfin-ffmpeg/configure
@@ -3149,6 +3149,8 @@ thumbnail_cuda_filter_deps="ffnvcodec"
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -3297,6 +3297,8 @@ thumbnail_cuda_filter_deps="ffnvcodec"
thumbnail_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
tonemap_cuda_filter_deps="ffnvcodec const_nan"
tonemap_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
@ -11,12 +11,12 @@ Index: jellyfin-ffmpeg/configure
transpose_npp_filter_deps="ffnvcodec libnpp"
overlay_cuda_filter_deps="ffnvcodec"
overlay_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
Index: jellyfin-ffmpeg/libavfilter/Makefile
Index: FFmpeg/libavfilter/Makefile
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/Makefile
+++ jellyfin-ffmpeg/libavfilter/Makefile
@@ -523,6 +523,8 @@ OBJS-$(CONFIG_TONEMAP_CUDA_FILTER)
OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER) += vf_tonemap_vaapi.o vaapi_vpp.o
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -540,6 +540,8 @@ OBJS-$(CONFIG_TONEMAP_VIDEOTOOLBOX_FILTE
metal/utils.o
OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
+OBJS-$(CONFIG_TRANSPOSE_CUDA_FILTER) += vf_transpose_cuda.o vf_transpose_cuda.ptx.o \
@ -24,23 +24,23 @@ Index: jellyfin-ffmpeg/libavfilter/Makefile
OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += vf_transpose_npp.o
OBJS-$(CONFIG_TRANSPOSE_OPENCL_FILTER) += vf_transpose_opencl.o opencl.o opencl/transpose.o
OBJS-$(CONFIG_TRANSPOSE_VAAPI_FILTER) += vf_transpose_vaapi.o vaapi_vpp.o
Index: jellyfin-ffmpeg/libavfilter/allfilters.c
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/allfilters.c
+++ jellyfin-ffmpeg/libavfilter/allfilters.c
@@ -489,6 +489,7 @@ extern const AVFilter ff_vf_tonemap_open
extern const AVFilter ff_vf_tonemap_vaapi;
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -504,6 +504,7 @@ extern const AVFilter ff_vf_tonemap_vaap
extern const AVFilter ff_vf_tonemap_videotoolbox;
extern const AVFilter ff_vf_tpad;
extern const AVFilter ff_vf_transpose;
+extern const AVFilter ff_vf_transpose_cuda;
extern const AVFilter ff_vf_transpose_npp;
extern const AVFilter ff_vf_transpose_opencl;
extern const AVFilter ff_vf_transpose_vaapi;
Index: jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.c
Index: FFmpeg/libavfilter/vf_transpose_cuda.c
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.c
@@ -0,0 +1,478 @@
+++ FFmpeg/libavfilter/vf_transpose_cuda.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2024 NyanMisaka
+ *
@ -70,7 +70,6 @@ Index: jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.c
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "transpose.h"
@ -471,19 +470,19 @@ Index: jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.c
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+
+static const AVOption cudatranspose_options[] = {
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 0, FLAGS, "dir" },
+ { "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, 0, 0, FLAGS, "dir" },
+ { "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, 0, 0, FLAGS, "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, 0, 0, FLAGS, "dir" },
+ { "reversal", "rotate by half-turn", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, 0, 0, FLAGS, "dir" },
+ { "hflip", "flip horizontally", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, 0, 0, FLAGS, "dir" },
+ { "vflip", "flip vertically", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, 0, 0, FLAGS, "dir" },
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, .unit = "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 0, FLAGS, .unit = "dir" },
+ { "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, 0, 0, FLAGS, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, 0, 0, FLAGS, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, 0, 0, FLAGS, .unit = "dir" },
+ { "reversal", "rotate by half-turn", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, 0, 0, FLAGS, .unit = "dir" },
+ { "hflip", "flip horizontally", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, 0, 0, FLAGS, .unit = "dir" },
+ { "vflip", "flip vertically", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, 0, 0, FLAGS, .unit = "dir" },
+
+ { "passthrough", "do not apply transposition if the input matches the specified geometry", OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_PT_TYPE_NONE }, 0, 2, FLAGS, "passthrough" },
+ { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE }, 0, 0, FLAGS, "passthrough" },
+ { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, 0, 0, FLAGS, "passthrough" },
+ { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT }, 0, 0, FLAGS, "passthrough" },
+ { "passthrough", "do not apply transposition if the input matches the specified geometry", OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_PT_TYPE_NONE }, 0, 2, FLAGS, .unit = "passthrough" },
+ { "none", "always apply transposition", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE }, 0, 0, FLAGS, .unit = "passthrough" },
+ { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, 0, 0, FLAGS, .unit = "passthrough" },
+ { "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT }, 0, 0, FLAGS, .unit = "passthrough" },
+
+ { NULL },
+};
@ -519,10 +518,10 @@ Index: jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.c
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
Index: jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.cu
Index: FFmpeg/libavfilter/vf_transpose_cuda.cu
===================================================================
--- /dev/null
+++ jellyfin-ffmpeg/libavfilter/vf_transpose_cuda.cu
+++ FFmpeg/libavfilter/vf_transpose_cuda.cu
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2024 NyanMisaka

View File

@ -1,7 +1,7 @@
Index: jellyfin-ffmpeg/libavfilter/opencl/transpose.cl
Index: FFmpeg/libavfilter/opencl/transpose.cl
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/opencl/transpose.cl
+++ jellyfin-ffmpeg/libavfilter/opencl/transpose.cl
--- FFmpeg.orig/libavfilter/opencl/transpose.cl
+++ FFmpeg/libavfilter/opencl/transpose.cl
@@ -26,8 +26,10 @@ kernel void transpose(__write_only image
int x = get_global_id(0);
int y = get_global_id(1);
@ -15,10 +15,10 @@ Index: jellyfin-ffmpeg/libavfilter/opencl/transpose.cl
float4 data = read_imagef(src, sampler, (int2)(xin, yin));
if (x < size.x && y < size.y)
Index: jellyfin-ffmpeg/libavfilter/vf_transpose_opencl.c
Index: FFmpeg/libavfilter/vf_transpose_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_transpose_opencl.c
+++ jellyfin-ffmpeg/libavfilter/vf_transpose_opencl.c
--- FFmpeg.orig/libavfilter/vf_transpose_opencl.c
+++ FFmpeg/libavfilter/vf_transpose_opencl.c
@@ -101,8 +101,20 @@ static int transpose_opencl_config_outpu
return AVERROR(EINVAL);
}
@ -59,8 +59,8 @@ Index: jellyfin-ffmpeg/libavfilter/vf_transpose_opencl.c
#define OFFSET(x) offsetof(TransposeOpenCLContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption transpose_opencl_options[] = {
- { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 3, FLAGS, "dir" },
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, "dir" },
- { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 3, FLAGS, .unit = "dir" },
+ { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, .unit = "dir" },
{ "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
{ "clock", "rotate clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .flags=FLAGS, .unit = "dir" },
{ "cclock", "rotate counter-clockwise", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .flags=FLAGS, .unit = "dir" },
@ -70,4 +70,4 @@ Index: jellyfin-ffmpeg/libavfilter/vf_transpose_opencl.c
+ { "vflip", "flip vertically", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, .flags=FLAGS, .unit = "dir" },
{ "passthrough", "do not apply transposition if the input matches the specified geometry",
OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, "passthrough" },
OFFSET(passthrough), AV_OPT_TYPE_INT, {.i64=TRANSPOSE_PT_TYPE_NONE}, 0, INT_MAX, FLAGS, .unit = "passthrough" },

View File

@ -1,8 +1,8 @@
Index: jellyfin-ffmpeg/libavcodec/vaapi_encode.c
Index: FFmpeg/libavcodec/vaapi_encode.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/vaapi_encode.c
+++ jellyfin-ffmpeg/libavcodec/vaapi_encode.c
@@ -2714,6 +2714,17 @@ static av_cold int vaapi_encode_create_r
--- FFmpeg.orig/libavcodec/vaapi_encode.c
+++ FFmpeg/libavcodec/vaapi_encode.c
@@ -2728,6 +2728,17 @@ static av_cold int vaapi_encode_create_r
av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
"reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
@ -20,11 +20,11 @@ Index: jellyfin-ffmpeg/libavcodec/vaapi_encode.c
if (ctx->surface_width < constraints->min_width ||
ctx->surface_height < constraints->min_height ||
ctx->surface_width > constraints->max_width ||
Index: jellyfin-ffmpeg/libavutil/hwcontext.h
Index: FFmpeg/libavutil/hwcontext.h
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext.h
+++ jellyfin-ffmpeg/libavutil/hwcontext.h
@@ -478,6 +478,13 @@ typedef struct AVHWFramesConstraints {
--- FFmpeg.orig/libavutil/hwcontext.h
+++ FFmpeg/libavutil/hwcontext.h
@@ -467,6 +467,13 @@ typedef struct AVHWFramesConstraints {
*/
int max_width;
int max_height;
@ -38,11 +38,11 @@ Index: jellyfin-ffmpeg/libavutil/hwcontext.h
} AVHWFramesConstraints;
/**
Index: jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
Index: FFmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vaapi.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vaapi.c
@@ -276,6 +276,14 @@ static int vaapi_frames_get_constraints(
--- FFmpeg.orig/libavutil/hwcontext_vaapi.c
+++ FFmpeg/libavutil/hwcontext_vaapi.c
@@ -297,6 +297,14 @@ static int vaapi_frames_get_constraints(
case VASurfaceAttribMaxHeight:
constraints->max_height = attr_list[i].value.value.i;
break;

View File

@ -1,27 +0,0 @@
Index: jellyfin-ffmpeg/libavfilter/vf_tonemap_opencl.c
===================================================================
--- jellyfin-ffmpeg.orig/libavfilter/vf_tonemap_opencl.c
+++ jellyfin-ffmpeg/libavfilter/vf_tonemap_opencl.c
@@ -333,6 +333,7 @@ static int tonemap_opencl_init(AVFilterC
cl_uint max_compute_units, device_vendor_id;
cl_int cle;
cl_mem_flags dovi_buf_flags = CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR;
+ char *device_vendor = NULL;
char *device_name = NULL;
char *device_exts = NULL;
int i, j, err;
@@ -407,6 +408,14 @@ static int tonemap_opencl_init(AVFilterC
}
av_free(device_name);
}
+ } else if (device_is_integrated == CL_TRUE) {
+ device_vendor = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_VENDOR);
+ device_name = check_opencl_device_str(ctx->ocf.hwctx->device_id, CL_DEVICE_NAME);
+ if (!strstr(device_vendor, "ARM") &&
+ !strstr(device_name, "Mali"))
+ ctx->tradeoff = 0;
+ av_free(device_vendor);
+ av_free(device_name);
} else {
ctx->tradeoff = 0;
}

View File

@ -1,14 +1,8 @@
Subject: [PATCH] avcodec/libopusenc: Allow 5.1(side) channel inputs
---
Index: libavcodec/libopusenc.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
Index: FFmpeg/libavcodec/libopusenc.c
===================================================================
diff --git a/libavcodec/libopusenc.c b/libavcodec/libopusenc.c
--- a/libavcodec/libopusenc.c (revision 38aaefefec762dd185b631298752d489dcf084fe)
+++ b/libavcodec/libopusenc.c (revision 104006c9a93f3cd72b21b243884ea91449cbdd10)
@@ -196,6 +196,10 @@
--- FFmpeg.orig/libavcodec/libopusenc.c
+++ FFmpeg/libavcodec/libopusenc.c
@@ -196,6 +196,10 @@ static int libopus_check_vorbis_layout(A
av_log(avctx, AV_LOG_WARNING,
"No channel layout specified. Opus encoder will use Vorbis "
"channel layout for %d channels.\n", avctx->ch_layout.nb_channels);
@ -18,4 +12,4 @@ diff --git a/libavcodec/libopusenc.c b/libavcodec/libopusenc.c
+ return 0;
} else if (av_channel_layout_compare(&avctx->ch_layout, &ff_vorbis_ch_layouts[avctx->ch_layout.nb_channels - 1])) {
char name[32];

View File

@ -2,17 +2,17 @@ Index: FFmpeg/configure
===================================================================
--- FFmpeg.orig/configure
+++ FFmpeg/configure
@@ -2211,6 +2211,9 @@ HEADERS_LIST="
@@ -2314,6 +2314,9 @@ HEADERS_LIST="
INTRINSICS_LIST="
intrinsics_neon
+ intrinsics_sse42
+ intrinsics_avx2
+ intrinsics_fma3
+ intrinsics_avx2
"
MATH_FUNCS="
@@ -2676,6 +2679,10 @@ avx2_deps="avx"
@@ -2797,6 +2800,10 @@ avx2_deps="avx"
avx512_deps="avx2"
avx512icl_deps="avx512"
@ -23,15 +23,15 @@ Index: FFmpeg/configure
mmx_external_deps="x86asm"
mmx_inline_deps="inline_asm x86"
mmx_suggest="mmx_external mmx_inline"
@@ -3772,6 +3779,7 @@ tinterlace_filter_deps="gpl"
@@ -3934,6 +3941,7 @@ tinterlace_filter_deps="gpl"
tinterlace_merge_test_deps="tinterlace_filter"
tinterlace_pad_test_deps="tinterlace_filter"
tonemap_filter_deps="const_nan"
+tonemapx_filter_deps="const_nan"
tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
tonemap_opencl_filter_deps="opencl const_nan"
tonemap_videotoolbox_filter_deps="metal corevideo videotoolbox const_nan"
@@ -6230,6 +6238,19 @@ fi
tonemap_opencl_filter_deps="opencl const_nan"
@@ -6464,6 +6472,19 @@ fi
check_cc intrinsics_neon arm_neon.h "int16x8_t test = vdupq_n_s16(0)"
@ -51,7 +51,7 @@ Index: FFmpeg/configure
check_ldflags -Wl,--as-needed
check_ldflags -Wl,-z,noexecstack
@@ -7295,6 +7316,16 @@ elif enabled gcc; then
@@ -7586,6 +7607,16 @@ elif enabled gcc; then
check_cflags -mpreferred-stack-boundary=4
;;
esac
@ -72,22 +72,24 @@ Index: FFmpeg/libavfilter/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/Makefile
+++ FFmpeg/libavfilter/Makefile
@@ -516,6 +516,7 @@ OBJS-$(CONFIG_TMEDIAN_FILTER)
@@ -530,6 +530,7 @@ OBJS-$(CONFIG_TMEDIAN_FILTER)
OBJS-$(CONFIG_TMIDEQUALIZER_FILTER) += vf_tmidequalizer.o
OBJS-$(CONFIG_TMIX_FILTER) += vf_mix.o framesync.o
OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o
+OBJS-$(CONFIG_TONEMAPX_FILTER) += vf_tonemapx.o
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o opencl.o \
opencl/tonemap.o opencl/colorspace_common.o
OBJS-$(CONFIG_TONEMAP_CUDA_FILTER) += vf_tonemap_cuda.o cuda/tonemap.ptx.o \
cuda/host_util.o
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o opencl.o \
Index: FFmpeg/libavfilter/aarch64/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/aarch64/Makefile
+++ FFmpeg/libavfilter/aarch64/Makefile
@@ -1,3 +1,4 @@
@@ -1,5 +1,6 @@
OBJS-$(CONFIG_BWDIF_FILTER) += aarch64/vf_bwdif_init_aarch64.o
OBJS-$(CONFIG_NLMEANS_FILTER) += aarch64/vf_nlmeans_init.o
+OBJS-$(CONFIG_TONEMAPX_FILTER) += aarch64/vf_tonemapx_intrin_neon.o
NEON-OBJS-$(CONFIG_BWDIF_FILTER) += aarch64/vf_bwdif_neon.o
NEON-OBJS-$(CONFIG_NLMEANS_FILTER) += aarch64/vf_nlmeans_neon.o
Index: FFmpeg/libavfilter/aarch64/vf_tonemapx_intrin_neon.c
===================================================================
@ -1348,8 +1350,8 @@ Index: FFmpeg/libavfilter/aarch64/vf_tonemapx_intrin_neon.h
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_TONEMAPX_INTRIN_NEON_H
+#define AVFILTER_TONEMAPX_INTRIN_NEON_H
+#ifndef AVFILTER_AARCH64_TONEMAPX_INTRIN_NEON_H
+#define AVFILTER_AARCH64_TONEMAPX_INTRIN_NEON_H
+
+#include "libavfilter/vf_tonemapx.h"
+
@ -1381,12 +1383,12 @@ Index: FFmpeg/libavfilter/aarch64/vf_tonemapx_intrin_neon.h
+ int width, int height,
+ const struct TonemapIntParams *params);
+
+#endif // AVFILTER_TONEMAPX_INTRIN_NEON_H
+#endif // AVFILTER_AARCH64_TONEMAPX_INTRIN_NEON_H
Index: FFmpeg/libavfilter/allfilters.c
===================================================================
--- FFmpeg.orig/libavfilter/allfilters.c
+++ FFmpeg/libavfilter/allfilters.c
@@ -484,6 +484,7 @@ extern const AVFilter ff_vf_tmedian;
@@ -498,6 +498,7 @@ extern const AVFilter ff_vf_tmedian;
extern const AVFilter ff_vf_tmidequalizer;
extern const AVFilter ff_vf_tmix;
extern const AVFilter ff_vf_tonemap;
@ -1475,7 +1477,7 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
===================================================================
--- /dev/null
+++ FFmpeg/libavfilter/vf_tonemapx.c
@@ -0,0 +1,1267 @@
@@ -0,0 +1,1260 @@
+/*
+ * This file is part of FFmpeg.
+ *
@ -2481,8 +2483,8 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+ td.desc = desc;
+ td.odesc = odesc;
+ td.peak = peak;
+ ctx->internal->execute(ctx, s->filter_slice, &td, NULL,
+ FFMIN(outlink->h >> FFMAX(desc->log2_chroma_h, odesc->log2_chroma_h), ff_filter_get_nb_threads(ctx)));
+ ff_filter_execute(ctx, s->filter_slice, &td, NULL,
+ FFMIN(outlink->h >> FFMAX(desc->log2_chroma_h, odesc->log2_chroma_h), ff_filter_get_nb_threads(ctx)));
+
+ av_frame_free(&in);
+
@ -2640,7 +2642,7 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+ s->tonemap_func_planar10 = tonemap_frame_420p10_2_420p10;
+ }
+
+ switch(active_simd) {
+ switch (active_simd) {
+ case SIMD_NEON:
+ av_log(s, AV_LOG_INFO, "Using CPU capability: NEON\n");
+ break;
@ -2656,7 +2658,7 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+ break;
+ }
+
+ switch(s->tonemap) {
+ switch (s->tonemap) {
+ case TONEMAP_GAMMA:
+ if (isnan(s->param))
+ s->param = 1.8f;
@ -2680,33 +2682,33 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+#define OFFSET(x) offsetof(TonemapxContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption tonemapx_options[] = {
+ { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_BT2390}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_BT2390}, 0, 0, FLAGS, "tonemap" },
+ { "transfer", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
+ { "t", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, "transfer" },
+ { "matrix", "set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, "matrix" },
+ { "m", "set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, "matrix" },
+ { "primaries", "set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, "primaries" },
+ { "p", "set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, "primaries" },
+ { "range", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, "range" },
+ { "r", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range" },
+ { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_BT2390}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, .unit = "tonemap" },
+ { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "linear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "gamma", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "clip", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "hable", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "mobius", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "bt2390", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_BT2390}, 0, 0, FLAGS, .unit = "tonemap" },
+ { "transfer", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "t", "set transfer characteristic", OFFSET(trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_BT709}, -1, INT_MAX, FLAGS, .unit = "transfer" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709}, 0, 0, FLAGS, .unit = "transfer" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10}, 0, 0, FLAGS, .unit = "transfer" },
+ { "matrix", "set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "m", "set colorspace matrix", OFFSET(spc), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_BT709}, -1, INT_MAX, FLAGS, .unit = "matrix" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709}, 0, 0, FLAGS, .unit = "matrix" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL}, 0, 0, FLAGS, .unit = "matrix" },
+ { "primaries", "set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "p", "set color primaries", OFFSET(pri), AV_OPT_TYPE_INT, {.i64 = AVCOL_PRI_BT709}, -1, INT_MAX, FLAGS, .unit = "primaries" },
+ { "bt709", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT709}, 0, 0, FLAGS, .unit = "primaries" },
+ { "bt2020", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_BT2020}, 0, 0, FLAGS, .unit = "primaries" },
+ { "range", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "r", "set color range", OFFSET(range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_MPEG}, -1, INT_MAX, FLAGS, .unit = "range" },
+ { "tv", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "pc", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "limited", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "full", 0, 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_JPEG}, 0, 0, FLAGS, .unit = "range" },
+ { "format", "output format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+ { "param", "tonemap parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
+ { "desat", "desaturation strength", OFFSET(desat), AV_OPT_TYPE_DOUBLE, {.dbl = 0}, 0, DBL_MAX, FLAGS },
@ -2724,13 +2726,6 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+ },
+};
+
+static const AVFilterPad tonemapx_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+};
+
+AVFilter ff_vf_tonemapx = {
+ .name = "tonemapx",
+ .description = NULL_IF_CONFIG_SMALL("SIMD optimized HDR to SDR tonemapping"),
@ -2739,7 +2734,7 @@ Index: FFmpeg/libavfilter/vf_tonemapx.c
+ .priv_size = sizeof(TonemapxContext),
+ .priv_class = &tonemapx_class,
+ FILTER_INPUTS(tonemapx_inputs),
+ FILTER_OUTPUTS(tonemapx_outputs),
+ FILTER_OUTPUTS(ff_video_default_filterpad),
+ FILTER_QUERY_FUNC(query_formats),
+ .flags = AVFILTER_FLAG_SLICE_THREADS,
+};
@ -2846,20 +2841,20 @@ Index: FFmpeg/libavfilter/vf_tonemapx.h
+ int width, int height,
+ const struct TonemapIntParams *params);
+
+#endif //AVFILTER_TONEMAPX_H
+#endif // AVFILTER_TONEMAPX_H
Index: FFmpeg/libavfilter/x86/Makefile
===================================================================
--- FFmpeg.orig/libavfilter/x86/Makefile
+++ FFmpeg/libavfilter/x86/Makefile
@@ -39,6 +39,8 @@ OBJS-$(CONFIG_VOLUME_FILTER)
OBJS-$(CONFIG_V360_FILTER) += x86/vf_v360_init.o
OBJS-$(CONFIG_W3FDIF_FILTER) += x86/vf_w3fdif_init.o
OBJS-$(CONFIG_YADIF_FILTER) += x86/vf_yadif_init.o
@@ -34,6 +34,8 @@ OBJS-$(CONFIG_STEREO3D_FILTER)
OBJS-$(CONFIG_TBLEND_FILTER) += x86/vf_blend_init.o
OBJS-$(CONFIG_THRESHOLD_FILTER) += x86/vf_threshold_init.o
OBJS-$(CONFIG_TINTERLACE_FILTER) += x86/vf_tinterlace_init.o
+OBJS-$(CONFIG_TONEMAPX_FILTER) += x86/vf_tonemapx_intrin_sse.o \
+ x86/vf_tonemapx_intrin_avx.o
X86ASM-OBJS-$(CONFIG_SCENE_SAD) += x86/scene_sad.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += x86/vf_transpose_init.o
OBJS-$(CONFIG_VOLUME_FILTER) += x86/af_volume_init.o
OBJS-$(CONFIG_V360_FILTER) += x86/vf_v360_init.o
Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_avx.c
===================================================================
--- /dev/null
@ -4257,8 +4252,8 @@ Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_avx.h
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_TONEMAPX_INTRIN_AVX_H
+#define AVFILTER_TONEMAPX_INTRIN_AVX_H
+#ifndef AVFILTER_X86_TONEMAPX_INTRIN_AVX_H
+#define AVFILTER_X86_TONEMAPX_INTRIN_AVX_H
+
+#include "libavfilter/vf_tonemapx.h"
+
@ -4290,7 +4285,7 @@ Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_avx.h
+ int width, int height,
+ const struct TonemapIntParams *params);
+
+#endif // AVFILTER_TONEMAPX_INTRIN_AVX_H
+#endif // AVFILTER_X86_TONEMAPX_INTRIN_AVX_H
Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_sse.c
===================================================================
--- /dev/null
@ -5680,8 +5675,8 @@ Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_sse.h
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_TONEMAPX_INTRIN_SSE_H
+#define AVFILTER_TONEMAPX_INTRIN_SSE_H
+#ifndef AVFILTER_X86_TONEMAPX_INTRIN_SSE_H
+#define AVFILTER_X86_TONEMAPX_INTRIN_SSE_H
+
+#include "libavfilter/vf_tonemapx.h"
+
@ -5713,4 +5708,4 @@ Index: FFmpeg/libavfilter/x86/vf_tonemapx_intrin_sse.h
+ int width, int height,
+ const struct TonemapIntParams *params);
+
+#endif // AVFILTER_TONEMAPX_INTRIN_SSE_H
+#endif // AVFILTER_X86_TONEMAPX_INTRIN_SSE_H

View File

@ -1,21 +1,8 @@
Index: FFmpeg/libavcodec/kbdwin.h
===================================================================
--- FFmpeg.orig/libavcodec/kbdwin.h
+++ FFmpeg/libavcodec/kbdwin.h
@@ -24,7 +24,7 @@
/**
* Maximum window size for ff_kbd_window_init.
*/
-#define FF_KBD_WINDOW_MAX 1024
+#define FF_KBD_WINDOW_MAX 2048
/**
* Generate a Kaiser-Bessel Derived Window.
Index: FFmpeg/libavcodec/Makefile
===================================================================
--- FFmpeg.orig/libavcodec/Makefile
+++ FFmpeg/libavcodec/Makefile
@@ -201,6 +201,7 @@ OBJS-$(CONFIG_AC3_ENCODER) +
@@ -203,6 +203,7 @@ OBJS-$(CONFIG_AC3_ENCODER) +
ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_MF_ENCODER) += mfenc.o mf_utils.o
@ -23,23 +10,11 @@ Index: FFmpeg/libavcodec/Makefile
OBJS-$(CONFIG_ACELP_KELVIN_DECODER) += g729dec.o lsp.o celp_math.o celp_filters.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_AGM_DECODER) += agm.o jpegquanttables.o
OBJS-$(CONFIG_AIC_DECODER) += aic.o
Index: FFmpeg/libavcodec/allcodecs.c
===================================================================
--- FFmpeg.orig/libavcodec/allcodecs.c
+++ FFmpeg/libavcodec/allcodecs.c
@@ -434,6 +434,7 @@ extern const FFCodec ff_ac3_encoder;
extern const FFCodec ff_ac3_decoder;
extern const FFCodec ff_ac3_fixed_encoder;
extern const FFCodec ff_ac3_fixed_decoder;
+extern const FFCodec ff_ac4_decoder;
extern const FFCodec ff_acelp_kelvin_decoder;
extern const FFCodec ff_alac_encoder;
extern const FFCodec ff_alac_decoder;
Index: FFmpeg/libavcodec/ac4dec.c
===================================================================
--- /dev/null
+++ FFmpeg/libavcodec/ac4dec.c
@@ -0,0 +1,5900 @@
@@ -0,0 +1,5924 @@
+/*
+ * AC-4 Audio Decoder
+ *
@ -604,6 +579,23 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ { 0 },
+};
+
+#define VLC_INIT_CUSTOM_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, flags, static_size) \
+ do { \
+ static VLCElem table[static_size]; \
+ (vlc)->table = table; \
+ (vlc)->table_allocated = static_size; \
+ ff_vlc_init_sparse(vlc, bits, a, b, c, d, e, f, g, h, i, j, \
+ flags | VLC_INIT_USE_STATIC); \
+ } while (0)
+
+#define VLC_INIT_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
+ VLC_INIT_CUSTOM_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, 0, static_size)
+
+#define VLC_INIT_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
+ VLC_INIT_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
+
+static VLC channel_mode_vlc;
+static VLC bitrate_indicator_vlc;
+static VLC scale_factors_vlc;
@ -626,137 +618,137 @@ Index: FFmpeg/libavcodec/ac4dec.c
+
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+
+ INIT_VLC_STATIC(&channel_mode_vlc, 9, sizeof(channel_mode_bits),
+ VLC_INIT_STATIC(&channel_mode_vlc, 9, sizeof(channel_mode_bits),
+ channel_mode_bits, 1, 1, channel_mode_codes, 2, 2, 512);
+ INIT_VLC_STATIC(&bitrate_indicator_vlc, 5, sizeof(bitrate_indicator_bits),
+ VLC_INIT_STATIC(&bitrate_indicator_vlc, 5, sizeof(bitrate_indicator_bits),
+ bitrate_indicator_bits, 1, 1, bitrate_indicator_codes, 1, 1, 32);
+ INIT_VLC_STATIC(&scale_factors_vlc, 9, sizeof(scale_factors_bits),
+ VLC_INIT_STATIC(&scale_factors_vlc, 9, sizeof(scale_factors_bits),
+ scale_factors_bits, 1, 1, scale_factors_codes, 1, 1, 850);
+ INIT_VLC_STATIC(&snf_vlc, 6, sizeof(snf_bits),
+ VLC_INIT_STATIC(&snf_vlc, 6, sizeof(snf_bits),
+ snf_bits, 1, 1, snf_codes, 1, 1, 70);
+
+ INIT_VLC_STATIC(&asf_codebook_vlc[0], 9, sizeof(asf_codebook_1_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[0], 9, sizeof(asf_codebook_1_bits),
+ asf_codebook_1_bits, 1, 1, asf_codebook_1_codes, 1, 1, 542);
+ INIT_VLC_STATIC(&asf_codebook_vlc[1], 9, sizeof(asf_codebook_2_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[1], 9, sizeof(asf_codebook_2_bits),
+ asf_codebook_2_bits, 1, 1, asf_codebook_2_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&asf_codebook_vlc[2], 9, sizeof(asf_codebook_3_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[2], 9, sizeof(asf_codebook_3_bits),
+ asf_codebook_3_bits, 1, 1, asf_codebook_3_codes, 1, 1, 612);
+ INIT_VLC_STATIC(&asf_codebook_vlc[3], 9, sizeof(asf_codebook_4_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[3], 9, sizeof(asf_codebook_4_bits),
+ asf_codebook_4_bits, 1, 1, asf_codebook_4_codes, 1, 1, 544);
+ INIT_VLC_STATIC(&asf_codebook_vlc[4], 9, sizeof(asf_codebook_5_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[4], 9, sizeof(asf_codebook_5_bits),
+ asf_codebook_5_bits, 1, 1, asf_codebook_5_codes, 1, 1, 576);
+ INIT_VLC_STATIC(&asf_codebook_vlc[5], 9, sizeof(asf_codebook_6_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[5], 9, sizeof(asf_codebook_6_bits),
+ asf_codebook_6_bits, 1, 1, asf_codebook_6_codes, 1, 1, 546);
+ INIT_VLC_STATIC(&asf_codebook_vlc[6], 9, sizeof(asf_codebook_7_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[6], 9, sizeof(asf_codebook_7_bits),
+ asf_codebook_7_bits, 1, 1, asf_codebook_7_codes, 1, 1, 542);
+ INIT_VLC_STATIC(&asf_codebook_vlc[7], 9, sizeof(asf_codebook_8_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[7], 9, sizeof(asf_codebook_8_bits),
+ asf_codebook_8_bits, 1, 1, asf_codebook_8_codes, 1, 1, 522);
+ INIT_VLC_STATIC(&asf_codebook_vlc[8], 9, sizeof(asf_codebook_9_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[8], 9, sizeof(asf_codebook_9_bits),
+ asf_codebook_9_bits, 1, 1, asf_codebook_9_codes, 1, 1, 670);
+ INIT_VLC_STATIC(&asf_codebook_vlc[9], 9, sizeof(asf_codebook_10_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[9], 9, sizeof(asf_codebook_10_bits),
+ asf_codebook_10_bits, 1, 1, asf_codebook_10_codes, 1, 1, 604);
+ INIT_VLC_STATIC(&asf_codebook_vlc[10], 9, sizeof(asf_codebook_11_bits),
+ VLC_INIT_STATIC(&asf_codebook_vlc[10], 9, sizeof(asf_codebook_11_bits),
+ asf_codebook_11_bits, 1, 1, asf_codebook_11_codes, 1, 1, 674);
+
+ INIT_VLC_STATIC(&aspx_int_class_vlc, 5, sizeof(aspx_int_class_bits),
+ VLC_INIT_STATIC(&aspx_int_class_vlc, 5, sizeof(aspx_int_class_bits),
+ aspx_int_class_bits, 1, 1, aspx_int_class_codes, 1, 1, 32);
+
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][0][0], 9, sizeof(aspx_hcb_env_level_15_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][0][0], 9, sizeof(aspx_hcb_env_level_15_f0_bits),
+ aspx_hcb_env_level_15_f0_bits, 1, 1, aspx_hcb_env_level_15_f0_codes, 4, 4, 1024);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][0][1], 9, sizeof(aspx_hcb_env_level_15_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][0][1], 9, sizeof(aspx_hcb_env_level_15_df_bits),
+ aspx_hcb_env_level_15_df_bits, 1, 1, aspx_hcb_env_level_15_df_codes, 4, 4, 1888);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][0][2], 9, sizeof(aspx_hcb_env_level_15_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][0][2], 9, sizeof(aspx_hcb_env_level_15_dt_bits),
+ aspx_hcb_env_level_15_dt_bits, 1, 1, aspx_hcb_env_level_15_dt_codes, 4, 4, 1368);
+
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][1][0], 9, sizeof(aspx_hcb_env_level_30_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][1][0], 9, sizeof(aspx_hcb_env_level_30_f0_bits),
+ aspx_hcb_env_level_30_f0_bits, 1, 1, aspx_hcb_env_level_30_f0_codes, 4, 4, 772);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][1][1], 9, sizeof(aspx_hcb_env_level_30_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][1][1], 9, sizeof(aspx_hcb_env_level_30_df_bits),
+ aspx_hcb_env_level_30_df_bits, 1, 1, aspx_hcb_env_level_30_df_codes, 4, 4, 1624);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[0][1][2], 9, sizeof(aspx_hcb_env_level_30_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[0][1][2], 9, sizeof(aspx_hcb_env_level_30_dt_bits),
+ aspx_hcb_env_level_30_dt_bits, 1, 1, aspx_hcb_env_level_30_dt_codes, 4, 4, 1598);
+
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][0][0], 9, sizeof(aspx_hcb_env_balance_15_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][0][0], 9, sizeof(aspx_hcb_env_balance_15_f0_bits),
+ aspx_hcb_env_balance_15_f0_bits, 1, 1, aspx_hcb_env_balance_15_f0_codes, 4, 4, 644);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][0][1], 9, sizeof(aspx_hcb_env_balance_15_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][0][1], 9, sizeof(aspx_hcb_env_balance_15_df_bits),
+ aspx_hcb_env_balance_15_df_bits, 1, 1, aspx_hcb_env_balance_15_df_codes, 4, 4, 1056);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][0][2], 9, sizeof(aspx_hcb_env_balance_15_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][0][2], 9, sizeof(aspx_hcb_env_balance_15_dt_bits),
+ aspx_hcb_env_balance_15_dt_bits, 1, 1, aspx_hcb_env_balance_15_dt_codes, 4, 4, 616);
+
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][1][0], 9, sizeof(aspx_hcb_env_balance_30_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][1][0], 9, sizeof(aspx_hcb_env_balance_30_f0_bits),
+ aspx_hcb_env_balance_30_f0_bits, 1, 1, aspx_hcb_env_balance_30_f0_codes, 2, 2, 520);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][1][1], 9, sizeof(aspx_hcb_env_balance_30_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][1][1], 9, sizeof(aspx_hcb_env_balance_30_df_bits),
+ aspx_hcb_env_balance_30_df_bits, 1, 1, aspx_hcb_env_balance_30_df_codes, 4, 4, 768);
+ INIT_VLC_STATIC(&aspx_codebook_signal_vlc[1][1][2], 9, sizeof(aspx_hcb_env_balance_30_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_signal_vlc[1][1][2], 9, sizeof(aspx_hcb_env_balance_30_dt_bits),
+ aspx_hcb_env_balance_30_dt_bits, 1, 1, aspx_hcb_env_balance_30_dt_codes, 2, 2, 576);
+
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[0][0], 9, sizeof(aspx_hcb_noise_level_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[0][0], 9, sizeof(aspx_hcb_noise_level_f0_bits),
+ aspx_hcb_noise_level_f0_bits, 1, 1, aspx_hcb_noise_level_f0_codes, 2, 2, 672);
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[0][1], 9, sizeof(aspx_hcb_noise_level_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[0][1], 9, sizeof(aspx_hcb_noise_level_df_bits),
+ aspx_hcb_noise_level_df_bits, 1, 1, aspx_hcb_noise_level_df_codes, 4, 4, 1024);
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[0][2], 9, sizeof(aspx_hcb_noise_level_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[0][2], 9, sizeof(aspx_hcb_noise_level_dt_bits),
+ aspx_hcb_noise_level_dt_bits, 1, 1, aspx_hcb_noise_level_dt_codes, 2, 2, 768);
+
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[1][0], 9, sizeof(aspx_hcb_noise_balance_f0_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[1][0], 9, sizeof(aspx_hcb_noise_balance_f0_bits),
+ aspx_hcb_noise_balance_f0_bits, 1, 1, aspx_hcb_noise_balance_f0_codes, 2, 2, 516);
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[1][1], 9, sizeof(aspx_hcb_noise_balance_df_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[1][1], 9, sizeof(aspx_hcb_noise_balance_df_bits),
+ aspx_hcb_noise_balance_df_bits, 1, 1, aspx_hcb_noise_balance_df_codes, 2, 2, 536);
+ INIT_VLC_STATIC(&aspx_codebook_noise_vlc[1][2], 9, sizeof(aspx_hcb_noise_balance_dt_bits),
+ VLC_INIT_STATIC(&aspx_codebook_noise_vlc[1][2], 9, sizeof(aspx_hcb_noise_balance_dt_bits),
+ aspx_hcb_noise_balance_dt_bits, 1, 1, aspx_hcb_noise_balance_dt_codes, 2, 2, 530);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][1][0], 9, sizeof(acpl_hcb_alpha_coarse_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][1][0], 9, sizeof(acpl_hcb_alpha_coarse_f0_bits),
+ acpl_hcb_alpha_coarse_f0_bits, 1, 1, acpl_hcb_alpha_coarse_f0_codes, 2, 2, 516);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][1][1], 9, sizeof(acpl_hcb_alpha_coarse_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][1][1], 9, sizeof(acpl_hcb_alpha_coarse_df_bits),
+ acpl_hcb_alpha_coarse_df_bits, 1, 1, acpl_hcb_alpha_coarse_df_codes, 4, 4, 1032);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][1][2], 9, sizeof(acpl_hcb_alpha_coarse_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][1][2], 9, sizeof(acpl_hcb_alpha_coarse_dt_bits),
+ acpl_hcb_alpha_coarse_dt_bits, 1, 1, acpl_hcb_alpha_coarse_dt_codes, 4, 4, 642);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][0][0], 9, sizeof(acpl_hcb_alpha_fine_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][0][0], 9, sizeof(acpl_hcb_alpha_fine_f0_bits),
+ acpl_hcb_alpha_fine_f0_bits, 1, 1, acpl_hcb_alpha_fine_f0_codes, 2, 2, 530);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][0][1], 9, sizeof(acpl_hcb_alpha_fine_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][0][1], 9, sizeof(acpl_hcb_alpha_fine_df_bits),
+ acpl_hcb_alpha_fine_df_bits, 1, 1, acpl_hcb_alpha_fine_df_codes, 4, 4, 1176);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[0][0][2], 9, sizeof(acpl_hcb_alpha_fine_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[0][0][2], 9, sizeof(acpl_hcb_alpha_fine_dt_bits),
+ acpl_hcb_alpha_fine_dt_bits, 1, 1, acpl_hcb_alpha_fine_dt_codes, 4, 4, 1158);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][1][0], 9, sizeof(acpl_hcb_beta_coarse_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][1][0], 9, sizeof(acpl_hcb_beta_coarse_f0_bits),
+ acpl_hcb_beta_coarse_f0_bits, 1, 1, acpl_hcb_beta_coarse_f0_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][1][1], 9, sizeof(acpl_hcb_beta_coarse_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][1][1], 9, sizeof(acpl_hcb_beta_coarse_df_bits),
+ acpl_hcb_beta_coarse_df_bits, 1, 1, acpl_hcb_beta_coarse_df_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][1][2], 9, sizeof(acpl_hcb_beta_coarse_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][1][2], 9, sizeof(acpl_hcb_beta_coarse_dt_bits),
+ acpl_hcb_beta_coarse_dt_bits, 1, 1, acpl_hcb_beta_coarse_dt_codes, 1, 1, 512);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][0][0], 9, sizeof(acpl_hcb_beta_fine_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][0][0], 9, sizeof(acpl_hcb_beta_fine_f0_bits),
+ acpl_hcb_beta_fine_f0_bits, 1, 1, acpl_hcb_beta_fine_f0_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][0][1], 9, sizeof(acpl_hcb_beta_fine_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][0][1], 9, sizeof(acpl_hcb_beta_fine_df_bits),
+ acpl_hcb_beta_fine_df_bits, 1, 1, acpl_hcb_beta_fine_df_codes, 4, 4, 528);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[1][0][2], 9, sizeof(acpl_hcb_beta_fine_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[1][0][2], 9, sizeof(acpl_hcb_beta_fine_dt_bits),
+ acpl_hcb_beta_fine_dt_bits, 1, 1, acpl_hcb_beta_fine_dt_codes, 4, 4, 576);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][1][0], 9, sizeof(acpl_hcb_beta3_coarse_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][1][0], 9, sizeof(acpl_hcb_beta3_coarse_f0_bits),
+ acpl_hcb_beta3_coarse_f0_bits, 1, 1, acpl_hcb_beta3_coarse_f0_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][1][1], 9, sizeof(acpl_hcb_beta3_coarse_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][1][1], 9, sizeof(acpl_hcb_beta3_coarse_df_bits),
+ acpl_hcb_beta3_coarse_df_bits, 1, 1, acpl_hcb_beta3_coarse_df_codes, 4, 4, 528);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][1][2], 9, sizeof(acpl_hcb_beta3_coarse_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][1][2], 9, sizeof(acpl_hcb_beta3_coarse_dt_bits),
+ acpl_hcb_beta3_coarse_dt_bits, 1, 1, acpl_hcb_beta3_coarse_dt_codes, 2, 2, 576);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][0][0], 9, sizeof(acpl_hcb_beta3_fine_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][0][0], 9, sizeof(acpl_hcb_beta3_fine_f0_bits),
+ acpl_hcb_beta3_fine_f0_bits, 1, 1, acpl_hcb_beta3_fine_f0_codes, 1, 1, 512);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][0][1], 9, sizeof(acpl_hcb_beta3_fine_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][0][1], 9, sizeof(acpl_hcb_beta3_fine_df_bits),
+ acpl_hcb_beta3_fine_df_bits, 1, 1, acpl_hcb_beta3_fine_df_codes, 4, 4, 580);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[2][0][2], 9, sizeof(acpl_hcb_beta3_fine_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[2][0][2], 9, sizeof(acpl_hcb_beta3_fine_dt_bits),
+ acpl_hcb_beta3_fine_dt_bits, 1, 1, acpl_hcb_beta3_fine_dt_codes, 4, 4, 768);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][1][0], 9, sizeof(acpl_hcb_gamma_coarse_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][1][0], 9, sizeof(acpl_hcb_gamma_coarse_f0_bits),
+ acpl_hcb_gamma_coarse_f0_bits, 1, 1, acpl_hcb_gamma_coarse_f0_codes, 2, 2, 528);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][1][1], 9, sizeof(acpl_hcb_gamma_coarse_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][1][1], 9, sizeof(acpl_hcb_gamma_coarse_df_bits),
+ acpl_hcb_gamma_coarse_df_bits, 1, 1, acpl_hcb_gamma_coarse_df_codes, 4, 4, 644);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][1][2], 9, sizeof(acpl_hcb_gamma_coarse_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][1][2], 9, sizeof(acpl_hcb_gamma_coarse_dt_bits),
+ acpl_hcb_gamma_coarse_dt_bits, 1, 1, acpl_hcb_gamma_coarse_dt_codes, 4, 4, 896);
+
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][0][0], 9, sizeof(acpl_hcb_gamma_fine_f0_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][0][0], 9, sizeof(acpl_hcb_gamma_fine_f0_bits),
+ acpl_hcb_gamma_fine_f0_bits, 1, 1, acpl_hcb_gamma_fine_f0_codes, 4, 4, 544);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][0][1], 9, sizeof(acpl_hcb_gamma_fine_df_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][0][1], 9, sizeof(acpl_hcb_gamma_fine_df_bits),
+ acpl_hcb_gamma_fine_df_bits, 1, 1, acpl_hcb_gamma_fine_df_codes, 4, 4, 1026);
+ INIT_VLC_STATIC(&acpl_codebook_vlc[3][0][2], 9, sizeof(acpl_hcb_gamma_fine_dt_bits),
+ VLC_INIT_STATIC(&acpl_codebook_vlc[3][0][2], 9, sizeof(acpl_hcb_gamma_fine_dt_bits),
+ acpl_hcb_gamma_fine_dt_bits, 1, 1, acpl_hcb_gamma_fine_dt_codes, 4, 4, 1792);
+
+ for (int j = 0; j < 8; j++) {
@ -1875,6 +1867,7 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ return 0;
+}
+
+#if 0
+static int num_sfb_96(int transf_length)
+{
+ if (transf_length >= 4096)
@ -1908,6 +1901,7 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ else
+ return 18;
+}
+#endif
+
+static int num_sfb_48(int transf_length)
+{
@ -2198,6 +2192,7 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ return 0;
+}
+
+#if 0
+static int32_t ac_decode(AC4DecodeContext *s, uint32_t cdf_low,
+ uint32_t cdf_high,
+ ACState *acs)
@ -2232,6 +2227,7 @@ Index: FFmpeg/libavcodec/ac4dec.c
+
+ return 0;
+}
+#endif
+
+static int32_t ac_decode_finish(ACState *acs)
+
@ -5882,7 +5878,10 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ for (int ch = 0; ch < avctx->ch_layout.nb_channels; ch++)
+ decode_channel(s, ch, (float *)frame->extended_data[ch]);
+
+ frame->key_frame = s->iframe_global;
+ if (s->iframe_global)
+ frame->flags |= AV_FRAME_FLAG_KEY;
+ else
+ frame->flags &= ~AV_FRAME_FLAG_KEY;
+
+ *got_frame_ptr = 1;
+
@ -5915,7 +5914,7 @@ Index: FFmpeg/libavcodec/ac4dec.c
+
+static const AVOption options[] = {
+ { "presentation", "select presentation", OFFSET(target_presentation), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, FLAGS },
+ { NULL},
+ { NULL },
+};
+
+static const AVClass ac4_decoder_class = {
@ -5938,13 +5937,13 @@ Index: FFmpeg/libavcodec/ac4dec.c
+ .flush = ac4_flush,
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE },
+ AV_SAMPLE_FMT_NONE },
+};
Index: FFmpeg/libavcodec/ac4dec_data.h
===================================================================
--- /dev/null
+++ FFmpeg/libavcodec/ac4dec_data.h
@@ -0,0 +1,1665 @@
@@ -0,0 +1,1664 @@
+/*
+ * AC-4 Audio Decoder
+ *
@ -5968,8 +5967,7 @@ Index: FFmpeg/libavcodec/ac4dec_data.h
+#ifndef AVCODEC_AC4DEC_DATA_H
+#define AVCODEC_AC4DEC_DATA_H
+
+#include <stdint.h>
+#include <libavutil/mem.h>
+#include "libavutil/mem_internal.h"
+
+static const uint8_t aspx_hcb_env_level_15_f0_bits[71] = {
+ 7, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6,
@ -7609,200 +7607,37 @@ Index: FFmpeg/libavcodec/ac4dec_data.h
+ { 0.998266, 0.0588631}, {-0.145619, 0.989341}
+};
+
+#endif /* AVCODEC_AC4DECDATA_H */
Index: FFmpeg/libavcodec/codec_desc.c
+#endif /* AVCODEC_AC4DEC_DATA_H */
Index: FFmpeg/libavcodec/allcodecs.c
===================================================================
--- FFmpeg.orig/libavcodec/codec_desc.c
+++ FFmpeg/libavcodec/codec_desc.c
@@ -3369,6 +3369,13 @@ static const AVCodecDescriptor codec_des
.long_name = NULL_IF_CONFIG_SMALL("RKA (RK Audio)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
},
+ {
+ .id = AV_CODEC_ID_AC4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ac4",
+ .long_name = NULL_IF_CONFIG_SMALL("AC-4"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
--- FFmpeg.orig/libavcodec/allcodecs.c
+++ FFmpeg/libavcodec/allcodecs.c
@@ -431,6 +431,7 @@ extern const FFCodec ff_ac3_encoder;
extern const FFCodec ff_ac3_decoder;
extern const FFCodec ff_ac3_fixed_encoder;
extern const FFCodec ff_ac3_fixed_decoder;
+extern const FFCodec ff_ac4_decoder;
extern const FFCodec ff_acelp_kelvin_decoder;
extern const FFCodec ff_alac_encoder;
extern const FFCodec ff_alac_decoder;
Index: FFmpeg/libavcodec/kbdwin.h
===================================================================
--- FFmpeg.orig/libavcodec/kbdwin.h
+++ FFmpeg/libavcodec/kbdwin.h
@@ -24,7 +24,7 @@
/**
* Maximum window size for ff_kbd_window_init.
*/
-#define FF_KBD_WINDOW_MAX 1024
+#define FF_KBD_WINDOW_MAX 2048
/* subtitle codecs */
{
Index: FFmpeg/libavcodec/codec_id.h
===================================================================
--- FFmpeg.orig/libavcodec/codec_id.h
+++ FFmpeg/libavcodec/codec_id.h
@@ -538,6 +538,7 @@ enum AVCodecID {
AV_CODEC_ID_FTR,
AV_CODEC_ID_WAVARC,
AV_CODEC_ID_RKA,
+ AV_CODEC_ID_AC4,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
Index: FFmpeg/libavformat/Makefile
===================================================================
--- FFmpeg.orig/libavformat/Makefile
+++ FFmpeg/libavformat/Makefile
@@ -76,6 +76,7 @@ OBJS-$(CONFIG_AA_DEMUXER)
OBJS-$(CONFIG_AAC_DEMUXER) += aacdec.o apetag.o img2.o rawdec.o
OBJS-$(CONFIG_AAX_DEMUXER) += aaxdec.o
OBJS-$(CONFIG_AC3_DEMUXER) += ac3dec.o rawdec.o
+OBJS-$(CONFIG_AC4_DEMUXER) += ac4dec.o
OBJS-$(CONFIG_AC3_MUXER) += rawenc.o
OBJS-$(CONFIG_ACE_DEMUXER) += acedec.o
OBJS-$(CONFIG_ACM_DEMUXER) += acm.o rawdec.o
Index: FFmpeg/libavformat/ac4dec.c
===================================================================
--- /dev/null
+++ FFmpeg/libavformat/ac4dec.c
@@ -0,0 +1,105 @@
+/*
+ * RAW AC-4 demuxer
+ * Copyright (c) 2019 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/crc.h"
+#include "avformat.h"
+#include "demux.h"
+#include "rawdec.h"
+
+static int ac4_probe(const AVProbeData *p)
+{
+ const uint8_t *buf = p->buf;
+ int left = p->buf_size;
+ int max_frames = 0;
+
+ while (left > 7) {
+ int size;
+
+ if (buf[0] == 0xAC &&
+ (buf[1] == 0x40 ||
+ buf[1] == 0x41)) {
+ size = (buf[2] << 8) | buf[3];
+ if (size == 0xFFFF)
+ size = 3 + ((buf[4] << 16) | (buf[5] << 8) | buf[6]);
+ size += 4;
+ if (buf[1] == 0x41)
+ size += 2;
+ max_frames++;
+ left -= size;
+ buf += size;
+ } else {
+ break;
+ }
+ }
+
+ return FFMIN(AVPROBE_SCORE_MAX, max_frames * 7);
+}
+
+static int ac4_read_header(AVFormatContext *s)
+{
+ AVStream *st;
+
+ st = avformat_new_stream(s, NULL);
+ if (!st)
+ return AVERROR(ENOMEM);
+
+ st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
+ st->codecpar->codec_id = AV_CODEC_ID_AC4;
+
+ return 0;
+}
+
+static int ac4_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVIOContext *pb = s->pb;
+ int64_t pos;
+ uint16_t sync;
+ int ret, size;
+
+ if (avio_feof(s->pb))
+ return AVERROR_EOF;
+
+ pos = avio_tell(s->pb);
+ sync = avio_rb16(pb);
+ size = avio_rb16(pb);
+ if (size == 0xffff)
+ size = avio_rb24(pb);
+
+ ret = av_get_packet(pb, pkt, size);
+ pkt->pos = pos;
+ pkt->stream_index = 0;
+
+ if (sync == 0xAC41)
+ avio_skip(pb, 2);
+
+ return ret;
+}
+
+AVInputFormat ff_ac4_demuxer = {
+ .name = "ac4",
+ .long_name = NULL_IF_CONFIG_SMALL("raw AC-4"),
+ .read_probe = ac4_probe,
+ .read_header = ac4_read_header,
+ .read_packet = ac4_read_packet,
+ .flags = AVFMT_GENERIC_INDEX,
+ .extensions = "ac4",
+};
Index: FFmpeg/libavformat/allformats.c
===================================================================
--- FFmpeg.orig/libavformat/allformats.c
+++ FFmpeg/libavformat/allformats.c
@@ -34,6 +34,7 @@ extern const AVInputFormat ff_aac_demux
extern const AVInputFormat ff_aax_demuxer;
extern const AVInputFormat ff_ac3_demuxer;
extern const FFOutputFormat ff_ac3_muxer;
+extern const AVInputFormat ff_ac4_demuxer;
extern const AVInputFormat ff_ace_demuxer;
extern const AVInputFormat ff_acm_demuxer;
extern const AVInputFormat ff_act_demuxer;
Index: FFmpeg/libavformat/isom_tags.c
===================================================================
--- FFmpeg.orig/libavformat/isom_tags.c
+++ FFmpeg/libavformat/isom_tags.c
@@ -298,6 +298,7 @@ const AVCodecTag ff_codec_movaudio_tags[
{ AV_CODEC_ID_DTS, MKTAG('d', 't', 's', 'e') }, /* DTS Express */
{ AV_CODEC_ID_DTS, MKTAG('D', 'T', 'S', ' ') }, /* non-standard */
{ AV_CODEC_ID_EAC3, MKTAG('e', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F (only valid in ISOBMFF) */
+ { AV_CODEC_ID_AC4, MKTAG('a', 'c', '-', '4') },
{ AV_CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
{ AV_CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
{ AV_CODEC_ID_GSM, MKTAG('a', 'g', 's', 'm') },
Index: FFmpeg/libavformat/mpegts.c
===================================================================
--- FFmpeg.orig/libavformat/mpegts.c
+++ FFmpeg/libavformat/mpegts.c
@@ -859,6 +859,7 @@ static const StreamType HLS_SAMPLE_ENC_t
static const StreamType REGD_types[] = {
{ MKTAG('d', 'r', 'a', 'c'), AVMEDIA_TYPE_VIDEO, AV_CODEC_ID_DIRAC },
{ MKTAG('A', 'C', '-', '3'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC3 },
+ { MKTAG('A', 'C', '-', '4'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_AC4 },
{ MKTAG('B', 'S', 'S', 'D'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_S302M },
{ MKTAG('D', 'T', 'S', '1'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
{ MKTAG('D', 'T', 'S', '2'), AVMEDIA_TYPE_AUDIO, AV_CODEC_ID_DTS },
/**
* Generate a Kaiser-Bessel Derived Window.
Index: FFmpeg/libavcodec/utils.c
===================================================================
--- FFmpeg.orig/libavcodec/utils.c
+++ FFmpeg/libavcodec/utils.c
@@ -634,7 +634,8 @@ static int get_audio_frame_duration(enum
@@ -598,7 +598,8 @@ static int get_audio_frame_duration(enum
case AV_CODEC_ID_ATRAC3P: return 2048;
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MUSEPACK7: return 1152;
@ -7812,3 +7647,15 @@ Index: FFmpeg/libavcodec/utils.c
case AV_CODEC_ID_FTR: return 1024;
}
Index: FFmpeg/libavformat/isom_tags.c
===================================================================
--- FFmpeg.orig/libavformat/isom_tags.c
+++ FFmpeg/libavformat/isom_tags.c
@@ -308,6 +308,7 @@ const AVCodecTag ff_codec_movaudio_tags[
{ AV_CODEC_ID_DTS, MKTAG('d', 't', 's', 'e') }, /* DTS Express */
{ AV_CODEC_ID_DTS, MKTAG('D', 'T', 'S', ' ') }, /* non-standard */
{ AV_CODEC_ID_EAC3, MKTAG('e', 'c', '-', '3') }, /* ETSI TS 102 366 Annex F (only valid in ISOBMFF) */
+ { AV_CODEC_ID_AC4, MKTAG('a', 'c', '-', '4') },
{ AV_CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
{ AV_CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
{ AV_CODEC_ID_GSM, MKTAG('a', 'g', 's', 'm') },

View File

@ -1,386 +0,0 @@
Subject: [PATCH] avfilter: add scale_vt for videotoolbox pix_fmt
---
Index: configure
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/configure b/configure
--- a/configure (revision 6c90fcd9e606c1222271f5f867f761abeafbe51e)
+++ b/configure (revision c2c96c4c2419859c4d1b11e6f907e58afb6dfa3c)
@@ -3833,6 +3833,7 @@
zoompan_filter_deps="swscale"
zscale_filter_deps="libzimg const_nan"
scale_vaapi_filter_deps="vaapi"
+scale_vt_filter_deps="videotoolbox"
scale_vulkan_filter_deps="vulkan spirv_compiler"
vpp_qsv_filter_deps="libmfx"
vpp_qsv_filter_select="qsvvpp"
Index: doc/filters.texi
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/doc/filters.texi b/doc/filters.texi
--- a/doc/filters.texi (revision 6c90fcd9e606c1222271f5f867f761abeafbe51e)
+++ b/doc/filters.texi (revision c2c96c4c2419859c4d1b11e6f907e58afb6dfa3c)
@@ -21184,6 +21184,27 @@
@end example
@end itemize
+@section scale_vt
+
+Scale and convert the color parameters using VTPixelTransferSession.
+
+The filter accepts the following options:
+@table @option
+@item w
+@item h
+Set the output video dimension expression. Default value is the input dimension.
+
+@item color_matrix
+Set the output colorspace matrix.
+
+@item color_primaries
+Set the output color primaries.
+
+@item color_transfer
+Set the output transfer characteristics.
+
+@end table
+
@section scharr
Apply scharr operator to input video stream.
Index: libavfilter/Makefile
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
--- a/libavfilter/Makefile (revision 393b03d3db8a085d3b56631b334d474da390cab1)
+++ b/libavfilter/Makefile (date 1708782013235)
@@ -452,6 +452,7 @@
OBJS-$(CONFIG_SCALE_OPENCL_FILTER) += vf_scale_opencl.o opencl.o opencl/scale.o scale_eval.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_vpp_qsv.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
+OBJS-$(CONFIG_SCALE_VT_FILTER) += vf_scale_vt.o scale_eval.o
OBJS-$(CONFIG_SCALE_VULKAN_FILTER) += vf_scale_vulkan.o vulkan.o vulkan_filter.o
OBJS-$(CONFIG_SCALE_RKRGA_FILTER) += vf_vpp_rkrga.o scale_eval.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale_eval.o
Index: libavfilter/allfilters.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
--- a/libavfilter/allfilters.c (revision 393b03d3db8a085d3b56631b334d474da390cab1)
+++ b/libavfilter/allfilters.c (date 1708782072996)
@@ -424,6 +424,7 @@
extern const AVFilter ff_vf_scale_opencl;
extern const AVFilter ff_vf_scale_qsv;
extern const AVFilter ff_vf_scale_vaapi;
+extern const AVFilter ff_vf_scale_vt;
extern const AVFilter ff_vf_scale_vulkan;
extern const AVFilter ff_vf_scale_rkrga;
extern const AVFilter ff_vf_scale2ref;
Index: libavfilter/vf_scale_vt.c
===================================================================
diff --git a/libavfilter/vf_scale_vt.c b/libavfilter/vf_scale_vt.c
new file mode 100644
--- /dev/null (revision c2c96c4c2419859c4d1b11e6f907e58afb6dfa3c)
+++ b/libavfilter/vf_scale_vt.c (revision c2c96c4c2419859c4d1b11e6f907e58afb6dfa3c)
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2023 Zhao Zhili <zhilizhao@tencent.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <VideoToolbox/VideoToolbox.h>
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_videotoolbox.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "scale_eval.h"
+
+typedef struct ScaleVtContext {
+ AVClass *class;
+
+ VTPixelTransferSessionRef transfer;
+ int output_width;
+ int output_height;
+ char *w_expr;
+ char *h_expr;
+
+ enum AVColorPrimaries colour_primaries;
+ enum AVColorTransferCharacteristic colour_transfer;
+ enum AVColorSpace colour_matrix;
+ enum AVPixelFormat format;
+ char *colour_primaries_string;
+ char *colour_transfer_string;
+ char *colour_matrix_string;
+} ScaleVtContext;
+
+static const enum AVPixelFormat supported_formats[] = {
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_NONE,
+};
+
+static int format_is_supported(enum AVPixelFormat fmt)
+{
+ for (int i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
+ if (supported_formats[i] == fmt)
+ return 1;
+ return 0;
+}
+
+static av_cold int scale_vt_init(AVFilterContext *avctx)
+{
+ ScaleVtContext *s = avctx->priv;
+ int ret;
+ CFStringRef value;
+
+ ret = VTPixelTransferSessionCreate(kCFAllocatorDefault, &s->transfer);
+ if (ret != noErr) {
+ av_log(avctx, AV_LOG_ERROR, "transfer session create failed, %d\n", ret);
+ return AVERROR_EXTERNAL;
+ }
+
+#define STRING_OPTION(var_name, func_name, default_value) \
+ do { \
+ if (s->var_name##_string) { \
+ int var = av_##func_name##_from_name(s->var_name##_string); \
+ if (var < 0) { \
+ av_log(avctx, AV_LOG_ERROR, "Invalid %s.\n", #var_name); \
+ return AVERROR(EINVAL); \
+ } \
+ s->var_name = var; \
+ } else { \
+ s->var_name = default_value; \
+ } \
+ } while (0)
+
+ STRING_OPTION(colour_primaries, color_primaries, AVCOL_PRI_UNSPECIFIED);
+ STRING_OPTION(colour_transfer, color_transfer, AVCOL_TRC_UNSPECIFIED);
+ STRING_OPTION(colour_matrix, color_space, AVCOL_SPC_UNSPECIFIED);
+
+ if (s->colour_primaries != AVCOL_PRI_UNSPECIFIED) {
+ value = av_map_videotoolbox_color_primaries_from_av(s->colour_primaries);
+ if (!value) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Doesn't support converting to colour primaries %s\n",
+ s->colour_primaries_string);
+ return AVERROR(ENOTSUP);
+ }
+ VTSessionSetProperty(s->transfer, kVTPixelTransferPropertyKey_DestinationColorPrimaries, value);
+ }
+
+ if (s->colour_transfer != AVCOL_TRC_UNSPECIFIED) {
+ value = av_map_videotoolbox_color_trc_from_av(s->colour_transfer);
+ if (!value) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Doesn't support converting to trc %s\n",
+ s->colour_transfer_string);
+ return AVERROR(ENOTSUP);
+ }
+ VTSessionSetProperty(s->transfer, kVTPixelTransferPropertyKey_DestinationTransferFunction, value);
+ }
+
+ if (s->colour_matrix != AVCOL_SPC_UNSPECIFIED) {
+ value = av_map_videotoolbox_color_matrix_from_av(s->colour_matrix);
+ if (!value) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Doesn't support converting to colorspace %s\n",
+ s->colour_matrix_string);
+ return AVERROR(ENOTSUP);
+ }
+ VTSessionSetProperty(s->transfer, kVTPixelTransferPropertyKey_DestinationYCbCrMatrix, value);
+ }
+
+ return 0;
+}
+
+static av_cold void scale_vt_uninit(AVFilterContext *avctx)
+{
+ ScaleVtContext *s = avctx->priv;
+
+ if (s->transfer) {
+ VTPixelTransferSessionInvalidate(s->transfer);
+ CFRelease(s->transfer);
+ s->transfer = NULL;
+ }
+}
+
+static int scale_vt_filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ int ret;
+ AVFilterContext *ctx = link->dst;
+ ScaleVtContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ CVPixelBufferRef src;
+ CVPixelBufferRef dst;
+
+ AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = av_frame_copy_props(out, in);
+ if (ret < 0)
+ goto fail;
+
+ av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
+ (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
+ (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
+ INT_MAX);
+ if (s->colour_primaries != AVCOL_PRI_UNSPECIFIED)
+ out->color_primaries = s->colour_primaries;
+ if (s->colour_transfer != AVCOL_TRC_UNSPECIFIED)
+ out->color_trc = s->colour_transfer;
+ if (s->colour_matrix != AVCOL_SPC_UNSPECIFIED)
+ out->colorspace = s->colour_matrix;
+
+ src = (CVPixelBufferRef)in->data[3];
+ dst = (CVPixelBufferRef)out->data[3];
+ ret = VTPixelTransferSessionTransferImage(s->transfer, src, dst);
+ if (ret != noErr) {
+ av_log(ctx, AV_LOG_ERROR, "transfer image failed, %d\n", ret);
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+
+fail:
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return ret;
+}
+
+static int scale_vt_config_output(AVFilterLink *outlink)
+{
+ int err;
+ AVFilterContext *avctx = outlink->src;
+ ScaleVtContext *s = avctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ AVHWFramesContext *hw_frame_ctx_in;
+ AVHWFramesContext *hw_frame_ctx_out;
+ enum AVPixelFormat out_format;
+
+ err = ff_scale_eval_dimensions(s, s->w_expr, s->h_expr, inlink, outlink,
+ &s->output_width,
+ &s->output_height);
+ if (err < 0)
+ return err;
+
+ outlink->w = s->output_width;
+ outlink->h = s->output_height;
+
+ if (inlink->sample_aspect_ratio.num) {
+ AVRational r = {outlink->h * inlink->w, outlink->w * inlink->h};
+ outlink->sample_aspect_ratio = av_mul_q(r, inlink->sample_aspect_ratio);
+ } else {
+ outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ }
+
+ hw_frame_ctx_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+
+ out_format = (s->format == AV_PIX_FMT_NONE) ? hw_frame_ctx_in->sw_format : s->format;
+ if (!format_is_supported(s->format)) {
+ av_log(s, AV_LOG_ERROR, "Unsupported output format: %s\n",
+ av_get_pix_fmt_name(out_format));
+ return AVERROR(ENOSYS);
+ }
+
+ av_buffer_unref(&outlink->hw_frames_ctx);
+ outlink->hw_frames_ctx = av_hwframe_ctx_alloc(hw_frame_ctx_in->device_ref);
+ hw_frame_ctx_out = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
+ hw_frame_ctx_out->format = AV_PIX_FMT_VIDEOTOOLBOX;
+ hw_frame_ctx_out->sw_format = out_format;
+ hw_frame_ctx_out->width = outlink->w;
+ hw_frame_ctx_out->height = outlink->h;
+
+ err = ff_filter_init_hw_frames(avctx, outlink, 1);
+ if (err < 0)
+ return err;
+
+ err = av_hwframe_ctx_init(outlink->hw_frames_ctx);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to init videotoolbox frame context, %s\n",
+ av_err2str(err));
+ return err;
+ }
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(ScaleVtContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption scale_vt_options[] = {
+ { "w", "Output video width",
+ OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, .flags = FLAGS },
+ { "h", "Output video height",
+ OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, .flags = FLAGS },
+ { "color_matrix", "Output colour matrix coefficient set",
+ OFFSET(colour_matrix_string), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "color_primaries", "Output colour primaries",
+ OFFSET(colour_primaries_string), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "color_transfer", "Output colour transfer characteristics",
+ OFFSET(colour_transfer_string), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
+ { "format", "Output pixel format",
+ OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, AV_PIX_FMT_NONE, INT_MAX, FLAGS, "fmt" },
+ { NULL },
+};
+
+AVFILTER_DEFINE_CLASS(scale_vt);
+
+static const AVFilterPad scale_vt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = &scale_vt_filter_frame,
+ },
+};
+
+static const AVFilterPad scale_vt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = &scale_vt_config_output,
+ },
+};
+
+const AVFilter ff_vf_scale_vt = {
+ .name = "scale_vt",
+ .description = NULL_IF_CONFIG_SMALL("Scale Videotoolbox frames"),
+ .priv_size = sizeof(ScaleVtContext),
+ .init = scale_vt_init,
+ .uninit = scale_vt_uninit,
+ FILTER_INPUTS(scale_vt_inputs),
+ FILTER_OUTPUTS(scale_vt_outputs),
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VIDEOTOOLBOX),
+ .priv_class = &scale_vt_class,
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};

View File

@ -0,0 +1,35 @@
Index: FFmpeg/fftools/ffmpeg_sched.h
===================================================================
--- FFmpeg.orig/fftools/ffmpeg_sched.h
+++ FFmpeg/fftools/ffmpeg_sched.h
@@ -238,12 +238,15 @@ int sch_add_mux(Scheduler *sch, SchThrea
* Default size of a packet thread queue. For muxing this can be overridden by
* the thread_queue_size option as passed to a call to sch_add_mux().
*/
-#define DEFAULT_PACKET_THREAD_QUEUE_SIZE 8
+#define DEFAULT_PACKET_THREAD_QUEUE_SIZE 1
/**
* Default size of a frame thread queue.
*/
-#define DEFAULT_FRAME_THREAD_QUEUE_SIZE 8
+#define DEFAULT_FRAME_THREAD_QUEUE_SIZE 1
+
+// The new default value of 8 does little to help hwaccel, but instead increases
+// extra_hw_frames, which causes video memory on dGPU to be exhausted more easily.
/**
* Add a muxed stream for a previously added muxer.
Index: FFmpeg/tests/ref/fate/ffmpeg-fix_sub_duration_heartbeat
===================================================================
--- FFmpeg.orig/tests/ref/fate/ffmpeg-fix_sub_duration_heartbeat
+++ FFmpeg/tests/ref/fate/ffmpeg-fix_sub_duration_heartbeat
@@ -33,3 +33,8 @@
<font face="Monospace">{\an7}(<i> inaudible radio chatter</i> )
>> Safety remains our numb</font>
+9
+00:00:03,704 --> 00:00:04,004
+<font face="Monospace">{\an7}(<i> inaudible radio chatter</i> )
+>> Safety remains our number one</font>
+

View File

@ -0,0 +1,78 @@
Index: FFmpeg/libavcodec/qsv.c
===================================================================
--- FFmpeg.orig/libavcodec/qsv.c
+++ FFmpeg/libavcodec/qsv.c
@@ -473,8 +473,8 @@ static int ff_qsv_set_display_handle(AVC
AVVAAPIDeviceContext *hwctx;
int ret;
- av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
- av_dict_set(&child_device_opts, "driver", "iHD", 0);
+ av_dict_set(&child_device_opts, "vendor_id", "0x8086", 0);
+ av_dict_set(&child_device_opts, "driver", "iHD", 0);
ret = av_hwdevice_ctx_create(&qs->va_device_ref, AV_HWDEVICE_TYPE_VAAPI, NULL, child_device_opts, 0);
av_dict_free(&child_device_opts);
Index: FFmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_qsv.c
+++ FFmpeg/libavutil/hwcontext_qsv.c
@@ -2644,8 +2644,8 @@ static int qsv_device_create(AVHWDeviceC
// used on recent Intel hardware. Set options to the VAAPI device
// creation so that we should pick a usable setup by default if
// possible, even when multiple devices and drivers are available.
- av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
- av_dict_set(&child_device_opts, "driver", "iHD", 0);
+ av_dict_set(&child_device_opts, "vendor_id", "0x8086", 0);
+ av_dict_set(&child_device_opts, "driver", "iHD", 0);
}
break;
#endif
Index: FFmpeg/libavutil/hwcontext_vaapi.c
===================================================================
--- FFmpeg.orig/libavutil/hwcontext_vaapi.c
+++ FFmpeg/libavutil/hwcontext_vaapi.c
@@ -1763,7 +1763,9 @@ static int vaapi_device_create(AVHWDevic
#if CONFIG_LIBDRM
drmVersion *info;
const AVDictionaryEntry *kernel_driver;
+ const AVDictionaryEntry *vendor_id;
kernel_driver = av_dict_get(opts, "kernel_driver", NULL, 0);
+ vendor_id = av_dict_get(opts, "vendor_id", NULL, 0);
#endif
for (n = 0; n < max_devices; n++) {
snprintf(path, sizeof(path),
@@ -1818,6 +1820,33 @@ static int vaapi_device_create(AVHWDevic
close(priv->drm_fd);
priv->drm_fd = -1;
continue;
+ } else if (vendor_id) {
+ drmDevicePtr device;
+ char drm_vendor[8];
+ if (drmGetDevice(priv->drm_fd, &device)) {
+ av_log(ctx, AV_LOG_VERBOSE,
+ "Failed to get DRM device info for device %d.\n", n);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+
+ snprintf(drm_vendor, sizeof(drm_vendor), "0x%x", device->deviceinfo.pci->vendor_id);
+ if (strcmp(vendor_id->value, drm_vendor)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Ignoring device %d "
+ "with non-matching vendor id (%s).\n",
+ n, vendor_id->value);
+ drmFreeDevice(&device);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d, "
+ "with matching vendor id (%s).\n",
+ n, vendor_id->value);
+ drmFreeDevice(&device);
+ break;
}
drmFreeVersion(info);
#endif

View File

@ -1,161 +0,0 @@
diff --git a/configure b/configure
index 197f762b58..efcb4c4e49 100755
--- a/configure
+++ b/configure
@@ -2460,6 +2460,7 @@ HAVE_LIST="
opencl_dxva2
opencl_vaapi_beignet
opencl_vaapi_intel_media
+ opencl_videotoolbox
perl
pod2man
texi2html
@@ -7217,6 +7218,11 @@ if enabled_all opencl d3d11va ; then
enable opencl_d3d11
fi
+if enabled_all opencl videotoolbox ; then
+ check_func_headers OpenCL/cl_gl_ext.h clCreateImageFromIOSurfaceWithPropertiesAPPLE -framework VideoToolbox -framework OpenCL &&
+ enable opencl_videotoolbox
+fi
+
enabled vdpau &&
check_cpp_condition vdpau vdpau/vdpau.h "defined VDP_DECODER_PROFILE_MPEG4_PART2_ASP"
diff --git a/libavutil/hwcontext_opencl.c b/libavutil/hwcontext_opencl.c
index de093fffb1..efd8d0e094 100644
--- a/libavutil/hwcontext_opencl.c
+++ b/libavutil/hwcontext_opencl.c
@@ -96,6 +96,11 @@
#endif
+#if HAVE_OPENCL_VIDEOTOOLBOX
+#include <OpenCL/cl_gl_ext.h>
+#include <VideoToolbox/VideoToolbox.h>
+#endif
+
#if HAVE_OPENCL_VAAPI_INTEL_MEDIA && CONFIG_LIBMFX
extern int ff_qsv_get_surface_base_handle(mfxFrameSurface1 *surf,
enum AVHWDeviceType base_dev_typ,
void **base_handle);
@@ -1364,6 +1369,12 @@ static int opencl_device_derive(AVHWDeviceContext *hwdev,
break;
#endif
+#if HAVE_OPENCL_VIDEOTOOLBOX
+ case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
+ err = opencl_device_create(hwdev, NULL, NULL, 0);
+ break;
+#endif
+
default:
err = AVERROR(ENOSYS);
break;
@@ -2819,6 +2830,85 @@ fail:
#endif
+#if HAVE_OPENCL_VIDEOTOOLBOX
+
+static void opencl_unmap_from_vt(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ uint8_t *desc = hwmap->priv;
+ opencl_pool_free(hwfc, desc);
+}
+
+static int opencl_map_from_vt(AVHWFramesContext *dst_fc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ CVPixelBufferRef pixbuf = (CVPixelBufferRef) src->data[3];
+ IOSurfaceRef io_surface_ref = CVPixelBufferGetIOSurface(pixbuf);
+ cl_int err = 0;
+ AVOpenCLFrameDescriptor *desc = NULL;
+ AVOpenCLDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+
+ if (!io_surface_ref) {
+ av_log(dst_fc, AV_LOG_ERROR, "Failed to get IOSurfaceRef\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ desc = av_mallocz(sizeof(*desc));
+ if (!desc)
+ return AVERROR(ENOMEM);
+
+ for (int p = 0;; p++) {
+ cl_image_format image_format;
+ cl_image_desc image_desc;
+ cl_iosurface_properties_APPLE props[] = {
+ CL_IOSURFACE_REF_APPLE, (cl_iosurface_properties_APPLE) io_surface_ref,
+ CL_IOSURFACE_PLANE_APPLE, p,
+ 0
+ };
+
+ err = opencl_get_plane_format(dst_fc->sw_format, p,
+ src->width, src->height,
+ &image_format, &image_desc);
+ if (err == AVERROR(ENOENT))
+ break;
+ if (err < 0)
+ goto fail;
+
+ desc->planes[p] = clCreateImageFromIOSurfaceWithPropertiesAPPLE(dst_dev->context,
+ opencl_mem_flags_for_mapping(flags),
+ &image_format, &image_desc,
+ props, &err);
+ if (!desc->planes[p]) {
+ av_log(dst_fc, AV_LOG_ERROR, "Failed to create image from IOSurfaceRef\n");
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ desc->nb_planes++;
+ }
+
+ for (int i = 0; i < desc->nb_planes; i++)
+ dst->data[i] = (uint8_t *) desc->planes[i];
+
+ err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
+ opencl_unmap_from_vt, desc);
+ if (err < 0)
+ goto fail;
+
+ dst->width = src->width;
+ dst->height = src->height;
+
+ return 0;
+
+fail:
+ for (int i = 0; i < desc->nb_planes; i++)
+ clReleaseMemObject(desc->planes[i]);
+ av_freep(&desc);
+ memset(dst->data, 0, sizeof(dst->data));
+ return err;
+}
+
+#endif
+
static int opencl_map_from(AVHWFramesContext *hwfc, AVFrame *dst,
const AVFrame *src, int flags)
{
@@ -2864,6 +2953,10 @@ static int opencl_map_to(AVHWFramesContext *hwfc, AVFrame *dst,
case AV_PIX_FMT_DRM_PRIME:
if (priv->drm_arm_mapping_usable)
return opencl_map_from_drm_arm(hwfc, dst, src, flags);
+#endif
+#if HAVE_OPENCL_VIDEOTOOLBOX
+ case AV_PIX_FMT_VIDEOTOOLBOX:
+ return opencl_map_from_vt(hwfc, dst, src, flags);
#endif
}
return AVERROR(ENOSYS);
@@ -2922,6 +3015,10 @@ static int opencl_frames_derive_to(AVHWFramesContext *dst_fc,
if (!priv->drm_arm_mapping_usable)
return AVERROR(ENOSYS);
break;
+#endif
+#if HAVE_OPENCL_VIDEOTOOLBOX
+ case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
+ break;
#endif
default:
return AVERROR(ENOSYS);

File diff suppressed because it is too large Load Diff

View File

@ -1,307 +0,0 @@
diff --git a/configure b/configure
index 3cd3bdfb44..7bf621590e 100755
--- a/configure
+++ b/configure
@@ -3751,6 +3751,7 @@ tonemap_opencl_filter_deps="opencl const_nan"
transpose_opencl_filter_deps="opencl"
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
transpose_vulkan_filter_deps="vulkan spirv_compiler"
+transpose_vt_filter_deps="coreimage videotoolbox"
unsharp_opencl_filter_deps="opencl"
uspp_filter_deps="gpl avcodec"
vaguedenoiser_filter_deps="gpl"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index b3d3d981dd..f4ef8cd062 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -516,6 +516,7 @@ OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += vf_transpose_npp.o
OBJS-$(CONFIG_TRANSPOSE_OPENCL_FILTER) += vf_transpose_opencl.o opencl.o opencl/transpose.o
OBJS-$(CONFIG_TRANSPOSE_VAAPI_FILTER) += vf_transpose_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_TRANSPOSE_VULKAN_FILTER) += vf_transpose_vulkan.o vulkan.o vulkan_filter.o
+OBJS-$(CONFIG_TRANSPOSE_VT_FILTER) += vf_transpose_vt.o
OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNPREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index d7db46c2af..4d95949027 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -486,6 +486,7 @@ extern const AVFilter ff_vf_transpose_npp;
extern const AVFilter ff_vf_transpose_opencl;
extern const AVFilter ff_vf_transpose_vaapi;
extern const AVFilter ff_vf_transpose_vulkan;
+extern const AVFilter ff_vf_transpose_vt;
extern const AVFilter ff_vf_trim;
extern const AVFilter ff_vf_unpremultiply;
extern const AVFilter ff_vf_unsharp;
diff --git a/libavfilter/vf_transpose_vt.m b/libavfilter/vf_transpose_vt.m
new file mode 100644
index 0000000000..bc3b727a10
--- /dev/null
+++ b/libavfilter/vf_transpose_vt.m
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2024 Gnattu OC <gnattuoc@me.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <CoreImage/CoreImage.h>
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+#include "transpose.h"
+#include "video.h"
+
+typedef struct TransposeVtContext {
+ AVClass *class;
+ CIContext *ci_ctx;
+ CGImagePropertyOrientation orientation;
+
+ int dir;
+ int passthrough;
+} TransposeVtContext;
+
+static av_cold int transpose_vt_init(AVFilterContext *avctx)
+{
+ TransposeVtContext *s = avctx->priv;
+ s->ci_ctx = CFBridgingRetain([CIContext context]);
+ if (!s->ci_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "CoreImage Context create failed\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ return 0;
+}
+
+static av_cold void transpose_vt_uninit(AVFilterContext *avctx)
+{
+ TransposeVtContext *s = avctx->priv;
+ if (s->ci_ctx) {
+ CFRelease(s->ci_ctx);
+ s->ci_ctx = NULL;
+ }
+}
+
+static int transpose_vt_filter_frame(AVFilterLink *link, AVFrame *in)
+{
+ int ret;
+ AVFilterContext *ctx = link->dst;
+ TransposeVtContext *s = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ CVPixelBufferRef src;
+ CVPixelBufferRef dst;
+ AVFrame *out;
+ CIImage *source_image = NULL;
+ CIImage *transposed_image = NULL;
+
+ if (s->passthrough)
+ return ff_filter_frame(outlink, in);
+
+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!out) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ret = av_frame_copy_props(out, in);
+ if (ret < 0)
+ goto fail;
+
+ src = (CVPixelBufferRef)in->data[3];
+ dst = (CVPixelBufferRef)out->data[3];
+
+ source_image = CFBridgingRetain([CIImage imageWithCVPixelBuffer: src]);
+ transposed_image = CFBridgingRetain([source_image imageByApplyingCGOrientation: s->orientation]);
+ if (!transposed_image) {
+ CFRelease(source_image);
+ av_log(ctx, AV_LOG_ERROR, "transpose image failed, %d\n", ret);
+ ret = AVERROR_EXTERNAL;
+ goto fail;
+ }
+ [(__bridge CIContext*)s->ci_ctx render: (__bridge CIImage*)transposed_image toCVPixelBuffer: dst];
+ CFRelease(source_image);
+ CFRelease(transposed_image);
+ CVBufferPropagateAttachments(src, dst);
+
+ av_frame_free(&in);
+
+ return ff_filter_frame(outlink, out);
+
+ fail:
+ av_frame_free(&in);
+ av_frame_free(&out);
+ return ret;
+}
+
+static int transpose_vt_recreate_hw_ctx(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ AVHWFramesContext *hw_frame_ctx_in;
+ AVHWFramesContext *hw_frame_ctx_out;
+ int err;
+
+ av_buffer_unref(&outlink->hw_frames_ctx);
+
+ hw_frame_ctx_in = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+ outlink->hw_frames_ctx = av_hwframe_ctx_alloc(hw_frame_ctx_in->device_ref);
+ hw_frame_ctx_out = (AVHWFramesContext *)outlink->hw_frames_ctx->data;
+ hw_frame_ctx_out->format = AV_PIX_FMT_VIDEOTOOLBOX;
+ hw_frame_ctx_out->sw_format = hw_frame_ctx_in->sw_format;
+ hw_frame_ctx_out->width = outlink->w;
+ hw_frame_ctx_out->height = outlink->h;
+
+ err = ff_filter_init_hw_frames(avctx, outlink, 1);
+ if (err < 0)
+ return err;
+
+ err = av_hwframe_ctx_init(outlink->hw_frames_ctx);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to init videotoolbox frame context, %s\n",
+ av_err2str(err));
+ return err;
+ }
+
+ return 0;
+}
+
+static int transpose_vt_config_output(AVFilterLink *outlink)
+{
+ int err;
+ AVFilterContext *avctx = outlink->src;
+ TransposeVtContext *s = avctx->priv;
+ AVFilterLink *inlink = outlink->src->inputs[0];
+ int swap_w_h = 0;
+
+ av_buffer_unref(&outlink->hw_frames_ctx);
+ outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
+
+ if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
+ (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
+ inlink->w, inlink->h, inlink->w, inlink->h);
+ s->orientation = kCGImagePropertyOrientationUp;
+ return 0;
+ }
+
+ s->passthrough = TRANSPOSE_PT_TYPE_NONE;
+
+ switch (s->dir) {
+ case TRANSPOSE_CCLOCK_FLIP:
+ s->orientation = kCGImagePropertyOrientationLeftMirrored;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CCLOCK:
+ s->orientation = kCGImagePropertyOrientationLeft;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CLOCK:
+ s->orientation = kCGImagePropertyOrientationRight;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_CLOCK_FLIP:
+ s->orientation = kCGImagePropertyOrientationRightMirrored;
+ swap_w_h = 1;
+ break;
+ case TRANSPOSE_REVERSAL:
+ s->orientation = kCGImagePropertyOrientationDown;
+ break;
+ case TRANSPOSE_HFLIP:
+ s->orientation = kCGImagePropertyOrientationUpMirrored;
+ break;
+ case TRANSPOSE_VFLIP:
+ s->orientation = kCGImagePropertyOrientationDownMirrored;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Failed to set direction to %d\n", s->dir);
+ return AVERROR(EINVAL);
+ }
+
+ if (!swap_w_h)
+ return 0;
+
+ outlink->w = inlink->h;
+ outlink->h = inlink->w;
+ return transpose_vt_recreate_hw_ctx(outlink);
+}
+
+#define OFFSET(x) offsetof(TransposeVtContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption transpose_vt_options[] = {
+ { "dir", "set transpose direction",
+ OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 6, FLAGS, .unit = "dir" },
+ { "cclock_flip", "rotate counter-clockwise with vertical flip",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
+ { "clock", "rotate clockwise",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "cclock", "rotate counter-clockwise",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK }, .flags=FLAGS, .unit = "dir" },
+ { "clock_flip", "rotate clockwise with vertical flip",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP }, .flags=FLAGS, .unit = "dir" },
+ { "reversal", "rotate by half-turn",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL }, .flags=FLAGS, .unit = "dir" },
+ { "hflip", "flip horizontally",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP }, .flags=FLAGS, .unit = "dir" },
+ { "vflip", "flip vertically",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP }, .flags=FLAGS, .unit = "dir" },
+
+ { "passthrough", "do not apply transposition if the input matches the specified geometry",
+ OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64=TRANSPOSE_PT_TYPE_NONE }, 0, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "none", "always apply transposition",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "portrait", "preserve portrait geometry",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+ { "landscape", "preserve landscape geometry",
+ 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, INT_MIN, INT_MAX, FLAGS, .unit = "passthrough" },
+
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(transpose_vt);
+
+static const AVFilterPad transpose_vt_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = &transpose_vt_filter_frame,
+ },
+};
+
+static const AVFilterPad transpose_vt_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = &transpose_vt_config_output,
+ },
+};
+
+const AVFilter ff_vf_transpose_vt = {
+ .name = "transpose_vt",
+ .description = NULL_IF_CONFIG_SMALL("Transpose Videotoolbox frames"),
+ .priv_size = sizeof(TransposeVtContext),
+ .init = transpose_vt_init,
+ .uninit = transpose_vt_uninit,
+ FILTER_INPUTS(transpose_vt_inputs),
+ FILTER_OUTPUTS(transpose_vt_outputs),
+ FILTER_SINGLE_PIXFMT(AV_PIX_FMT_VIDEOTOOLBOX),
+ .priv_class = &transpose_vt_class,
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};

View File

@ -1,110 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/libsvtav1.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/libsvtav1.c
+++ jellyfin-ffmpeg/libavcodec/libsvtav1.c
@@ -170,7 +170,7 @@ static int config_enc_params(EbSvtAv1Enc
param->look_ahead_distance = svt_enc->la_depth;
#endif
- if (svt_enc->enc_mode >= 0)
+ if (svt_enc->enc_mode >= -1)
param->enc_mode = svt_enc->enc_mode;
if (avctx->bit_rate) {
@@ -184,8 +184,10 @@ static int config_enc_params(EbSvtAv1Enc
param->min_qp_allowed = avctx->qmin;
}
param->max_bit_rate = avctx->rc_max_rate;
- if (avctx->bit_rate && avctx->rc_buffer_size)
- param->maximum_buffer_size_ms = avctx->rc_buffer_size * 1000LL / avctx->bit_rate;
+ if ((avctx->bit_rate > 0 || avctx->rc_max_rate > 0) && avctx->rc_buffer_size)
+ param->maximum_buffer_size_ms =
+ avctx->rc_buffer_size * 1000LL /
+ FFMAX(avctx->bit_rate, avctx->rc_max_rate);
if (svt_enc->crf > 0) {
param->qp = svt_enc->crf;
@@ -240,9 +242,27 @@ static int config_enc_params(EbSvtAv1Enc
if (avctx->level != FF_LEVEL_UNKNOWN)
param->level = avctx->level;
- if (avctx->gop_size > 0)
+ // gop_size == 1 case is handled when encoding each frame by setting
+ // pic_type to EB_AV1_KEY_PICTURE. For gop_size > 1, set the
+ // intra_period_length. Even though setting intra_period_length to 0 should
+ // work in this case, it does not.
+ // See: https://gitlab.com/AOMediaCodec/SVT-AV1/-/issues/2076
+ if (avctx->gop_size > 1)
param->intra_period_length = avctx->gop_size - 1;
+#if SVT_AV1_CHECK_VERSION(1, 1, 0)
+ // In order for SVT-AV1 to force keyframes by setting pic_type to
+ // EB_AV1_KEY_PICTURE on any frame, force_key_frames has to be set. Note
+ // that this does not force all frames to be keyframes (it only forces a
+ // keyframe when pic_type is set to EB_AV1_KEY_PICTURE). As of now, SVT-AV1
+ // does not support arbitrary keyframe requests by setting pic_type to
+ // EB_AV1_KEY_PICTURE, so it is done only when gop_size == 1.
+ // FIXME: When SVT-AV1 supports arbitrary keyframe requests, this code needs
+ // to be updated to set force_key_frames accordingly.
+ if (avctx->gop_size == 1)
+ param->force_key_frames = 1;
+#endif
+
if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
param->frame_rate_numerator = avctx->framerate.num;
param->frame_rate_denominator = avctx->framerate.den;
@@ -302,7 +322,8 @@ static int config_enc_params(EbSvtAv1Enc
avctx->bit_rate = param->rate_control_mode > 0 ?
param->target_bit_rate : 0;
avctx->rc_max_rate = param->max_bit_rate;
- avctx->rc_buffer_size = param->maximum_buffer_size_ms * avctx->bit_rate / 1000LL;
+ avctx->rc_buffer_size = param->maximum_buffer_size_ms *
+ FFMAX(avctx->bit_rate, avctx->rc_max_rate) / 1000LL;
if (avctx->bit_rate || avctx->rc_max_rate || avctx->rc_buffer_size) {
AVCPBProperties *cpb_props = ff_add_cpb_side_data(avctx);
@@ -453,6 +474,9 @@ static int eb_send_frame(AVCodecContext
break;
}
+ if (avctx->gop_size == 1)
+ headerPtr->pic_type = EB_AV1_KEY_PICTURE;
+
svt_av1_enc_send_picture(svt_enc->svt_handle, headerPtr);
return 0;
@@ -509,6 +533,14 @@ static int eb_receive_packet(AVCodecCont
if (svt_ret == EB_NoErrorEmptyQueue)
return AVERROR(EAGAIN);
+#if SVT_AV1_CHECK_VERSION(2, 0, 0)
+ if (headerPtr->flags & EB_BUFFERFLAG_EOS) {
+ svt_enc->eos_flag = EOS_RECEIVED;
+ svt_av1_enc_release_out_buffer(&headerPtr);
+ return AVERROR_EOF;
+ }
+#endif
+
ref = get_output_ref(avctx, svt_enc, headerPtr->n_filled_len);
if (!ref) {
av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
@@ -543,8 +575,10 @@ static int eb_receive_packet(AVCodecCont
if (headerPtr->pic_type == EB_AV1_NON_REF_PICTURE)
pkt->flags |= AV_PKT_FLAG_DISPOSABLE;
+#if !(SVT_AV1_CHECK_VERSION(2, 0, 0))
if (headerPtr->flags & EB_BUFFERFLAG_EOS)
svt_enc->eos_flag = EOS_RECEIVED;
+#endif
ff_side_data_set_encoder_stats(pkt, headerPtr->qp * FF_QP2LAMBDA, NULL, 0, pict_type);
@@ -590,7 +624,7 @@ static const AVOption options[] = {
{ "high", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, VE, "tier" },
#endif
{ "preset", "Encoding preset",
- OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, MAX_ENC_PRESET, VE },
+ OFFSET(enc_mode), AV_OPT_TYPE_INT, { .i64 = -2 }, -2, MAX_ENC_PRESET, VE },
FF_AV1_PROFILE_OPTS

View File

@ -1,47 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_vulkan.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_vulkan.c
@@ -1581,17 +1581,33 @@ static int vulkan_device_derive(AVHWDevi
#if CONFIG_VAAPI
case AV_HWDEVICE_TYPE_VAAPI: {
AVVAAPIDeviceContext *src_hwctx = src_ctx->hwctx;
+ VADisplay dpy = src_hwctx->display;
+#if VA_CHECK_VERSION(1, 15, 0)
+ VAStatus vas;
+ VADisplayAttribute attr = {
+ .type = VADisplayPCIID,
+ };
+#endif
+ const char *vendor;
- const char *vendor = vaQueryVendorString(src_hwctx->display);
- if (!vendor) {
- av_log(ctx, AV_LOG_ERROR, "Unable to get device info from VAAPI!\n");
- return AVERROR_EXTERNAL;
- }
+#if VA_CHECK_VERSION(1, 15, 0)
+ vas = vaGetDisplayAttributes(dpy, &attr, 1);
+ if (vas == VA_STATUS_SUCCESS && attr.flags != VA_DISPLAY_ATTRIB_NOT_SUPPORTED)
+ dev_select.pci_device = (attr.value & 0xFFFF);
+#endif
+
+ if (!dev_select.pci_device) {
+ vendor = vaQueryVendorString(dpy);
+ if (!vendor) {
+ av_log(ctx, AV_LOG_ERROR, "Unable to get device info from VAAPI!\n");
+ return AVERROR_EXTERNAL;
+ }
- if (strstr(vendor, "Intel"))
- dev_select.vendor_id = 0x8086;
- if (strstr(vendor, "AMD"))
- dev_select.vendor_id = 0x1002;
+ if (strstr(vendor, "Intel"))
+ dev_select.vendor_id = 0x8086;
+ if (strstr(vendor, "AMD"))
+ dev_select.vendor_id = 0x1002;
+ }
return vulkan_device_create_internal(ctx, &dev_select, opts, flags);
}

View File

@ -1,82 +0,0 @@
Index: jellyfin-ffmpeg/libavformat/mpegtsenc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavformat/mpegtsenc.c
+++ jellyfin-ffmpeg/libavformat/mpegtsenc.c
@@ -23,6 +23,7 @@
#include "libavutil/bswap.h"
#include "libavutil/crc.h"
#include "libavutil/dict.h"
+#include "libavutil/dovi_meta.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
@@ -348,6 +349,52 @@ static void put_registration_descriptor(
*q_ptr = q;
}
+static int put_dovi_descriptor(AVFormatContext *s, uint8_t **q_ptr,
+ const AVDOVIDecoderConfigurationRecord *dovi)
+{
+ uint16_t val16;
+ uint8_t *q = *q_ptr;
+
+ if (!dovi)
+ return AVERROR(ENOMEM);
+
+ if (!dovi->bl_present_flag) {
+ av_log(s, AV_LOG_ERROR,
+ "EL only DOVI stream is not supported!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ put_registration_descriptor(&q, MKTAG('D', 'O', 'V', 'I')); // format_identifier
+
+ /* DOVI Video Stream Descriptor Syntax */
+ *q++ = 0xb0; // descriptor_tag
+ *q++ = 0x05; // descriptor_length
+ *q++ = dovi->dv_version_major;
+ *q++ = dovi->dv_version_minor;
+
+ val16 = (dovi->dv_profile & 0x7f) << 9 | // 7 bits
+ (dovi->dv_level & 0x3f) << 3 | // 6 bits
+ (dovi->rpu_present_flag & 0x01) << 2 | // 1 bit
+ (dovi->el_present_flag & 0x01) << 1 | // 1 bit
+ (dovi->bl_present_flag & 0x01); // 1 bit
+ put16(&q, val16);
+
+#if 0
+ // TODO: support dependency_pid (EL only stream)
+ // descriptor_length: 0x05->0x07
+ if (!bl_present_flag) {
+ val16 = (dependency_pid & 0x1fff) << 3; // 13+3 bits
+ put16(&q, val16);
+ }
+#endif
+
+ *q++ = (dovi->dv_bl_signal_compatibility_id & 0x0f) << 4; // 4+4 bits
+
+ *q_ptr = q;
+
+ return 0;
+}
+
static int get_dvb_stream_type(AVFormatContext *s, AVStream *st)
{
MpegTSWrite *ts = s->priv_data;
@@ -795,7 +842,15 @@ static int mpegts_write_pmt(AVFormatCont
} else if (stream_type == STREAM_TYPE_VIDEO_VC1) {
put_registration_descriptor(&q, MKTAG('V', 'C', '-', '1'));
} else if (stream_type == STREAM_TYPE_VIDEO_HEVC && s->strict_std_compliance <= FF_COMPLIANCE_NORMAL) {
- put_registration_descriptor(&q, MKTAG('H', 'E', 'V', 'C'));
+ const AVDOVIDecoderConfigurationRecord *dovi = (const AVDOVIDecoderConfigurationRecord *)
+ av_stream_get_side_data(st, AV_PKT_DATA_DOVI_CONF, NULL);
+
+ if (dovi && dovi->bl_present_flag && s->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+ if (put_dovi_descriptor(s, &q, dovi) < 0)
+ break;
+ } else {
+ put_registration_descriptor(&q, MKTAG('H', 'E', 'V', 'C'));
+ }
} else if (stream_type == STREAM_TYPE_VIDEO_CAVS || stream_type == STREAM_TYPE_VIDEO_AVS2 ||
stream_type == STREAM_TYPE_VIDEO_AVS3) {
put_registration_descriptor(&q, MKTAG('A', 'V', 'S', 'V'));

View File

@ -1,82 +0,0 @@
Index: jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
===================================================================
--- jellyfin-ffmpeg.orig/libavutil/hwcontext_qsv.c
+++ jellyfin-ffmpeg/libavutil/hwcontext_qsv.c
@@ -36,6 +36,7 @@
#include "hwcontext_d3d11va.h"
#endif
#if CONFIG_DXVA2
+#include <initguid.h>
#include "hwcontext_dxva2.h"
#endif
@@ -737,9 +738,11 @@ static int qsv_d3d9_update_config(void *
#if CONFIG_DXVA2
mfxStatus sts;
IDirect3DDeviceManager9* devmgr = handle;
- IDirect3DDevice9Ex *device = NULL;
+ IDirect3DDevice9 *device = NULL;
+ IDirect3DDevice9Ex *device_ex = NULL;
HANDLE device_handle = 0;
IDirect3D9Ex *d3d9ex = NULL;
+ IDirect3D9 *d3d9 = NULL;
LUID luid;
D3DDEVICE_CREATION_PARAMETERS params;
HRESULT hr;
@@ -757,18 +760,31 @@ static int qsv_d3d9_update_config(void *
IDirect3DDeviceManager9_CloseDeviceHandle(devmgr, device_handle);
goto fail;
}
+ hr = IDirect3DDevice9_QueryInterface(device, &IID_IDirect3DDevice9Ex, (void **)&device_ex);
+ IDirect3DDevice9_Release(device);
+ if (FAILED(hr)) {
+ av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9_QueryInterface %d\n", hr);
+ goto unlock;
+ }
- hr = IDirect3DDevice9Ex_GetCreationParameters(device, &params);
+ hr = IDirect3DDevice9Ex_GetCreationParameters(device_ex, &params);
if (FAILED(hr)) {
av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9_GetCreationParameters %d\n", hr);
- IDirect3DDevice9Ex_Release(device);
+ IDirect3DDevice9Ex_Release(device_ex);
goto unlock;
}
- hr = IDirect3DDevice9Ex_GetDirect3D(device, &d3d9ex);
+ hr = IDirect3DDevice9Ex_GetDirect3D(device_ex, &d3d9);
if (FAILED(hr)) {
- av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetAdapterLUID %d\n", hr);
- IDirect3DDevice9Ex_Release(device);
+ av_log(ctx, AV_LOG_ERROR, "Error IDirect3DDevice9Ex_GetDirect3D %d\n", hr);
+ IDirect3DDevice9Ex_Release(device_ex);
+ goto unlock;
+ }
+ hr = IDirect3D9_QueryInterface(d3d9, &IID_IDirect3D9Ex, (void **)&d3d9ex);
+ IDirect3D9_Release(d3d9);
+ if (FAILED(hr)) {
+ av_log(ctx, AV_LOG_ERROR, "Error IDirect3D9_QueryInterface3D %d\n", hr);
+ IDirect3DDevice9Ex_Release(device_ex);
goto unlock;
}
@@ -792,7 +808,7 @@ static int qsv_d3d9_update_config(void *
release:
IDirect3D9Ex_Release(d3d9ex);
- IDirect3DDevice9Ex_Release(device);
+ IDirect3DDevice9Ex_Release(device_ex);
unlock:
IDirect3DDeviceManager9_UnlockDevice(devmgr, device_handle, FALSE);
@@ -1340,8 +1356,9 @@ static int qsv_frames_derive_from(AVHWFr
case AV_HWDEVICE_TYPE_D3D11VA:
{
D3D11_TEXTURE2D_DESC texDesc;
+ AVD3D11VAFramesContext *dst_hwctx;
dst_ctx->initial_pool_size = src_ctx->initial_pool_size;
- AVD3D11VAFramesContext *dst_hwctx = dst_ctx->hwctx;
+ dst_hwctx = dst_ctx->hwctx;
dst_hwctx->texture_infos = av_calloc(src_hwctx->nb_surfaces,
sizeof(*dst_hwctx->texture_infos));
if (!dst_hwctx->texture_infos)

View File

@ -1,72 +0,0 @@
Subject: [PATCH] lavc/videotoolbox: Add low_priority key_frame_only decoding
---
Index: libavcodec/avcodec.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
--- a/libavcodec/avcodec.h (revision 019c5b13ace26af67613d27f333104e90bde8b31)
+++ b/libavcodec/avcodec.h (revision c58e99fd3e45d898d4726456f6bddb0453592bac)
@@ -2275,6 +2275,13 @@
*/
#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
+/**
+ * Some hardware decoders (like VideoToolbox) support a decode session priority
+ * that runs the decode pipeline at a lower priority than is used for realtime decoding.
+ * This will be useful for background processing without interrupting normal playback.
+ */
+#define AV_HWACCEL_FLAG_LOW_PRIORITY (1 << 4)
+
/**
* @}
*/
Index: libavcodec/options_table.h
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/libavcodec/options_table.h b/libavcodec/options_table.h
--- a/libavcodec/options_table.h (revision 019c5b13ace26af67613d27f333104e90bde8b31)
+++ b/libavcodec/options_table.h (revision c58e99fd3e45d898d4726456f6bddb0453592bac)
@@ -397,6 +397,7 @@
{"unsafe_output", "allow potentially unsafe hwaccel frame output that might require special care to process successfully", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_UNSAFE_OUTPUT }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{"extra_hw_frames", "Number of extra hardware frames to allocate for the user", OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, V|D },
{"discard_damaged_percentage", "Percentage of damaged samples to discard a frame", OFFSET(discard_damaged_percentage), AV_OPT_TYPE_INT, {.i64 = 95 }, 0, 100, V|D },
+{"low_priority", "attempt to run decode pipeline at a lower priority than is used for realtime decoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_LOW_PRIORITY }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{NULL},
};
Index: libavcodec/videotoolbox.c
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
diff --git a/libavcodec/videotoolbox.c b/libavcodec/videotoolbox.c
--- a/libavcodec/videotoolbox.c (revision 019c5b13ace26af67613d27f333104e90bde8b31)
+++ b/libavcodec/videotoolbox.c (revision c58e99fd3e45d898d4726456f6bddb0453592bac)
@@ -983,6 +983,23 @@
av_log(avctx, AV_LOG_ERROR, "VideoToolbox reported invalid data.\n");
return AVERROR_INVALIDDATA;
case 0:
+ if (avctx->skip_frame >= AVDISCARD_NONKEY) {
+ status = VTSessionSetProperty(videotoolbox->session,
+ kVTDecompressionPropertyKey_OnlyTheseFrames,
+ kVTDecompressionProperty_OnlyTheseFrames_KeyFrames);
+ if (status) {
+ av_log(avctx, AV_LOG_WARNING, "kVTDecompressionProperty_OnlyTheseFrames_KeyFrames is not supported on this device. Ignoring.\n");
+ }
+ }
+ if (avctx->hwaccel_flags & AV_HWACCEL_FLAG_LOW_PRIORITY) {
+ status = VTSessionSetProperty(videotoolbox->session,
+ kVTDecompressionPropertyKey_RealTime,
+ kCFBooleanFalse);
+ av_log(avctx, AV_LOG_INFO, "Decoder running at lower priority.\n");
+ if (status) {
+ av_log(avctx, AV_LOG_WARNING, "kVTDecompressionPropertyKey_RealTime is not supported on this device. Ignoring.\n");
+ }
+ }
return 0;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown VideoToolbox session creation error %d\n", (int)status);

View File

@ -1,128 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/avcodec.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/avcodec.h
+++ jellyfin-ffmpeg/libavcodec/avcodec.h
@@ -1584,12 +1584,14 @@ typedef struct AVCodecContext {
#define FF_PROFILE_DNXHR_HQX 4
#define FF_PROFILE_DNXHR_444 5
-#define FF_PROFILE_DTS 20
-#define FF_PROFILE_DTS_ES 30
-#define FF_PROFILE_DTS_96_24 40
-#define FF_PROFILE_DTS_HD_HRA 50
-#define FF_PROFILE_DTS_HD_MA 60
-#define FF_PROFILE_DTS_EXPRESS 70
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+#define FF_PROFILE_DTS_EXPRESS 70
+#define FF_PROFILE_DTS_HD_MA_X 61
+#define FF_PROFILE_DTS_HD_MA_X_IMAX 62
#define FF_PROFILE_MPEG2_422 0
#define FF_PROFILE_MPEG2_HIGH 1
Index: jellyfin-ffmpeg/libavcodec/dca_syncwords.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dca_syncwords.h
+++ jellyfin-ffmpeg/libavcodec/dca_syncwords.h
@@ -33,4 +33,7 @@
#define DCA_SYNCWORD_SUBSTREAM_CORE 0x02B09261U
#define DCA_SYNCWORD_REV1AUX 0x9A1105A0U
+#define DCA_SYNCWORD_XLL_X 0x02000850U
+#define DCA_SYNCWORD_XLL_X_IMAX 0xF14000D0U
+
#endif /* AVCODEC_DCA_SYNCWORDS_H */
Index: jellyfin-ffmpeg/libavcodec/dca_xll.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dca_xll.c
+++ jellyfin-ffmpeg/libavcodec/dca_xll.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "avcodec.h"
#include "libavutil/channel_layout.h"
#include "dcadec.h"
#include "dcadata.h"
@@ -1054,6 +1055,22 @@ static int parse_frame(DCAXllDecoder *s,
return ret;
if ((ret = parse_band_data(s)) < 0)
return ret;
+
+ if (s->frame_size * 8 > FFALIGN(get_bits_count(&s->gb), 32)) {
+ unsigned int extradata_syncword;
+
+ // Align to dword
+ skip_bits_long(&s->gb, -get_bits_count(&s->gb) & 31);
+
+ extradata_syncword = show_bits_long(&s->gb, 32);
+
+ if (extradata_syncword == DCA_SYNCWORD_XLL_X) {
+ s->x_syncword_present = 1;
+ } else if ((extradata_syncword >> 1) == (DCA_SYNCWORD_XLL_X_IMAX >> 1)) {
+ s->x_imax_syncword_present = 1;
+ }
+ }
+
if (ff_dca_seek_bits(&s->gb, s->frame_size * 8)) {
av_log(s->avctx, AV_LOG_ERROR, "Read past end of XLL frame\n");
return AVERROR_INVALIDDATA;
@@ -1428,8 +1445,15 @@ int ff_dca_xll_filter_frame(DCAXllDecode
return AVERROR(EINVAL);
}
+ if (s->x_imax_syncword_present) {
+ avctx->profile = FF_PROFILE_DTS_HD_MA_X_IMAX;
+ } else if (s->x_syncword_present) {
+ avctx->profile = FF_PROFILE_DTS_HD_MA_X;
+ } else {
+ avctx->profile = FF_PROFILE_DTS_HD_MA;
+ }
+
avctx->bits_per_raw_sample = p->storage_bit_res;
- avctx->profile = FF_PROFILE_DTS_HD_MA;
avctx->bit_rate = 0;
frame->nb_samples = nsamples = s->nframesamples << (s->nfreqbands - 1);
Index: jellyfin-ffmpeg/libavcodec/dca_xll.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/dca_xll.h
+++ jellyfin-ffmpeg/libavcodec/dca_xll.h
@@ -135,6 +135,9 @@ typedef struct DCAXllDecoder {
DCADSPContext *dcadsp;
+ int x_syncword_present; ///< Syncword for extension data at end of frame (DTS:X) is present
+ int x_imax_syncword_present; ///< Syncword for extension data at end of frame (DTS:X IMAX) is present
+
int output_mask;
int32_t *output_samples[DCA_SPEAKER_COUNT];
} DCAXllDecoder;
Index: jellyfin-ffmpeg/libavcodec/profiles.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/profiles.c
+++ jellyfin-ffmpeg/libavcodec/profiles.c
@@ -36,12 +36,14 @@ const AVProfile ff_aac_profiles[] = {
};
const AVProfile ff_dca_profiles[] = {
- { FF_PROFILE_DTS, "DTS" },
- { FF_PROFILE_DTS_ES, "DTS-ES" },
- { FF_PROFILE_DTS_96_24, "DTS 96/24" },
- { FF_PROFILE_DTS_HD_HRA, "DTS-HD HRA" },
- { FF_PROFILE_DTS_HD_MA, "DTS-HD MA" },
- { FF_PROFILE_DTS_EXPRESS, "DTS Express" },
+ { FF_PROFILE_DTS, "DTS" },
+ { FF_PROFILE_DTS_ES, "DTS-ES" },
+ { FF_PROFILE_DTS_96_24, "DTS 96/24" },
+ { FF_PROFILE_DTS_HD_HRA, "DTS-HD HRA" },
+ { FF_PROFILE_DTS_HD_MA, "DTS-HD MA" },
+ { FF_PROFILE_DTS_HD_MA_X, "DTS-HD MA + DTS:X" },
+ { FF_PROFILE_DTS_HD_MA_X_IMAX, "DTS-HD MA + DTS:X IMAX" },
+ { FF_PROFILE_DTS_EXPRESS, "DTS Express" },
{ FF_PROFILE_UNKNOWN },
};

View File

@ -1,119 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/ac3dec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/ac3dec.c
+++ jellyfin-ffmpeg/libavcodec/ac3dec.c
@@ -1714,6 +1714,7 @@ skip:
if (!err) {
avctx->sample_rate = s->sample_rate;
avctx->bit_rate = s->bit_rate + s->prev_bit_rate;
+ avctx->profile = s->eac3_extension_type_a == 1 ? FF_PROFILE_EAC3_DDP_ATMOS : FF_PROFILE_UNKNOWN;
}
if (!avctx->sample_rate) {
Index: jellyfin-ffmpeg/libavcodec/ac3dec.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/ac3dec.h
+++ jellyfin-ffmpeg/libavcodec/ac3dec.h
@@ -102,6 +102,7 @@ typedef struct AC3DecodeContext {
int eac3; ///< indicates if current frame is E-AC-3
int eac3_frame_dependent_found; ///< bitstream has E-AC-3 dependent frame(s)
int eac3_subsbtreamid_found; ///< bitstream has E-AC-3 additional substream(s)
+ int eac3_extension_type_a; ///< bitstream has E-AC-3 extension type A enabled frame(s)
int dolby_surround_mode; ///< dolby surround mode (dsurmod)
int dolby_surround_ex_mode; ///< dolby surround ex mode (dsurexmod)
int dolby_headphone_mode; ///< dolby headphone mode (dheadphonmod)
Index: jellyfin-ffmpeg/libavcodec/ac3dec_float.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/ac3dec_float.c
+++ jellyfin-ffmpeg/libavcodec/ac3dec_float.c
@@ -33,6 +33,7 @@
#include "ac3dec.h"
#include "codec_internal.h"
+#include "profiles.h"
#include "eac3dec.c"
#include "ac3dec.c"
@@ -92,6 +93,7 @@ const FFCodec ff_eac3_decoder = {
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &ac3_eac3_decoder_class,
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_eac3_profiles),
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
Index: jellyfin-ffmpeg/libavcodec/avcodec.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/avcodec.h
+++ jellyfin-ffmpeg/libavcodec/avcodec.h
@@ -1593,6 +1593,8 @@ typedef struct AVCodecContext {
#define FF_PROFILE_DTS_HD_MA_X 61
#define FF_PROFILE_DTS_HD_MA_X_IMAX 62
+#define FF_PROFILE_EAC3_DDP_ATMOS 30
+
#define FF_PROFILE_MPEG2_422 0
#define FF_PROFILE_MPEG2_HIGH 1
#define FF_PROFILE_MPEG2_SS 2
Index: jellyfin-ffmpeg/libavcodec/codec_desc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/codec_desc.c
+++ jellyfin-ffmpeg/libavcodec/codec_desc.c
@@ -2931,6 +2931,7 @@ static const AVCodecDescriptor codec_des
.name = "eac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_eac3_profiles),
},
{
.id = AV_CODEC_ID_SIPR,
Index: jellyfin-ffmpeg/libavcodec/eac3dec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/eac3dec.c
+++ jellyfin-ffmpeg/libavcodec/eac3dec.c
@@ -464,7 +464,16 @@ static int ff_eac3_parse_header(AC3Decod
if (get_bits1(gbc)) {
int addbsil = get_bits(gbc, 6);
for (i = 0; i < addbsil + 1; i++) {
- skip_bits(gbc, 8); // skip additional bit stream info
+ if (i == 0) {
+ /* In this 8 bit chunk, the LSB is equal to flag_ec3_extension_type_a
+ which can be used to detect Atmos presence */
+ skip_bits(gbc, 7);
+ if (get_bits1(gbc)) {
+ s->eac3_extension_type_a = 1;
+ }
+ } else {
+ skip_bits(gbc, 8); // skip additional bit stream info
+ }
}
}
Index: jellyfin-ffmpeg/libavcodec/profiles.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/profiles.c
+++ jellyfin-ffmpeg/libavcodec/profiles.c
@@ -47,6 +47,11 @@ const AVProfile ff_dca_profiles[] = {
{ FF_PROFILE_UNKNOWN },
};
+const AVProfile ff_eac3_profiles[] = {
+ { FF_PROFILE_EAC3_DDP_ATMOS, "Dolby Digital Plus + Dolby Atmos"},
+ { FF_PROFILE_UNKNOWN },
+};
+
const AVProfile ff_dnxhd_profiles[] = {
{ FF_PROFILE_DNXHD, "DNXHD"},
{ FF_PROFILE_DNXHR_LB, "DNXHR LB"},
Index: jellyfin-ffmpeg/libavcodec/profiles.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/profiles.h
+++ jellyfin-ffmpeg/libavcodec/profiles.h
@@ -58,6 +58,7 @@
extern const AVProfile ff_aac_profiles[];
extern const AVProfile ff_dca_profiles[];
+extern const AVProfile ff_eac3_profiles[];
extern const AVProfile ff_dnxhd_profiles[];
extern const AVProfile ff_h264_profiles[];
extern const AVProfile ff_hevc_profiles[];

View File

@ -1,87 +0,0 @@
Index: jellyfin-ffmpeg/libavcodec/avcodec.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/avcodec.h
+++ jellyfin-ffmpeg/libavcodec/avcodec.h
@@ -1595,6 +1595,8 @@ typedef struct AVCodecContext {
#define FF_PROFILE_EAC3_DDP_ATMOS 30
+#define FF_PROFILE_TRUEHD_ATMOS 30
+
#define FF_PROFILE_MPEG2_422 0
#define FF_PROFILE_MPEG2_HIGH 1
#define FF_PROFILE_MPEG2_SS 2
Index: jellyfin-ffmpeg/libavcodec/codec_desc.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/codec_desc.c
+++ jellyfin-ffmpeg/libavcodec/codec_desc.c
@@ -2960,6 +2960,7 @@ static const AVCodecDescriptor codec_des
.name = "truehd",
.long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
.props = AV_CODEC_PROP_LOSSLESS,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_truehd_profiles),
},
{
.id = AV_CODEC_ID_MP4ALS,
Index: jellyfin-ffmpeg/libavcodec/mlpdec.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/mlpdec.c
+++ jellyfin-ffmpeg/libavcodec/mlpdec.c
@@ -42,6 +42,7 @@
#include "mlpdsp.h"
#include "mlp.h"
#include "config.h"
+#include "profiles.h"
/** number of bits used for VLC lookup - longest Huffman code is 9 */
#if ARCH_ARM
@@ -392,6 +393,14 @@ static int read_major_sync(MLPDecodeCont
m->num_substreams = mh.num_substreams;
m->substream_info = mh.substream_info;
+ /* If there is a 4th substream and the MSB of substream_info is set,
+ * there is a 16-channel spatial presentation (Atmos in TrueHD).
+ */
+ if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD
+ && m->num_substreams == 4 && m->substream_info >> 7 == 1) {
+ m->avctx->profile = FF_PROFILE_TRUEHD_ATMOS;
+ }
+
/* limit to decoding 3 substreams, as the 4th is used by Dolby Atmos for non-audio data */
m->max_decoded_substream = FFMIN(m->num_substreams - 1, 2);
@@ -1452,5 +1461,6 @@ const FFCodec ff_truehd_decoder = {
FF_CODEC_DECODE_CB(read_access_unit),
.flush = mlp_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_truehd_profiles),
};
#endif /* CONFIG_TRUEHD_DECODER */
Index: jellyfin-ffmpeg/libavcodec/profiles.c
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/profiles.c
+++ jellyfin-ffmpeg/libavcodec/profiles.c
@@ -52,6 +52,11 @@ const AVProfile ff_eac3_profiles[] = {
{ FF_PROFILE_UNKNOWN },
};
+const AVProfile ff_truehd_profiles[] = {
+ { FF_PROFILE_TRUEHD_ATMOS, "Dolby TrueHD + Dolby Atmos"},
+ { FF_PROFILE_UNKNOWN },
+};
+
const AVProfile ff_dnxhd_profiles[] = {
{ FF_PROFILE_DNXHD, "DNXHD"},
{ FF_PROFILE_DNXHR_LB, "DNXHR LB"},
Index: jellyfin-ffmpeg/libavcodec/profiles.h
===================================================================
--- jellyfin-ffmpeg.orig/libavcodec/profiles.h
+++ jellyfin-ffmpeg/libavcodec/profiles.h
@@ -59,6 +59,7 @@
extern const AVProfile ff_aac_profiles[];
extern const AVProfile ff_dca_profiles[];
extern const AVProfile ff_eac3_profiles[];
+extern const AVProfile ff_truehd_profiles[];
extern const AVProfile ff_dnxhd_profiles[];
extern const AVProfile ff_h264_profiles[];
extern const AVProfile ff_hevc_profiles[];

Some files were not shown because too many files have changed in this diff Show More