mirror of https://gitee.com/openharmony/third_party_ffmpeg (synced 2024-11-23 19:30:05 +00:00)

lavfi: do not use av_pix_fmt_descriptors directly.
parent 50ba57e0ce
commit 59ee9f78b0
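
The change is mechanical across libavfilter: direct reads of the global av_pix_fmt_descriptors[] table are replaced with the accessor API from libavutil/pixdesc.h, av_pix_fmt_desc_get() for the descriptor and av_get_pix_fmt_name() for the format name. A minimal sketch of the before/after pattern follows; it is not part of the commit, and describe_format() is a hypothetical helper used only for illustration.

    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    /* Hypothetical helper, for illustration only. */
    static void describe_format(enum AVPixelFormat fmt)
    {
        /* was: const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[fmt]; */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); /* NULL if fmt is invalid */
        if (!desc)
            return;
        /* was: av_pix_fmt_descriptors[fmt].name, .log2_chroma_w, .log2_chroma_h */
        printf("%s: hsub=%d vsub=%d\n",
               av_get_pix_fmt_name(fmt), desc->log2_chroma_w, desc->log2_chroma_h);
    }
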
@@ -214,7 +214,7 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
     av_dlog(ctx,
             "link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s",
             link, link->w, link->h,
-            av_pix_fmt_descriptors[link->format].name,
+            av_get_pix_fmt_name(link->format),
             link->src ? link->src->filter->name : "",
             link->dst ? link->dst->filter->name : "",
             end ? "\n" : "");
@@ -185,7 +185,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
     if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
         return AVERROR(ENOMEM);
 
-    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
+    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt));
     return 0;
 }
 
@@ -32,7 +32,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
 {
     uint8_t rgba_map[4] = {0};
     int i;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
     int hsub = pix_desc->log2_chroma_w;
 
     *is_packed_rgba = 1;
@@ -89,7 +89,7 @@ int main(int argc, char **argv)
         for (j = 0; j < fmts->format_count; j++)
             printf("INPUT[%d] %s: %s\n",
                    i, filter_ctx->filter->inputs[i].name,
-                   av_pix_fmt_descriptors[fmts->formats[j]].name);
+                   av_get_pix_fmt_name(fmts->formats[j]));
     }
 
     /* print the supported formats in output */
@@ -98,7 +98,7 @@ int main(int argc, char **argv)
         for (j = 0; j < fmts->format_count; j++)
             printf("OUTPUT[%d] %s: %s\n",
                    i, filter_ctx->filter->outputs[i].name,
-                   av_pix_fmt_descriptors[fmts->formats[j]].name);
+                   av_get_pix_fmt_name(fmts->formats[j]));
     }
 
     avfilter_free(filter_ctx);
@@ -213,10 +213,12 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
     int num_formats = type == AVMEDIA_TYPE_VIDEO ? AV_PIX_FMT_NB :
                       type == AVMEDIA_TYPE_AUDIO ? AV_SAMPLE_FMT_NB : 0;
 
-    for (fmt = 0; fmt < num_formats; fmt++)
+    for (fmt = 0; fmt < num_formats; fmt++) {
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
         if ((type != AVMEDIA_TYPE_VIDEO) ||
-            (type == AVMEDIA_TYPE_VIDEO && !(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_HWACCEL)))
+            (type == AVMEDIA_TYPE_VIDEO && !(desc->flags & PIX_FMT_HWACCEL)))
             ff_add_format(&ret, fmt);
+    }
 
     return ret;
 }
@@ -139,7 +139,7 @@ static int query_formats(AVFilterContext *ctx)
 
 static int config_input(AVFilterLink *inlink)
 {
-    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     AVFilterContext *ctx = inlink->dst;
     BoxBlurContext *boxblur = ctx->priv;
     int w = inlink->w, h = inlink->h;
@@ -158,7 +158,7 @@ static int config_input(AVFilterLink *link)
 {
     AVFilterContext *ctx = link->dst;
     CropContext *crop = ctx->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[link->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
     int ret;
     const char *expr;
     double res;
@@ -177,8 +177,8 @@ static int config_input(AVFilterLink *link)
     crop->var_values[VAR_POS] = NAN;
 
     av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
-    crop->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    crop->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    crop->hsub = pix_desc->log2_chroma_w;
+    crop->vsub = pix_desc->log2_chroma_h;
 
     if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                       var_names, crop->var_values,
@@ -248,6 +248,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     AVFilterContext *ctx = link->dst;
     CropContext *crop = ctx->priv;
     AVFilterBufferRef *ref2;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     int i;
 
     ref2 = avfilter_ref_buffer(picref, ~0);
@@ -281,8 +282,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     ref2->data[0] += crop->y * ref2->linesize[0];
     ref2->data[0] += crop->x * crop->max_step[0];
 
-    if (!(av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL ||
-          av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PSEUDOPAL)) {
+    if (!(desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL)) {
         for (i = 1; i < 3; i ++) {
             if (ref2->data[i]) {
                 ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i];
@@ -107,7 +107,7 @@ static int config_input(AVFilterLink *inlink)
     CropDetectContext *cd = ctx->priv;
 
     av_image_fill_max_pixsteps(cd->max_pixsteps, NULL,
-                               &av_pix_fmt_descriptors[inlink->format]);
+                               av_pix_fmt_desc_get(inlink->format));
 
     cd->x1 = inlink->w - 1;
     cd->y1 = inlink->h - 1;
@@ -226,9 +226,10 @@ static int end_frame(AVFilterLink *inlink)
     AVFilterLink *outlink = inlink->dst->outputs[0];
     AVFilterBufferRef *inpicref = inlink ->cur_buf;
     AVFilterBufferRef *outpicref = outlink->out_buf;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int direct = inpicref->buf == outpicref->buf;
-    int hsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    int vsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    int hsub0 = desc->log2_chroma_w;
+    int vsub0 = desc->log2_chroma_h;
     int plane;
     int ret;
 
@@ -81,9 +81,10 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     DrawBoxContext *drawbox = inlink->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
 
-    drawbox->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    drawbox->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    drawbox->hsub = desc->log2_chroma_w;
+    drawbox->vsub = desc->log2_chroma_h;
 
     if (drawbox->w == 0) drawbox->w = inlink->w;
     if (drawbox->h == 0) drawbox->h = inlink->h;
@@ -569,7 +569,7 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     DrawTextContext *dtext = ctx->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
     int ret;
 
     dtext->hsub = pix_desc->log2_chroma_w;
@@ -89,7 +89,7 @@ static int query_formats(AVFilterContext *ctx)
 static int config_props(AVFilterLink *inlink)
 {
     FadeContext *fade = inlink->dst->priv;
-    const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
 
     fade->hsub = pixdesc->log2_chroma_w;
     fade->vsub = pixdesc->log2_chroma_h;
@@ -78,15 +78,16 @@ static int query_formats(AVFilterContext *ctx)
      * a bitstream format, and does not have vertically sub-sampled chroma */
     if (ctx->inputs[0]) {
         formats = NULL;
-        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
-            if (!( av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL
-                || av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BITSTREAM)
-                && av_pix_fmt_descriptors[pix_fmt].nb_components
-                && !av_pix_fmt_descriptors[pix_fmt].log2_chroma_h
-                && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
+        for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) {
+            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
+            if (!(desc->flags & PIX_FMT_HWACCEL ||
+                  desc->flags & PIX_FMT_BITSTREAM) &&
+                desc->nb_components && !desc->log2_chroma_h &&
+                (ret = ff_add_format(&formats, pix_fmt)) < 0) {
                 ff_formats_unref(&formats);
                 return ret;
             }
+        }
         ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
         ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
     }
@@ -167,8 +167,9 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     GradFunContext *gf = inlink->dst->priv;
-    int hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    int vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+    int hsub = desc->log2_chroma_w;
+    int vsub = desc->log2_chroma_h;
 
     gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
     if (!gf->buf)
@@ -75,11 +75,11 @@ static int query_formats(AVFilterContext *ctx)
 static int config_props(AVFilterLink *inlink)
 {
     FlipContext *flip = inlink->dst->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc);
-    flip->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    flip->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    flip->hsub = pix_desc->log2_chroma_w;
+    flip->vsub = pix_desc->log2_chroma_h;
 
     return 0;
 }
@@ -295,11 +295,12 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input(AVFilterLink *inlink)
 {
     HQDN3DContext *hqdn3d = inlink->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int i;
 
-    hqdn3d->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    hqdn3d->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
-    hqdn3d->depth = av_pix_fmt_descriptors[inlink->format].comp[0].depth_minus1+1;
+    hqdn3d->hsub = desc->log2_chroma_w;
+    hqdn3d->vsub = desc->log2_chroma_h;
+    hqdn3d->depth = desc->comp[0].depth_minus1+1;
 
     hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
     if (!hqdn3d->line)
@@ -212,7 +212,7 @@ static int config_props(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     LutContext *lut = ctx->priv;
-    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int min[4], max[4];
     int val, comp, ret;
 
@@ -113,7 +113,7 @@ static int query_formats(AVFilterContext *ctx)
 static int config_input_main(AVFilterLink *inlink)
 {
     OverlayContext *over = inlink->dst->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     av_image_fill_max_pixsteps(over->max_plane_step, NULL, pix_desc);
     over->hsub = pix_desc->log2_chroma_w;
@@ -158,10 +158,10 @@ static int config_input_overlay(AVFilterLink *inlink)
     av_log(ctx, AV_LOG_VERBOSE,
            "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n",
            ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
-           av_pix_fmt_descriptors[ctx->inputs[MAIN]->format].name,
+           av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
            over->x, over->y,
            ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
-           av_pix_fmt_descriptors[ctx->inputs[OVERLAY]->format].name);
+           av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
 
     if (over->x < 0 || over->y < 0 ||
         over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
@@ -144,7 +144,7 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *ctx = inlink->dst;
     PadContext *pad = ctx->priv;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
     uint8_t rgba_color[4];
     int ret, is_packed_rgba;
     double var_values[VARS_NB], res;
@@ -44,7 +44,7 @@ static int config_props(AVFilterLink *inlink)
 {
     PixdescTestContext *priv = inlink->dst->priv;
 
-    priv->pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
         return AVERROR(ENOMEM);
@@ -155,6 +155,7 @@ static int config_props(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     AVFilterLink *inlink = outlink->src->inputs[0];
     ScaleContext *scale = ctx->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     int64_t w, h;
     double var_values[VARS_NB], res;
     char *expr;
@@ -170,8 +171,8 @@ static int config_props(AVFilterLink *outlink)
     var_values[VAR_DAR] = var_values[VAR_A] = (double) inlink->w / inlink->h;
     var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
         (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
-    var_values[VAR_HSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    var_values[VAR_VSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
+    var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
 
     /* evaluate width and height */
     av_expr_parse_and_eval(&res, (expr = scale->w_expr),
@@ -220,12 +221,12 @@ static int config_props(AVFilterLink *outlink)
 
     /* TODO: make algorithm configurable */
     av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n",
-           inlink ->w, inlink ->h, av_pix_fmt_descriptors[ inlink->format].name,
-           outlink->w, outlink->h, av_pix_fmt_descriptors[outlink->format].name,
+           inlink ->w, inlink ->h, av_get_pix_fmt_name(inlink->format),
+           outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
            scale->flags);
 
-    scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL ||
-                          av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PSEUDOPAL;
+    scale->input_is_pal = desc->flags & PIX_FMT_PAL ||
+                          desc->flags & PIX_FMT_PSEUDOPAL;
 
     if (scale->sws)
         sws_freeContext(scale->sws);
@@ -261,6 +262,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
     ScaleContext *scale = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
     AVFilterBufferRef *outpicref, *for_next_filter;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
     int ret = 0;
 
     if (!scale->sws) {
@@ -270,8 +272,8 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
         return ff_start_frame(outlink, outpicref);
     }
 
-    scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    scale->hsub = desc->log2_chroma_w;
+    scale->vsub = desc->log2_chroma_h;
 
     outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
     if (!outpicref)
@@ -46,8 +46,9 @@ static int end_frame(AVFilterLink *inlink)
     AVFilterContext *ctx = inlink->dst;
     ShowInfoContext *showinfo = ctx->priv;
     AVFilterBufferRef *picref = inlink->cur_buf;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     uint32_t plane_checksum[4] = {0}, checksum = 0;
-    int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    int i, plane, vsub = desc->log2_chroma_h;
 
     for (plane = 0; picref->data[plane] && plane < 4; plane++) {
         size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
@@ -67,7 +68,7 @@ static int end_frame(AVFilterLink *inlink)
            "checksum:%u plane_checksum:[%u %u %u %u]\n",
            showinfo->frame,
            picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
-           av_pix_fmt_descriptors[picref->format].name,
+           desc->name,
            picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
            picref->video->w, picref->video->h,
            !picref->video->interlaced ? 'P' : /* Progressive */
@@ -54,8 +54,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
 static int config_props(AVFilterLink *link)
 {
     SliceContext *slice = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    slice->vshift = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    slice->vshift = desc->log2_chroma_h;
 
     return 0;
 }
@@ -98,12 +98,13 @@ static int config_props_output(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     TransContext *trans = ctx->priv;
     AVFilterLink *inlink = ctx->inputs[0];
-    const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[outlink->format];
+    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
+    const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
 
-    trans->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
-    trans->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+    trans->hsub = desc_in->log2_chroma_w;
+    trans->vsub = desc_in->log2_chroma_h;
 
-    av_image_fill_max_pixsteps(trans->pixsteps, NULL, pixdesc);
+    av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);
 
     outlink->w = inlink->h;
     outlink->h = inlink->w;
@@ -187,9 +187,10 @@ static void init_filter_param(AVFilterContext *ctx, FilterParam *fp, const char
 static int config_props(AVFilterLink *link)
 {
     UnsharpContext *unsharp = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    unsharp->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
-    unsharp->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    unsharp->hsub = desc->log2_chroma_w;
+    unsharp->vsub = desc->log2_chroma_h;
 
     init_filter_param(link->dst, &unsharp->luma, "luma", link->w);
     init_filter_param(link->dst, &unsharp->chroma, "chroma", SHIFTUP(link->w, unsharp->hsub));
@@ -36,8 +36,9 @@ typedef struct {
 static int config_input(AVFilterLink *link)
 {
     FlipContext *flip = link->dst->priv;
+    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
 
-    flip->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+    flip->vsub = desc->log2_chroma_h;
 
     return 0;
 }
@@ -187,7 +187,7 @@ static int return_frame(AVFilterContext *ctx, int is_second)
     }
 
     if (!yadif->csp)
-        yadif->csp = &av_pix_fmt_descriptors[link->format];
+        yadif->csp = av_pix_fmt_desc_get(link->format);
     if (yadif->csp->comp[0].depth_minus1 / 8 == 1)
         yadif->filter_line = filter_line_c_16bit;
 
@@ -339,7 +339,8 @@ int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
 
     /* copy the slice if needed for permission reasons */
     if (link->src_buf) {
-        vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
+        vsub = desc->log2_chroma_h;
 
         for (i = 0; i < 4; i++) {
             if (link->src_buf->data[i]) {
@@ -118,7 +118,7 @@ static int color_config_props(AVFilterLink *inlink)
     ColorContext *color = ctx->priv;
     uint8_t rgba_color[4];
     int is_packed_rgba;
-    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];
+    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
 
     color->hsub = pix_desc->log2_chroma_w;
     color->vsub = pix_desc->log2_chroma_h;