lavfi: add frame counter into AVFilterLink and use it in filters.
This commit is contained in:
parent f4596e8bb6
commit b8a5c76131
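
The change in a nutshell: AVFilterLink gains a frame_count field that the framework increments every time a frame crosses the link, and the filters touched below drop their private framenum / frame_count / frame_id counters in favour of reading it. A minimal sketch of the resulting pattern in a filter's frame callback (the ExampleContext type, its var_values array and VAR_N are illustrative placeholders, not taken from any one file in this commit; the sketch assumes the usual libavfilter internal headers):

    /* Sketch only -- not a literal copy of any filter in this commit. */
    static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        AVFilterContext *ctx = inlink->dst;
        ExampleContext  *s   = ctx->priv;   /* hypothetical filter context */

        /* Previously a filter kept its own counter:
         *     s->var_values[VAR_N] = s->framenum++;
         * Now the zero-based index of this frame comes from the link itself: */
        s->var_values[VAR_N] = inlink->frame_count;

        return ff_filter_frame(ctx->outputs[0], frame);
    }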
@@ -915,6 +915,7 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
 
     pts = out->pts;
     ret = filter_frame(link, out);
+    link->frame_count++;
     link->frame_requested = 0;
     ff_update_link_current_pts(link, pts);
     return ret;
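
A note on ordering that matters for the hunks below: the counter is bumped only after the destination's filter_frame() callback returns, so inside that callback inlink->frame_count is the zero-based index of the frame currently being processed, while on an output link it is the number of frames already pushed through ff_filter_frame().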
@@ -718,6 +718,11 @@ struct AVFilterLink {
      * Link processing flags.
      */
     unsigned flags;
+
+    /**
+     * Number of past frames sent through the link.
+     */
+    int64_t frame_count;
 };
 
 /**
@@ -283,6 +283,7 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
     if (isnan(select->var_values[VAR_START_T]))
         select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
 
+    select->var_values[VAR_N  ] = inlink->frame_count;
     select->var_values[VAR_PTS] = TS2D(frame->pts);
     select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
     select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
@@ -352,7 +353,6 @@ static void select_frame(AVFilterContext *ctx, AVFrame *frame)
         select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
     }
 
-    select->var_values[VAR_N] += 1.0;
     select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
     select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
 }
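
The same substitution is repeated in the other expression-driven filters below (blend, crop, drawtext, geq, hue, overlay): VAR_N is initialised from the input link's frame_count at the start of the frame callback, and the trailing "VAR_N += 1" bookkeeping disappears. A rough sketch of the evaluation step, with names borrowed from the select filter (treat it as illustrative, not an exact copy of the file):

    /* n now reflects the link-level frame index: 0 for the first frame,
     * 1 for the second, and so on, regardless of any state the filter keeps. */
    select->var_values[VAR_N] = inlink->frame_count;
    double res = av_expr_eval(select->expr, select->var_values, NULL);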
@@ -43,7 +43,6 @@ typedef struct {
     double pixel_black_th;
     unsigned int pixel_black_th_i;
 
-    unsigned int frame_count;       ///< frame number
     unsigned int nb_black_pixels;   ///< number of black pixels counted so far
 } BlackDetectContext;
 
@@ -149,8 +148,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
     picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
 
     av_log(ctx, AV_LOG_DEBUG,
-           "frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n",
-           blackdetect->frame_count, picture_black_ratio,
+           "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
+           inlink->frame_count, picture_black_ratio,
            av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
            av_get_picture_type_char(picref->pict_type));
 
@@ -168,7 +167,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
     }
 
     blackdetect->last_picref_pts = picref->pts;
-    blackdetect->frame_count++;
     blackdetect->nb_black_pixels = 0;
     return ff_filter_frame(inlink->dst->outputs[0], picref);
 }
@@ -81,7 +81,6 @@ typedef struct {
     struct FFBufQueue queue_bottom;
     int hsub, vsub;             ///< chroma subsampling values
     int frame_requested;
-    int framenum;
     char *all_expr;
     enum BlendMode all_mode;
     double all_opacity;
@@ -382,7 +381,7 @@ static void blend_frame(AVFilterContext *ctx,
         uint8_t *bottom = bottom_buf->data[plane];
 
         param = &b->params[plane];
-        param->values[VAR_N] = b->framenum++;
+        param->values[VAR_N] = inlink->frame_count;
         param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
         param->values[VAR_W] = outw;
         param->values[VAR_H] = outh;
@@ -259,6 +259,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
     frame->width  = crop->w;
     frame->height = crop->h;
 
+    crop->var_values[VAR_N] = link->frame_count;
     crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
         NAN : frame->pts * av_q2d(link->time_base);
     crop->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
@@ -299,8 +300,6 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
         frame->data[3] += crop->x * crop->max_step[3];
     }
 
-    crop->var_values[VAR_N] += 1.0;
-
     return ff_filter_frame(link->dst->outputs[0], frame);
 }
 
@@ -40,7 +40,6 @@ typedef struct {
     int fid;                    ///< current frame id in the queue
     int filled;                 ///< 1 if the queue is filled, 0 otherwise
     AVFrame *last;              ///< last frame from the previous queue
-    int64_t frame_count;        ///< output frame counter
     AVFrame **clean_src;        ///< frame queue for the clean source
     int got_frame[2];           ///< frame request flag for each input stream
     double ts_unit;             ///< timestamp units for the output frames
@@ -215,7 +214,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
                 av_frame_free(&frame);
                 frame = dm->clean_src[i];
             }
-            frame->pts = dm->frame_count++ * dm->ts_unit;
+            frame->pts = outlink->frame_count * dm->ts_unit;
             ret = ff_filter_frame(outlink, frame);
             if (ret < 0)
                 break;
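
decimate above, and separatefields and telecine further down, all move to the same timestamp scheme: the output pts becomes a multiple of outlink->frame_count. Since the counter on the output link is only incremented once ff_filter_frame() has delivered the frame, it equals the number of frames already emitted, i.e. the index of the frame about to be sent. Roughly (ts_unit as in those filters):

    /* outlink->frame_count == frames already sent on this link, so it is the
     * zero-based index of the frame we are about to send. */
    frame->pts = outlink->frame_count * dm->ts_unit;
    ret = ff_filter_frame(outlink, frame);   /* frame_count advances here */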
@@ -164,7 +164,6 @@ typedef struct {
     AVRational tc_rate;         ///< frame rate for timecode
     AVTimecode tc;              ///< timecode context
     int tc24hmax;               ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
-    int frame_id;
     int reload;                 ///< reload text file for each frame
 } DrawTextContext;
 
@@ -820,6 +819,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
                      int width, int height)
 {
     DrawTextContext *dtext = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
     uint32_t code = 0, prev_code = 0;
     int x = 0, y = 0, i = 0, ret;
     int max_text_line_w = 0, len;
@@ -857,7 +857,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
 
     if (dtext->tc_opt_string) {
         char tcbuf[AV_TIMECODE_STR_SIZE];
-        av_timecode_make_string(&dtext->tc, tcbuf, dtext->frame_id++);
+        av_timecode_make_string(&dtext->tc, tcbuf, inlink->frame_count);
         av_bprint_clear(bp);
         av_bprintf(bp, "%s%s", dtext->text, tcbuf);
     }
@@ -983,6 +983,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
         if ((ret = load_textfile(ctx)) < 0)
             return ret;
 
+    dtext->var_values[VAR_N] = inlink->frame_count;
     dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
         NAN : frame->pts * av_q2d(inlink->time_base);
 
@@ -993,8 +994,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
            (int)dtext->var_values[VAR_TEXT_W], (int)dtext->var_values[VAR_TEXT_H],
            dtext->x, dtext->y);
 
-    dtext->var_values[VAR_N] += 1.0;
-
     return ff_filter_frame(outlink, frame);
 }
 
@@ -77,7 +77,6 @@ typedef struct {
 
     AVFrame *prv, *src, *nxt;       ///< main sliding window of 3 frames
     AVFrame *prv2, *src2, *nxt2;    ///< sliding window of the optional second stream
-    int64_t frame_count;            ///< output frame counter
     int got_frame[2];               ///< frame request flag for each input stream
     int hsub, vsub;                 ///< chroma subsampling values
     uint32_t eof;                   ///< bitmask for end of stream
@@ -738,7 +737,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
     /* scene change check */
     if (fm->combmatch == COMBMATCH_SC) {
-        if (fm->lastn == fm->frame_count - 1) {
+        if (fm->lastn == outlink->frame_count - 1) {
             if (fm->lastscdiff > fm->scthresh)
                 sc = 1;
         } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
@@ -746,7 +745,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         }
 
         if (!sc) {
-            fm->lastn = fm->frame_count;
+            fm->lastn = outlink->frame_count;
             fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
             sc = fm->lastscdiff > fm->scthresh;
         }
@@ -805,10 +804,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     dst->interlaced_frame = combs[match] >= fm->combpel;
     if (dst->interlaced_frame) {
         av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
-               fm->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
+               outlink->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
         dst->top_field_first = field;
     }
-    fm->frame_count++;
 
     av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
            " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
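
fieldmatch now keys its scene-change cache and the "still interlaced" warning off outlink->frame_count instead of a private counter; since the old counter was already an int64_t, the %"PRId64" format string stays as-is and only the argument changes.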
@@ -30,7 +30,7 @@
 
 typedef struct {
     const AVClass *class;
-    int frame_step, frame_count;
+    int frame_step;
 } FrameStepContext;
 
 #define OFFSET(x) offsetof(FrameStepContext, x)
@@ -64,7 +64,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
 {
     FrameStepContext *framestep = inlink->dst->priv;
 
-    if (!(framestep->frame_count++ % framestep->frame_step)) {
+    if (!(inlink->frame_count % framestep->frame_step)) {
        return ff_filter_frame(inlink->dst->outputs[0], ref);
     } else {
         av_frame_free(&ref);
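
With the counter on the link, framestep needs no per-instance state beyond the step itself: whether to pass a frame becomes a pure function of inlink->frame_count. A hedged restatement of the logic in the hunk above, with the boundary case spelled out:

    /* Keep one frame out of every frame_step. Because inlink->frame_count is
     * the zero-based index of the incoming frame, frame 0 is always kept. */
    if (!(inlink->frame_count % framestep->frame_step))
        return ff_filter_frame(inlink->dst->outputs[0], ref);
    av_frame_free(&ref);
    return 0;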
@@ -36,7 +36,6 @@ typedef struct {
     const AVClass *class;
     AVExpr *e[4];               ///< expressions for each plane
     char *expr_str[4];          ///< expression strings for each plane
-    int framenum;               ///< frame counter
     AVFrame *picref;            ///< current input buffer
     int hsub, vsub;             ///< chroma subsampling
     int planes;                 ///< number of planes
@@ -163,7 +162,7 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterLink *outlink = inlink->dst->outputs[0];
     AVFrame *out;
     double values[VAR_VARS_NB] = {
-        [VAR_N] = geq->framenum++,
+        [VAR_N] = inlink->frame_count,
         [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
     };
 
@@ -252,6 +252,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
         av_frame_copy_props(outpic, inpic);
     }
 
+    hue->var_values[VAR_N]   = inlink->frame_count;
     hue->var_values[VAR_T]   = TS2T(inpic->pts, inlink->time_base);
     hue->var_values[VAR_PTS] = TS2D(inpic->pts);
 
@@ -281,8 +282,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
 
     compute_sin_and_cos(hue);
 
-    hue->var_values[VAR_N] += 1;
-
     if (!direct) {
         av_image_copy_plane(outpic->data[0], outpic->linesize[0],
                             inpic->data[0], inpic->linesize[0],
@@ -600,6 +600,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
     if (over->eval_mode == EVAL_MODE_FRAME) {
         int64_t pos = av_frame_get_pkt_pos(mainpic);
 
+        over->var_values[VAR_N] = inlink->frame_count;
         over->var_values[VAR_T] = mainpic->pts == AV_NOPTS_VALUE ?
             NAN : mainpic->pts * av_q2d(inlink->time_base);
         over->var_values[VAR_POS] = pos == -1 ? NAN : pos;
@@ -614,7 +615,6 @@ static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
         if (over->enable)
             blend_image(ctx, mainpic, over->overpicref, over->x, over->y);
 
-        over->var_values[VAR_N] += 1.0;
     }
     ret = ff_filter_frame(ctx->outputs[0], mainpic);
     av_assert1(ret != AVERROR(EAGAIN));
@@ -24,7 +24,6 @@
 
 typedef struct {
     int nb_planes;
-    int64_t frame_count;
     double ts_unit;
 } SeparateFieldsContext;
 
@@ -76,12 +75,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
         second->linesize[i] *= 2;
     }
 
-    inpicref->pts = sf->frame_count++ * sf->ts_unit;
-    second->pts = sf->frame_count++ * sf->ts_unit;
-
+    inpicref->pts = outlink->frame_count * sf->ts_unit;
     ret = ff_filter_frame(outlink, inpicref);
     if (ret < 0)
         return ret;
+
+    second->pts = outlink->frame_count * sf->ts_unit;
     return ff_filter_frame(outlink, second);
 }
 
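
separatefields emits two frames per input frame. Because outlink->frame_count advances inside ff_filter_frame(), the second field automatically picks up the next index; that is why the hunk above computes second->pts only after the first ff_filter_frame() call has returned.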
@@ -43,7 +43,6 @@ typedef struct {
     double ts_unit;
     int out_cnt;
     int occupied;
-    int64_t frame_count;
 
     int nb_planes;
     int planeheight[4];
@@ -233,7 +232,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
         }
 
         av_frame_copy_props(frame, inpicref);
-        frame->pts = tc->frame_count++ * tc->ts_unit;
+        frame->pts = outlink->frame_count * tc->ts_unit;
         ret = ff_filter_frame(outlink, frame);
     }
     av_frame_free(&inpicref);