avfilter: added motion estimation and interpolation filters

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Author: Davinder Singh, 2016-08-23 17:50:35 +05:30; committed by Michael Niedermayer
parent bcd1153162
commit b07d4a0fb2
8 changed files with 2276 additions and 1 deletion

doc/filters.texi

@@ -9434,6 +9434,134 @@ format=rgb24,mergeplanes=0x000102:yuv444p
@end example
@end itemize
@section mestimate
Estimate and export motion vectors using block matching algorithms.
Motion vectors are stored in frame side data to be used by other filters.
This filter accepts the following options:
@table @option
@item method
Specify the motion estimation method. Accepts one of the following values:
@table @samp
@item esa
Exhaustive search algorithm.
@item tss
Three step search algorithm.
@item tdls
Two dimensional logarithmic search algorithm.
@item ntss
New three step search algorithm.
@item fss
Four step search algorithm.
@item ds
Diamond search algorithm.
@item hexbs
Hexagon-based search algorithm.
@item epzs
Enhanced predictive zonal search algorithm.
@item umh
Uneven multi-hexagon search algorithm.
@end table
Default value is @samp{esa}.
@item mb_size
Macroblock size. Default @code{16}.
@item search_param
Search parameter. Default @code{7}.
@end table
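For example, the following command line (an illustrative invocation; @file{input.mkv} is a placeholder input) estimates motion vectors with the hexagon-based search and discards the output:
@example
ffmpeg -i input.mkv -vf mestimate=method=hexbs:mb_size=16:search_param=24 -f null -
@end example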
@section minterpolate
Convert the video to the specified frame rate using motion interpolation.
This filter accepts the following options:
@table @option
@item fps
Specify the output frame rate. This can be rational, e.g. @code{60000/1001}. Frames are dropped if @var{fps} is lower than the source frame rate. Default @code{60}.
@item mi_mode
Motion interpolation mode. The following values are accepted:
@table @samp
@item dup
Duplicate previous or next frame for interpolating new ones.
@item blend
Blend source frames. The interpolated frame is the mean of the previous and next frames.
@item mci
Motion compensated interpolation. The following options are effective when this mode is selected:
@table @samp
@item mc_mode
Motion compensation mode. The following values are accepted:
@table @samp
@item obmc
Overlapped block motion compensation.
@item aobmc
Adaptive overlapped block motion compensation. Window weighting coefficients are controlled adaptively according to the reliabilities of the neighboring motion vectors to reduce oversmoothing.
@end table
Default mode is @samp{obmc}.
@item me_mode
Motion estimation mode. The following values are accepted:
@table @samp
@item bidir
Bidirectional motion estimation. Motion vectors are estimated for each source frame in both forward and backward directions.
@item bilat
Bilateral motion estimation. Motion vectors are estimated directly for the interpolated frame.
@end table
Default mode is @samp{bilat}.
@item me
The algorithm to be used for motion estimation. The following values are accepted:
@table @samp
@item esa
Exhaustive search algorithm.
@item tss
Three step search algorithm.
@item tdls
Two dimensional logarithmic search algorithm.
@item ntss
New three step search algorithm.
@item fss
Four step search algorithm.
@item ds
Diamond search algorithm.
@item hexbs
Hexagon-based search algorithm.
@item epzs
Enhanced predictive zonal search algorithm.
@item umh
Uneven multi-hexagon search algorithm.
@end table
Default algorithm is @samp{epzs}.
@item mb_size
Macroblock size. Default @code{16}.
@item search_param
Motion estimation search parameter. Default @code{32}.
@item vsbmc
Enable variable-size block motion compensation. Motion estimation is applied with smaller block sizes at object boundaries in order to reduce blurring there. Default is @code{0} (disabled).
@end table
@end table
@item scd
Scene change detection method. A scene change causes motion vectors to point in essentially random directions, so when a scene change is detected the interpolated frames are replaced by duplicated ones. May not be needed for other modes. The following values are accepted:
@table @samp
@item none
Disable scene change detection.
@item fdiff
Frame difference. Corresponding pixel values are compared and, if the difference satisfies @var{scd_threshold}, a scene change is detected.
@end table
Default method is @samp{fdiff}.
@item scd_threshold
Scene change detection threshold. Default is @code{5.0}.
@end table
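For example, the following command line (an illustrative invocation; @file{input.mkv} and @file{output.mkv} are placeholders) converts the input to 60 frames per second using motion compensated interpolation with adaptive overlapped block motion compensation:
@example
ffmpeg -i input.mkv -vf minterpolate=fps=60:mi_mode=mci:mc_mode=aobmc:me=epzs output.mkv
@end example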
@section mpdecimate
Drop frames that do not differ greatly from the previous frame in

libavfilter/Makefile

@@ -213,7 +213,9 @@ OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o
OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
OBJS-$(CONFIG_MESTIMATE_FILTER) += vf_mestimate.o motion_estimation.o
OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o
OBJS-$(CONFIG_MINTERPOLATE_FILTER) += vf_minterpolate.o motion_estimation.o
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_NNEDI_FILTER) += vf_nnedi.o

libavfilter/allfilters.c

@@ -230,7 +230,9 @@ void avfilter_register_all(void)
REGISTER_FILTER(MASKEDMERGE, maskedmerge, vf);
REGISTER_FILTER(MCDEINT, mcdeint, vf);
REGISTER_FILTER(MERGEPLANES, mergeplanes, vf);
REGISTER_FILTER(MESTIMATE, mestimate, vf);
REGISTER_FILTER(METADATA, metadata, vf);
REGISTER_FILTER(MINTERPOLATE, minterpolate, vf);
REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
REGISTER_FILTER(NNEDI, nnedi, vf);

libavfilter/motion_estimation.c (new file)

@@ -0,0 +1,432 @@
/**
* Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "motion_estimation.h"
static const int8_t sqr1[8][2] = {{ 0,-1}, { 0, 1}, {-1, 0}, { 1, 0}, {-1,-1}, {-1, 1}, { 1,-1}, { 1, 1}};
static const int8_t dia1[4][2] = {{-1, 0}, { 0,-1}, { 1, 0}, { 0, 1}};
static const int8_t dia2[8][2] = {{-2, 0}, {-1,-1}, { 0,-2}, { 1,-1}, { 2, 0}, { 1, 1}, { 0, 2}, {-1, 1}};
static const int8_t hex2[6][2] = {{-2, 0}, {-1,-2}, {-1, 2}, { 1,-2}, { 1, 2}, { 2, 0}};
static const int8_t hex4[16][2] = {{-4,-2}, {-4,-1}, {-4, 0}, {-4, 1}, {-4, 2},
{ 4,-2}, { 4,-1}, { 4, 0}, { 4, 1}, { 4, 2},
{-2, 3}, { 0, 4}, { 2, 3}, {-2,-3}, { 0,-4}, { 2,-3}};
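/* COST_MV evaluates candidate (x, y) for the block at (x_mb, y_mb) and keeps it in mv[]
   when it improves on cost_min; COST_P_MV additionally skips candidates outside the
   clipped search window */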
#define COST_MV(x, y)\
do {\
cost = me_ctx->get_cost(me_ctx, x_mb, y_mb, x, y);\
if (cost < cost_min) {\
cost_min = cost;\
mv[0] = x;\
mv[1] = y;\
}\
} while(0)
#define COST_P_MV(x, y)\
if (x >= x_min && x <= x_max && y >= y_min && y <= y_max)\
COST_MV(x, y);
void ff_me_init_context(AVMotionEstContext *me_ctx, int mb_size, int search_param,
int width, int height, int x_min, int x_max, int y_min, int y_max)
{
me_ctx->width = width;
me_ctx->height = height;
me_ctx->mb_size = mb_size;
me_ctx->search_param = search_param;
me_ctx->get_cost = &ff_me_cmp_sad;
me_ctx->x_min = x_min;
me_ctx->x_max = x_max;
me_ctx->y_min = y_min;
me_ctx->y_max = y_max;
}
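/* sum of absolute differences between the mb_size x mb_size block at (x_mb, y_mb)
   in the current frame and the block at (x_mv, y_mv) in the reference frame */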
uint64_t ff_me_cmp_sad(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int x_mv, int y_mv)
{
const int linesize = me_ctx->linesize;
uint8_t *data_ref = me_ctx->data_ref;
uint8_t *data_cur = me_ctx->data_cur;
uint64_t sad = 0;
int i, j;
data_ref += y_mv * linesize;
data_cur += y_mb * linesize;
for (j = 0; j < me_ctx->mb_size; j++)
for (i = 0; i < me_ctx->mb_size; i++)
sad += FFABS(data_ref[x_mv + i + j * linesize] - data_cur[x_mb + i + j * linesize]);
return sad;
}
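/* exhaustive search: every candidate inside the clipped search window is evaluated */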
uint64_t ff_me_search_esa(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
for (y = y_min; y <= y_max; y++)
for (x = x_min; x <= x_max; x++)
COST_MV(x, y);
return cost_min;
}
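/* three step search: probe the 8 square neighbours of the current best at distance
   'step', recentre on the best candidate and halve the step until it reaches zero */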
uint64_t ff_me_search_tss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int step = ROUNDED_DIV(me_ctx->search_param, 2);
int i;
mv[0] = x_mb;
mv[1] = y_mb;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 8; i++)
COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
step = step >> 1;
} while (step > 0);
return cost_min;
}
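/* two dimensional logarithmic search: diamond probes around the current best;
   the step is halved only when the centre remains the best candidate */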
uint64_t ff_me_search_tdls(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int step = ROUNDED_DIV(me_ctx->search_param, 2);
int i;
mv[0] = x_mb;
mv[1] = y_mb;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 4; i++)
COST_P_MV(x + dia1[i][0] * step, y + dia1[i][1] * step);
if (x == mv[0] && y == mv[1])
step = step >> 1;
} while (step > 0);
return cost_min;
}
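/* new three step search: TSS with an extra first-step check of the 8 unit-distance
   neighbours of the origin, stopping early when the best match stays at or next to it */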
uint64_t ff_me_search_ntss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int step = ROUNDED_DIV(me_ctx->search_param, 2);
int first_step = 1;
int i;
mv[0] = x_mb;
mv[1] = y_mb;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 8; i++)
COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
/* addition to TSS in NTSS */
if (first_step) {
for (i = 0; i < 8; i++)
COST_P_MV(x + sqr1[i][0], y + sqr1[i][1]);
if (x == mv[0] && y == mv[1])
return cost_min;
if (FFABS(x - mv[0]) <= 1 && FFABS(y - mv[1]) <= 1) {
x = mv[0];
y = mv[1];
for (i = 0; i < 8; i++)
COST_P_MV(x + sqr1[i][0], y + sqr1[i][1]);
return cost_min;
}
first_step = 0;
}
step = step >> 1;
} while (step > 0);
return cost_min;
}
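/* four step search: 8 square neighbours at distance 2 around the current best;
   the step is halved only when the centre wins, ending with a unit-step refinement */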
uint64_t ff_me_search_fss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int step = 2;
int i;
mv[0] = x_mb;
mv[1] = y_mb;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 8; i++)
COST_P_MV(x + sqr1[i][0] * step, y + sqr1[i][1] * step);
if (x == mv[0] && y == mv[1])
step = step >> 1;
} while (step > 0);
return cost_min;
}
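/* diamond search: the large diamond (dia2) is repeated around the current best until
   it stops moving, followed by a small diamond (dia1) refinement; the disabled branch
   below would skip locations already examined in the previous iteration */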
uint64_t ff_me_search_ds(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int i;
int dir_x, dir_y;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
x = x_mb; y = y_mb;
dir_x = dir_y = 0;
do {
x = mv[0];
y = mv[1];
#if 1
for (i = 0; i < 8; i++)
COST_P_MV(x + dia2[i][0], y + dia2[i][1]);
#else
/* this version skips previously examined 3 or 5 locations based on prev origin */
if (dir_x <= 0)
COST_P_MV(x - 2, y);
if (dir_x <= 0 && dir_y <= 0)
COST_P_MV(x - 1, y - 1);
if (dir_y <= 0)
COST_P_MV(x, y - 2);
if (dir_x >= 0 && dir_y <= 0)
COST_P_MV(x + 1, y - 1);
if (dir_x >= 0)
COST_P_MV(x + 2, y);
if (dir_x >= 0 && dir_y >= 0)
COST_P_MV(x + 1, y + 1);
if (dir_y >= 0)
COST_P_MV(x, y + 2);
if (dir_x <= 0 && dir_y >= 0)
COST_P_MV(x - 1, y + 1);
dir_x = mv[0] - x;
dir_y = mv[1] - y;
#endif
} while (x != mv[0] || y != mv[1]);
for (i = 0; i < 4; i++)
COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
return cost_min;
}
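/* hexagon-based search: the large hexagon (hex2) is repeated around the current best
   until it stops moving, followed by a small diamond (dia1) refinement */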
uint64_t ff_me_search_hexbs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int i;
if (!(cost_min = me_ctx->get_cost(me_ctx, x_mb, y_mb, x_mb, y_mb)))
return cost_min;
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 6; i++)
COST_P_MV(x + hex2[i][0], y + hex2[i][1]);
} while (x != mv[0] || y != mv[1]);
for (i = 0; i < 4; i++)
COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
return cost_min;
}
/* two subsets of predictors are used
me->pred_x|y is set to median of current frame's left, top, top-right
set 1: me->preds[0] has: (0, 0), left, top, top-right, collocated block in prev frame
set 2: me->preds[1] has: accelerator mv, top, left, right, bottom adj mb of prev frame
*/
uint64_t ff_me_search_epzs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int i;
AVMotionEstPredictor *preds = me_ctx->preds;
cost_min = UINT64_MAX;
COST_P_MV(x_mb + me_ctx->pred_x, y_mb + me_ctx->pred_y);
for (i = 0; i < preds[0].nb; i++)
COST_P_MV(x_mb + preds[0].mvs[i][0], y_mb + preds[0].mvs[i][1]);
for (i = 0; i < preds[1].nb; i++)
COST_P_MV(x_mb + preds[1].mvs[i][0], y_mb + preds[1].mvs[i][1]);
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 4; i++)
COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
} while (x != mv[0] || y != mv[1]);
return cost_min;
}
/* required predictor order: median, (0,0), left, top, top-right
rules when mb not available:
replace left with (0, 0)
replace top-right with top-left
replace top two with left
repeated can be skipped, if no predictors are used, set me_ctx->pred to (0,0)
*/
uint64_t ff_me_search_umh(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv)
{
int x, y;
int x_min = FFMAX(me_ctx->x_min, x_mb - me_ctx->search_param);
int y_min = FFMAX(me_ctx->y_min, y_mb - me_ctx->search_param);
int x_max = FFMIN(x_mb + me_ctx->search_param, me_ctx->x_max);
int y_max = FFMIN(y_mb + me_ctx->search_param, me_ctx->y_max);
uint64_t cost, cost_min;
int d, i;
int end_x, end_y;
AVMotionEstPredictor *pred = &me_ctx->preds[0];
cost_min = UINT64_MAX;
COST_P_MV(x_mb + me_ctx->pred_x, y_mb + me_ctx->pred_y);
for (i = 0; i < pred->nb; i++)
COST_P_MV(x_mb + pred->mvs[i][0], y_mb + pred->mvs[i][1]);
// Unsymmetrical-cross Search
x = mv[0];
y = mv[1];
for (d = 1; d <= me_ctx->search_param; d += 2) {
COST_P_MV(x - d, y);
COST_P_MV(x + d, y);
if (d <= me_ctx->search_param / 2) {
COST_P_MV(x, y - d);
COST_P_MV(x, y + d);
}
}
// Uneven Multi-Hexagon-Grid Search
end_x = FFMIN(mv[0] + 2, x_max);
end_y = FFMIN(mv[1] + 2, y_max);
for (y = FFMAX(y_min, mv[1] - 2); y <= end_y; y++)
for (x = FFMAX(x_min, mv[0] - 2); x <= end_x; x++)
COST_P_MV(x, y);
x = mv[0];
y = mv[1];
for (d = 1; d <= me_ctx->search_param / 4; d++)
for (i = 1; i < 16; i++)
COST_P_MV(x + hex4[i][0] * d, y + hex4[i][1] * d);
// Extended Hexagon-based Search
do {
x = mv[0];
y = mv[1];
for (i = 0; i < 6; i++)
COST_P_MV(x + hex2[i][0], y + hex2[i][1]);
} while (x != mv[0] || y != mv[1]);
for (i = 0; i < 4; i++)
COST_P_MV(x + dia1[i][0], y + dia1[i][1]);
return cost_min;
}

libavfilter/motion_estimation.h (new file)

@@ -0,0 +1,87 @@
/**
* Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_MOTION_ESTIMATION_H
#define AVFILTER_MOTION_ESTIMATION_H
#include "libavutil/avutil.h"
#define AV_ME_METHOD_ESA 1
#define AV_ME_METHOD_TSS 2
#define AV_ME_METHOD_TDLS 3
#define AV_ME_METHOD_NTSS 4
#define AV_ME_METHOD_FSS 5
#define AV_ME_METHOD_DS 6
#define AV_ME_METHOD_HEXBS 7
#define AV_ME_METHOD_EPZS 8
#define AV_ME_METHOD_UMH 9
typedef struct AVMotionEstPredictor {
int mvs[10][2];
int nb;
} AVMotionEstPredictor;
typedef struct AVMotionEstContext {
uint8_t *data_cur, *data_ref;
int linesize;
int mb_size;
int search_param;
int width;
int height;
int x_min;
int x_max;
int y_min;
int y_max;
int pred_x; ///< median predictor x
int pred_y; ///< median predictor y
AVMotionEstPredictor preds[2];
uint64_t (*get_cost)(struct AVMotionEstContext *me_ctx, int x_mb, int y_mb,
int mv_x, int mv_y);
} AVMotionEstContext;
void ff_me_init_context(AVMotionEstContext *me_ctx, int mb_size, int search_param,
int width, int height, int x_min, int x_max, int y_min, int y_max);
uint64_t ff_me_cmp_sad(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int x_mv, int y_mv);
uint64_t ff_me_search_esa(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_tss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_tdls(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_ntss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_fss(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_ds(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_hexbs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_epzs(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
uint64_t ff_me_search_umh(AVMotionEstContext *me_ctx, int x_mb, int y_mb, int *mv);
#endif /* AVFILTER_MOTION_ESTIMATION_H */

libavfilter/version.h

@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 6
#define LIBAVFILTER_VERSION_MINOR 56
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \

libavfilter/vf_mestimate.c (new file)

@@ -0,0 +1,377 @@
/**
* Copyright (c) 2016 Davinder Singh (DSM_) <ds.mudhar@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "motion_estimation.h"
#include "libavcodec/mathops.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/motion_vector.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct MEContext {
const AVClass *class;
AVMotionEstContext me_ctx;
int method; ///< motion estimation method
int mb_size; ///< macroblock size
int search_param; ///< search parameter
int b_width, b_height, b_count;
int log2_mb_size;
AVFrame *prev, *cur, *next;
int (*mv_table[3])[2][2]; ///< motion vectors of current & prev 2 frames
} MEContext;
#define OFFSET(x) offsetof(MEContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, 0, 0, FLAGS, unit }
static const AVOption mestimate_options[] = {
{ "method", "motion estimation method", OFFSET(method), AV_OPT_TYPE_INT, {.i64 = AV_ME_METHOD_ESA}, AV_ME_METHOD_ESA, AV_ME_METHOD_UMH, FLAGS, "method" },
CONST("esa", "exhaustive search", AV_ME_METHOD_ESA, "method"),
CONST("tss", "three step search", AV_ME_METHOD_TSS, "method"),
CONST("tdls", "two dimensional logarithmic search", AV_ME_METHOD_TDLS, "method"),
CONST("ntss", "new three step search", AV_ME_METHOD_NTSS, "method"),
CONST("fss", "four step search", AV_ME_METHOD_FSS, "method"),
CONST("ds", "diamond search", AV_ME_METHOD_DS, "method"),
CONST("hexbs", "hexagon-based search", AV_ME_METHOD_HEXBS, "method"),
CONST("epzs", "enhanced predictive zonal search", AV_ME_METHOD_EPZS, "method"),
CONST("umh", "uneven multi-hexagon search", AV_ME_METHOD_UMH, "method"),
{ "mb_size", "macroblock size", OFFSET(mb_size), AV_OPT_TYPE_INT, {.i64 = 16}, 8, INT_MAX, FLAGS },
{ "search_param", "search parameter", OFFSET(search_param), AV_OPT_TYPE_INT, {.i64 = 7}, 4, INT_MAX, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(mestimate);
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE
};
AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
if (!fmts_list)
return AVERROR(ENOMEM);
return ff_set_common_formats(ctx, fmts_list);
}
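/* round mb_size up to a power of two, derive the macroblock grid dimensions and
   allocate the motion vector tables for the current and two previous frames */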
static int config_input(AVFilterLink *inlink)
{
MEContext *s = inlink->dst->priv;
int i;
s->log2_mb_size = av_ceil_log2_c(s->mb_size);
s->mb_size = 1 << s->log2_mb_size;
s->b_width = inlink->w >> s->log2_mb_size;
s->b_height = inlink->h >> s->log2_mb_size;
s->b_count = s->b_width * s->b_height;
for (i = 0; i < 3; i++) {
s->mv_table[i] = av_mallocz_array(s->b_count, sizeof(*s->mv_table[0]));
if (!s->mv_table[i])
return AVERROR(ENOMEM);
}
ff_me_init_context(&s->me_ctx, s->mb_size, s->search_param, inlink->w, inlink->h, 0, (s->b_width - 1) << s->log2_mb_size, 0, (s->b_height - 1) << s->log2_mb_size);
return 0;
}
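/* fill one exported AVMotionVector: block size, destination (centre of the current
   block), source (centre of the matched block) and direction of the reference frame
   (-1 = previous frame, 1 = next frame) */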
static void add_mv_data(AVMotionVector *mv, int mb_size,
int x, int y, int x_mv, int y_mv, int dir)
{
mv->w = mb_size;
mv->h = mb_size;
mv->dst_x = x + (mb_size >> 1);
mv->dst_y = y + (mb_size >> 1);
mv->src_x = x_mv + (mb_size >> 1);
mv->src_y = y_mv + (mb_size >> 1);
mv->source = dir ? 1 : -1;
mv->flags = 0;
}
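/* run the selected search on every macroblock and append the resulting vector to the
   frame's side data; ADD_PRED below pushes one candidate into a predictor set used by
   the EPZS and UMH searches */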
#define SEARCH_MV(method)\
do {\
for (mb_y = 0; mb_y < s->b_height; mb_y++)\
for (mb_x = 0; mb_x < s->b_width; mb_x++) {\
const int x_mb = mb_x << s->log2_mb_size;\
const int y_mb = mb_y << s->log2_mb_size;\
int mv[2] = {x_mb, y_mb};\
ff_me_search_##method(me_ctx, x_mb, y_mb, mv);\
add_mv_data(((AVMotionVector *) sd->data) + mv_count++, me_ctx->mb_size, x_mb, y_mb, mv[0], mv[1], dir);\
}\
} while (0)
#define ADD_PRED(preds, px, py)\
do {\
preds.mvs[preds.nb][0] = px;\
preds.mvs[preds.nb][1] = py;\
preds.nb++;\
} while(0)
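/* keep a sliding prev/cur/next window; once all three frames are available, estimate
   vectors for the current frame against both neighbours (dir 0 = previous frame,
   dir 1 = next frame) and attach them as AV_FRAME_DATA_MOTION_VECTORS side data
   on a clone of the current frame */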
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
MEContext *s = ctx->priv;
AVMotionEstContext *me_ctx = &s->me_ctx;
AVFrameSideData *sd;
AVFrame *out;
int mb_x, mb_y, dir;
int32_t mv_count = 0;
int ret;
if (frame->pts == AV_NOPTS_VALUE) {
ret = ff_filter_frame(ctx->outputs[0], frame);
return ret;
}
av_frame_free(&s->prev);
s->prev = s->cur;
s->cur = s->next;
s->next = frame;
s->mv_table[2] = memcpy(s->mv_table[2], s->mv_table[1], sizeof(*s->mv_table[1]) * s->b_count);
s->mv_table[1] = memcpy(s->mv_table[1], s->mv_table[0], sizeof(*s->mv_table[0]) * s->b_count);
if (!s->cur) {
s->cur = av_frame_clone(frame);
if (!s->cur)
return AVERROR(ENOMEM);
}
if (!s->prev)
return 0;
out = av_frame_clone(s->cur);
if (!out)
return AVERROR(ENOMEM);
sd = av_frame_new_side_data(out, AV_FRAME_DATA_MOTION_VECTORS, 2 * s->b_count * sizeof(AVMotionVector));
if (!sd) {
av_frame_free(&out);
return AVERROR(ENOMEM);
}
me_ctx->data_cur = s->cur->data[0];
me_ctx->linesize = s->cur->linesize[0];
for (dir = 0; dir < 2; dir++) {
me_ctx->data_ref = (dir ? s->next : s->prev)->data[0];
if (s->method == AV_ME_METHOD_DS)
SEARCH_MV(ds);
else if (s->method == AV_ME_METHOD_ESA)
SEARCH_MV(esa);
else if (s->method == AV_ME_METHOD_FSS)
SEARCH_MV(fss);
else if (s->method == AV_ME_METHOD_NTSS)
SEARCH_MV(ntss);
else if (s->method == AV_ME_METHOD_TDLS)
SEARCH_MV(tdls);
else if (s->method == AV_ME_METHOD_TSS)
SEARCH_MV(tss);
else if (s->method == AV_ME_METHOD_HEXBS)
SEARCH_MV(hexbs);
else if (s->method == AV_ME_METHOD_UMH) {
for (mb_y = 0; mb_y < s->b_height; mb_y++)
for (mb_x = 0; mb_x < s->b_width; mb_x++) {
const int mb_i = mb_x + mb_y * s->b_width;
const int x_mb = mb_x << s->log2_mb_size;
const int y_mb = mb_y << s->log2_mb_size;
int mv[2] = {x_mb, y_mb};
AVMotionEstPredictor *preds = me_ctx->preds;
preds[0].nb = 0;
ADD_PRED(preds[0], 0, 0);
//left mb in current frame
if (mb_x > 0)
ADD_PRED(preds[0], s->mv_table[0][mb_i - 1][dir][0], s->mv_table[0][mb_i - 1][dir][1]);
if (mb_y > 0) {
//top mb in current frame
ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width][dir][0], s->mv_table[0][mb_i - s->b_width][dir][1]);
//top-right mb in current frame
if (mb_x + 1 < s->b_width)
ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width + 1][dir][0], s->mv_table[0][mb_i - s->b_width + 1][dir][1]);
//top-left mb in current frame
else if (mb_x > 0)
ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width - 1][dir][0], s->mv_table[0][mb_i - s->b_width - 1][dir][1]);
}
//median predictor
if (preds[0].nb == 4) {
me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
} else if (preds[0].nb == 3) {
me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
} else if (preds[0].nb == 2) {
me_ctx->pred_x = preds[0].mvs[1][0];
me_ctx->pred_y = preds[0].mvs[1][1];
} else {
me_ctx->pred_x = 0;
me_ctx->pred_y = 0;
}
ff_me_search_umh(me_ctx, x_mb, y_mb, mv);
s->mv_table[0][mb_i][dir][0] = mv[0] - x_mb;
s->mv_table[0][mb_i][dir][1] = mv[1] - y_mb;
add_mv_data(((AVMotionVector *) sd->data) + mv_count++, me_ctx->mb_size, x_mb, y_mb, mv[0], mv[1], dir);
}
} else if (s->method == AV_ME_METHOD_EPZS) {
for (mb_y = 0; mb_y < s->b_height; mb_y++)
for (mb_x = 0; mb_x < s->b_width; mb_x++) {
const int mb_i = mb_x + mb_y * s->b_width;
const int x_mb = mb_x << s->log2_mb_size;
const int y_mb = mb_y << s->log2_mb_size;
int mv[2] = {x_mb, y_mb};
AVMotionEstPredictor *preds = me_ctx->preds;
preds[0].nb = 0;
preds[1].nb = 0;
ADD_PRED(preds[0], 0, 0);
//left mb in current frame
if (mb_x > 0)
ADD_PRED(preds[0], s->mv_table[0][mb_i - 1][dir][0], s->mv_table[0][mb_i - 1][dir][1]);
//top mb in current frame
if (mb_y > 0)
ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width][dir][0], s->mv_table[0][mb_i - s->b_width][dir][1]);
//top-right mb in current frame
if (mb_y > 0 && mb_x + 1 < s->b_width)
ADD_PRED(preds[0], s->mv_table[0][mb_i - s->b_width + 1][dir][0], s->mv_table[0][mb_i - s->b_width + 1][dir][1]);
//median predictor
if (preds[0].nb == 4) {
me_ctx->pred_x = mid_pred(preds[0].mvs[1][0], preds[0].mvs[2][0], preds[0].mvs[3][0]);
me_ctx->pred_y = mid_pred(preds[0].mvs[1][1], preds[0].mvs[2][1], preds[0].mvs[3][1]);
} else if (preds[0].nb == 3) {
me_ctx->pred_x = mid_pred(0, preds[0].mvs[1][0], preds[0].mvs[2][0]);
me_ctx->pred_y = mid_pred(0, preds[0].mvs[1][1], preds[0].mvs[2][1]);
} else if (preds[0].nb == 2) {
me_ctx->pred_x = preds[0].mvs[1][0];
me_ctx->pred_y = preds[0].mvs[1][1];
} else {
me_ctx->pred_x = 0;
me_ctx->pred_y = 0;
}
//collocated mb in prev frame
ADD_PRED(preds[0], s->mv_table[1][mb_i][dir][0], s->mv_table[1][mb_i][dir][1]);
//accelerator motion vector of collocated block in prev frame
ADD_PRED(preds[1], s->mv_table[1][mb_i][dir][0] + (s->mv_table[1][mb_i][dir][0] - s->mv_table[2][mb_i][dir][0]),
s->mv_table[1][mb_i][dir][1] + (s->mv_table[1][mb_i][dir][1] - s->mv_table[2][mb_i][dir][1]));
//left mb in prev frame
if (mb_x > 0)
ADD_PRED(preds[1], s->mv_table[1][mb_i - 1][dir][0], s->mv_table[1][mb_i - 1][dir][1]);
//top mb in prev frame
if (mb_y > 0)
ADD_PRED(preds[1], s->mv_table[1][mb_i - s->b_width][dir][0], s->mv_table[1][mb_i - s->b_width][dir][1]);
//right mb in prev frame
if (mb_x + 1 < s->b_width)
ADD_PRED(preds[1], s->mv_table[1][mb_i + 1][dir][0], s->mv_table[1][mb_i + 1][dir][1]);
//bottom mb in prev frame
if (mb_y + 1 < s->b_height)
ADD_PRED(preds[1], s->mv_table[1][mb_i + s->b_width][dir][0], s->mv_table[1][mb_i + s->b_width][dir][1]);
ff_me_search_epzs(me_ctx, x_mb, y_mb, mv);
s->mv_table[0][mb_i][dir][0] = mv[0] - x_mb;
s->mv_table[0][mb_i][dir][1] = mv[1] - y_mb;
add_mv_data(((AVMotionVector *) sd->data) + mv_count++, s->mb_size, x_mb, y_mb, mv[0], mv[1], dir);
}
}
}
return ff_filter_frame(ctx->outputs[0], out);
}
static av_cold void uninit(AVFilterContext *ctx)
{
MEContext *s = ctx->priv;
int i;
av_frame_free(&s->prev);
av_frame_free(&s->cur);
av_frame_free(&s->next);
for (i = 0; i < 3; i++)
av_freep(&s->mv_table[i]);
}
static const AVFilterPad mestimate_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
static const AVFilterPad mestimate_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
AVFilter ff_vf_mestimate = {
.name = "mestimate",
.description = NULL_IF_CONFIG_SMALL("Generate motion vectors."),
.priv_size = sizeof(MEContext),
.priv_class = &mestimate_class,
.uninit = uninit,
.query_formats = query_formats,
.inputs = mestimate_inputs,
.outputs = mestimate_outputs,
};

libavfilter/vf_minterpolate.c (new file): diff suppressed because it is too large.