lavfi: new interlace filter

Signed-off-by: Anton Khirnov <anton@khirnov.net>
Vittorio Giovara 2013-03-12 11:35:21 +01:00 committed by Anton Khirnov
parent 921046008f
commit 3fce136798
6 changed files with 286 additions and 0 deletions


@ -10,6 +10,7 @@ version 10:
- new avconv options -filter_script and -filter_complex_script, which allow a
filtergraph description to be read from a file
- uniform options syntax across all filters
- new interlace filter
version 9:

configure

@ -1881,6 +1881,7 @@ frei0r_filter_extralibs='$ldl'
frei0r_src_filter_deps="frei0r dlopen strtok_r"
frei0r_src_filter_extralibs='$ldl'
hqdn3d_filter_deps="gpl"
interlace_filter_deps="gpl"
resample_filter_deps="avresample"
ocv_filter_deps="libopencv"
scale_filter_deps="swscale"


@ -1235,6 +1235,36 @@ a float number which specifies chroma temporal strength, defaults to
@var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial}
@end table
@section interlace
Simple filter that interlaces progressive content. It interleaves the upper (or
lower) lines from odd frames with the lower (or upper) lines from even frames,
halving the frame rate and preserving the image height.
@example
   Original        Original                     New Frame
   Frame 'j'      Frame 'j+1'                     (tff)
  ==========      ===========       ==================================
    Line 0  -------------------->    Frame 'j' Line 0
    Line 1          Line 1  ------>  Frame 'j+1' Line 1
    Line 2  -------------------->    Frame 'j' Line 2
    Line 3          Line 3  ------>  Frame 'j+1' Line 3
     ...              ...                         ...
New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
@end example
It accepts the following optional parameters:
@table @option
@item scan
Determines whether the interlaced frame is taken from the even (tff, the
default) or odd (bff) lines of the progressive frame.
@item lowpass
Enable (default) or disable the vertical lowpass filter, used to reduce
interline twitter and moiré patterns.
@end table
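For example, a possible invocation (INPUT and OUTPUT are placeholder file
names) that interlaces a progressive input bottom field first with the
lowpass filter disabled:
@example
avconv -i INPUT -vf interlace=scan=bff:lowpass=0 OUTPUT
@end example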
@section lut, lutrgb, lutyuv
Compute a look-up table for binding each pixel component input value


@ -56,6 +56,7 @@ OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o


@ -76,6 +76,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(GRADFUN, gradfun, vf);
REGISTER_FILTER(HFLIP, hflip, vf);
REGISTER_FILTER(HQDN3D, hqdn3d, vf);
REGISTER_FILTER(INTERLACE, interlace, vf);
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);

libavfilter/vf_interlace.c (new executable file)

@ -0,0 +1,252 @@
/*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with Libav; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/**
* @file
* progressive to interlaced content filter, inspired by heavy debugging of the tinterlace filter
*/
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
enum ScanMode {
MODE_TFF = 0,
MODE_BFF = 1,
};
enum FieldType {
FIELD_UPPER = 0,
FIELD_LOWER = 1,
};
typedef struct {
const AVClass *class;
enum ScanMode scan; // top or bottom field first scanning
int lowpass; // enable or disable low pass filtering
AVFrame *cur, *next; // the two frames from which the new one is obtained
int got_output; // signal an output frame is ready to request_frame()
} InterlaceContext;
#define OFFSET(x) offsetof(InterlaceContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
{ "scan", "scanning mode", OFFSET(scan),
AV_OPT_TYPE_INT, {.i64 = MODE_TFF }, 0, 1, .flags = V, .unit = "scan" },
{ "tff", "top field first", 0,
AV_OPT_TYPE_CONST, {.i64 = MODE_TFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
{ "bff", "bottom field first", 0,
AV_OPT_TYPE_CONST, {.i64 = MODE_BFF }, INT_MIN, INT_MAX, .flags = V, .unit = "scan" },
{ "lowpass", "enable vertical low-pass filter", OFFSET(lowpass),
AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 1, .flags = V },
{ NULL }
};
static const AVClass class = {
.class_name = "interlace filter",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
static const enum AVPixelFormat formats_supported[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P,
AV_PIX_FMT_GRAY8, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_NONE
};
static int query_formats(AVFilterContext *ctx)
{
ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
InterlaceContext *s = ctx->priv;
av_frame_free(&s->cur);
av_frame_free(&s->next);
av_opt_free(s);
}
static int config_out_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
InterlaceContext *s = ctx->priv;
if (inlink->h < 2) {
av_log(ctx, AV_LOG_ERROR, "input video height is too small\n");
return AVERROR_INVALIDDATA;
}
// same input size
outlink->w = inlink->w;
outlink->h = inlink->h;
outlink->time_base = inlink->time_base;
// half framerate
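// e.g. (illustrative numbers) a 1/50 input time base becomes 2/50, so a
// 50 fps input is output at 25 fps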
outlink->time_base.num *= 2;
av_log(ctx, AV_LOG_VERBOSE, "%s interlacing %s lowpass filter\n",
s->scan == MODE_TFF ? "tff" : "bff", (s->lowpass) ? "with" : "without");
return 0;
}
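/*
 * Copy one field (every second line) of src_frame into dst_frame.
 * When lowpass is enabled, each copied line is the [1 2 1]/4 weighted
 * vertical average of the source line and its neighbours, which reduces
 * interline twitter in the interlaced result.
 */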
static void copy_picture_field(AVFrame *src_frame, AVFrame *dst_frame,
AVFilterLink *inlink, enum FieldType field_type,
int lowpass)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int vsub = desc->log2_chroma_h;
int plane, i, j;
for (plane = 0; plane < desc->nb_components; plane++) {
int lines = (plane == 1 || plane == 2) ? inlink->h >> vsub : inlink->h;
int linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
uint8_t *dstp = dst_frame->data[plane];
const uint8_t *srcp = src_frame->data[plane];
av_assert0(linesize >= 0);
lines /= 2;
if (field_type == FIELD_LOWER)
srcp += src_frame->linesize[plane];
if (field_type == FIELD_LOWER)
dstp += dst_frame->linesize[plane];
if (lowpass) {
int srcp_linesize = src_frame->linesize[plane] * 2;
int dstp_linesize = dst_frame->linesize[plane] * 2;
for (j = lines; j > 0; j--) {
const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
if (j == lines)
srcp_above = srcp; // there is no line above
if (j == 1)
srcp_below = srcp; // there is no line below
for (i = 0; i < linesize; i++) {
// this calculation is an integer representation of
// '0.5 * current + 0.25 * above + 0.25 * below'
// '1 +' is for rounding.
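// e.g. (illustrative values) current = 100, above = 120, below = 80:
// (1 + 100 + 100 + 120 + 80) >> 2 = 401 >> 2 = 100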
dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
}
dstp += dstp_linesize;
srcp += srcp_linesize;
}
} else {
av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
srcp, src_frame->linesize[plane] * 2,
linesize, lines);
}
}
}
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
InterlaceContext *s = ctx->priv;
AVFrame *out;
int tff, ret;
av_frame_free(&s->cur);
s->cur = s->next;
s->next = buf;
/* we need at least two frames */
if (!s->cur || !s->next)
return 0;
tff = (s->scan == MODE_TFF);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
av_frame_copy_props(out, s->cur);
out->interlaced_frame = 1;
out->top_field_first = tff;
out->pts /= 2; // adjust pts to new framerate
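// the output time base was doubled in config_out_props(), so halving the
// pts keeps this frame at the same point in time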
/* copy upper/lower field from cur */
copy_picture_field(s->cur, out, inlink, tff ? FIELD_UPPER : FIELD_LOWER, s->lowpass);
av_frame_free(&s->cur);
/* copy lower/upper field from next */
copy_picture_field(s->next, out, inlink, tff ? FIELD_LOWER : FIELD_UPPER, s->lowpass);
av_frame_free(&s->next);
ret = ff_filter_frame(outlink, out);
s->got_output = 1;
return ret;
}
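/*
 * One output frame consumes two input frames, so keep requesting frames
 * from the input until filter_frame() has pushed an interlaced frame
 * downstream (tracked through got_output).
 */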
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
InterlaceContext *s = ctx->priv;
int ret = 0;
s->got_output = 0;
while (ret >= 0 && !s->got_output)
ret = ff_request_frame(ctx->inputs[0]);
return ret;
}
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_out_props,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter avfilter_vf_interlace = {
.name = "interlace",
.description = NULL_IF_CONFIG_SMALL("Convert progressive video into interlaced."),
.uninit = uninit,
.priv_class = &class,
.priv_size = sizeof(InterlaceContext),
.query_formats = query_formats,
.inputs = inputs,
.outputs = outputs,
};