Mirror of https://github.com/xenia-project/FFmpeg.git, synced 2024-11-24 03:59:43 +00:00
Commit 183ce55b0d

AVFilterLink.frame_count is supposed to count the number of frames that were passed on the link, but with min_samples, that number is not always the same for the source and destination filters. With the addition of a FIFO on the link, the difference will become more significant. Split the variable in two: frame_count_in counts the number of frames that entered the link, frame_count_out counts the number of frames that were sent to the destination filter.
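
A minimal sketch of the split described above, assuming a toy link with a small frame FIFO (SketchLink, link_enqueue and link_dequeue are hypothetical illustration names, not libavfilter API); only the two counters mirror the real AVFilterLink fields, which the filter below reads as outlink->frame_count_in:

#include <stdint.h>

/* Toy stand-in for a filter link with a frame FIFO; it only illustrates why a
 * single frame counter stops being sufficient once frames can be buffered on
 * the link. */
typedef struct Frame Frame;

typedef struct SketchLink {
    Frame   *queue[16];       /* frames buffered between source and destination */
    unsigned head, tail;
    int64_t  frame_count_in;  /* frames that entered the link (source side) */
    int64_t  frame_count_out; /* frames delivered to the destination filter */
} SketchLink;

/* Source side: a frame enters the link. */
static void link_enqueue(SketchLink *l, Frame *f)
{
    l->queue[l->tail++ % 16] = f;
    l->frame_count_in++;
}

/* Destination side: a frame leaves the FIFO; with min_samples or a backlog,
 * frame_count_out lags behind frame_count_in. */
static Frame *link_dequeue(SketchLink *l)
{
    if (l->head == l->tail)
        return 0; /* nothing buffered yet */
    l->frame_count_out++;
    return l->queue[l->head++ % 16];
}
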
526 lines
19 KiB
C
/*
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>

#include "libavcodec/avfft.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "audio.h"
#include "video.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"

enum DisplayMode    { LINE, BAR, DOT, NB_MODES };
enum ChannelMode    { COMBINED, SEPARATE, NB_CMODES };
enum FrequencyScale { FS_LINEAR, FS_LOG, FS_RLOG, NB_FSCALES };
enum AmplitudeScale { AS_LINEAR, AS_SQRT, AS_CBRT, AS_LOG, NB_ASCALES };

typedef struct ShowFreqsContext {
    const AVClass *class;
    int w, h;
    int mode;
    int cmode;
    int fft_bits;
    int ascale, fscale;
    int avg;
    int win_func;
    FFTContext *fft;
    FFTComplex **fft_data;
    float **avg_data;
    float *window_func_lut;
    float overlap;
    float minamp;
    int hop_size;
    int nb_channels;
    int nb_freq;
    int win_size;
    float scale;
    char *colors;
    AVAudioFifo *fifo;
    int64_t pts;
} ShowFreqsContext;

#define OFFSET(x) offsetof(ShowFreqsContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showfreqs_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "1024x512"}, 0, 0, FLAGS },
    { "mode", "set display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=BAR}, 0, NB_MODES-1, FLAGS, "mode" },
        { "line", "show lines", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, FLAGS, "mode" },
        { "bar",  "show bars",  0, AV_OPT_TYPE_CONST, {.i64=BAR},  0, 0, FLAGS, "mode" },
        { "dot",  "show dots",  0, AV_OPT_TYPE_CONST, {.i64=DOT},  0, 0, FLAGS, "mode" },
    { "ascale", "set amplitude scale", OFFSET(ascale), AV_OPT_TYPE_INT, {.i64=AS_LOG}, 0, NB_ASCALES-1, FLAGS, "ascale" },
        { "lin",  "linear",      0, AV_OPT_TYPE_CONST, {.i64=AS_LINEAR}, 0, 0, FLAGS, "ascale" },
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=AS_SQRT},   0, 0, FLAGS, "ascale" },
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=AS_CBRT},   0, 0, FLAGS, "ascale" },
        { "log",  "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=AS_LOG},    0, 0, FLAGS, "ascale" },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=FS_LINEAR}, 0, NB_FSCALES-1, FLAGS, "fscale" },
        { "lin",  "linear",              0, AV_OPT_TYPE_CONST, {.i64=FS_LINEAR}, 0, 0, FLAGS, "fscale" },
        { "log",  "logarithmic",         0, AV_OPT_TYPE_CONST, {.i64=FS_LOG},    0, 0, FLAGS, "fscale" },
        { "rlog", "reverse logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=FS_RLOG},   0, 0, FLAGS, "fscale" },
    { "win_size", "set window size", OFFSET(fft_bits), AV_OPT_TYPE_INT, {.i64=11}, 4, 16, FLAGS, "fft" },
        { "w16",    0, 0, AV_OPT_TYPE_CONST, {.i64=4},  0, 0, FLAGS, "fft" },
        { "w32",    0, 0, AV_OPT_TYPE_CONST, {.i64=5},  0, 0, FLAGS, "fft" },
        { "w64",    0, 0, AV_OPT_TYPE_CONST, {.i64=6},  0, 0, FLAGS, "fft" },
        { "w128",   0, 0, AV_OPT_TYPE_CONST, {.i64=7},  0, 0, FLAGS, "fft" },
        { "w256",   0, 0, AV_OPT_TYPE_CONST, {.i64=8},  0, 0, FLAGS, "fft" },
        { "w512",   0, 0, AV_OPT_TYPE_CONST, {.i64=9},  0, 0, FLAGS, "fft" },
        { "w1024",  0, 0, AV_OPT_TYPE_CONST, {.i64=10}, 0, 0, FLAGS, "fft" },
        { "w2048",  0, 0, AV_OPT_TYPE_CONST, {.i64=11}, 0, 0, FLAGS, "fft" },
        { "w4096",  0, 0, AV_OPT_TYPE_CONST, {.i64=12}, 0, 0, FLAGS, "fft" },
        { "w8192",  0, 0, AV_OPT_TYPE_CONST, {.i64=13}, 0, 0, FLAGS, "fft" },
        { "w16384", 0, 0, AV_OPT_TYPE_CONST, {.i64=14}, 0, 0, FLAGS, "fft" },
        { "w32768", 0, 0, AV_OPT_TYPE_CONST, {.i64=15}, 0, 0, FLAGS, "fft" },
        { "w65536", 0, 0, AV_OPT_TYPE_CONST, {.i64=16}, 0, 0, FLAGS, "fft" },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_HANNING}, 0, NB_WFUNC-1, FLAGS, "win_func" },
        { "rect",     "Rectangular",      0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT},     0, 0, FLAGS, "win_func" },
        { "bartlett", "Bartlett",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
        { "hanning",  "Hanning",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, FLAGS, "win_func" },
        { "hamming",  "Hamming",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING},  0, 0, FLAGS, "win_func" },
        { "blackman", "Blackman",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
        { "welch",    "Welch",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH},    0, 0, FLAGS, "win_func" },
        { "flattop",  "Flat-top",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP},  0, 0, FLAGS, "win_func" },
        { "bharris",  "Blackman-Harris",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS},  0, 0, FLAGS, "win_func" },
        { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
        { "bhann",    "Bartlett-Hann",    0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN},    0, 0, FLAGS, "win_func" },
        { "sine",     "Sine",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE},     0, 0, FLAGS, "win_func" },
        { "nuttall",  "Nuttall",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL},  0, 0, FLAGS, "win_func" },
        { "lanczos",  "Lanczos",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS},  0, 0, FLAGS, "win_func" },
        { "gauss",    "Gauss",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS},    0, 0, FLAGS, "win_func" },
        { "tukey",    "Tukey",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY},    0, 0, FLAGS, "win_func" },
        { "dolph",    "Dolph-Chebyshev",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH},    0, 0, FLAGS, "win_func" },
        { "cauchy",   "Cauchy",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY},   0, 0, FLAGS, "win_func" },
        { "parzen",   "Parzen",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN},   0, 0, FLAGS, "win_func" },
        { "poisson",  "Poisson",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON},  0, 0, FLAGS, "win_func" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1.}, 0., 1., FLAGS },
    { "averaging", "set time averaging", OFFSET(avg), AV_OPT_TYPE_INT, {.i64=1}, 0, INT32_MAX, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "cmode", "set channel mode", OFFSET(cmode), AV_OPT_TYPE_INT, {.i64=COMBINED}, 0, NB_CMODES-1, FLAGS, "cmode" },
        { "combined", "show all channels in same window", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "cmode" },
        { "separate", "show each channel in own window",  0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "cmode" },
    { "minamp", "set minimum amplitude", OFFSET(minamp), AV_OPT_TYPE_FLOAT, {.dbl=1e-6}, FLT_MIN, 1e-6, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(showfreqs);

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    ShowFreqsContext *s = ctx->priv;

    s->pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowFreqsContext *s = ctx->priv;
    float overlap;
    int i;

    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;
    av_audio_fifo_free(s->fifo);
    av_fft_end(s->fft);
    s->fft = av_fft_init(s->fft_bits, 0);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(ENOMEM);
    }

    /* FFT buffers: x2 for each (display) channel buffer.
     * Note: we use free and malloc instead of a realloc-like function to
     * make sure the buffer is aligned in memory for the FFT functions. */
    for (i = 0; i < s->nb_channels; i++) {
        av_freep(&s->fft_data[i]);
        av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    s->nb_channels = inlink->channels;

    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    if (!s->avg_data)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->hop_size = (1. - s->overlap) * s->win_size;
    if (s->hop_size < 1) {
        av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
        return AVERROR(EINVAL);
    }

    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}

static inline void draw_dot(AVFrame *out, int x, int y, uint8_t fg[4])
{
    uint32_t color = AV_RL32(out->data[0] + y * out->linesize[0] + x * 4);

    if ((color & 0xffffff) != 0)
        AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg) | color);
    else
        AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
}

static int get_sx(ShowFreqsContext *s, int f)
{
    switch (s->fscale) {
    case FS_LINEAR:
        return (s->w/(float)s->nb_freq)*f;
    case FS_LOG:
        return s->w-pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.));
    case FS_RLOG:
        return pow(s->w, f/(s->nb_freq-1.));
    }

    return 0;
}

static float get_bsize(ShowFreqsContext *s, int f)
{
    switch (s->fscale) {
    case FS_LINEAR:
        return s->w/(float)s->nb_freq;
    case FS_LOG:
        return pow(s->w, (s->nb_freq-f-1)/(s->nb_freq-1.)) -
               pow(s->w, (s->nb_freq-f-2)/(s->nb_freq-1.));
    case FS_RLOG:
        return pow(s->w, (f+1)/(s->nb_freq-1.)) -
               pow(s->w,  f   /(s->nb_freq-1.));
    }

    return 1.;
}

static inline void plot_freq(ShowFreqsContext *s, int ch,
                             double a, int f, uint8_t fg[4], int *prev_y,
                             AVFrame *out, AVFilterLink *outlink)
{
    const int w = s->w;
    const float min = s->minamp;
    const float avg = s->avg_data[ch][f];
    const float bsize = get_bsize(s, f);
    const int sx = get_sx(s, f);
    int end = outlink->h;
    int x, y, i;

    switch (s->ascale) {
    case AS_SQRT:
        a = 1.0 - sqrt(a);
        break;
    case AS_CBRT:
        a = 1.0 - cbrt(a);
        break;
    case AS_LOG:
        a = log(av_clipd(a, min, 1)) / log(min);
        break;
    case AS_LINEAR:
        a = 1.0 - a;
        break;
    }

    switch (s->cmode) {
    case COMBINED:
        y = a * outlink->h - 1;
        break;
    case SEPARATE:
        end = (outlink->h / s->nb_channels) * (ch + 1);
        y = (outlink->h / s->nb_channels) * ch + a * (outlink->h / s->nb_channels) - 1;
        break;
    default:
        av_assert0(0);
    }
    if (y < 0)
        return;

    switch (s->avg) {
    case 0:
        y = s->avg_data[ch][f] = !outlink->frame_count_in ? y : FFMIN(avg, y);
        break;
    case 1:
        break;
    default:
        s->avg_data[ch][f] = avg + y * (y - avg) / (FFMIN(outlink->frame_count_in + 1, s->avg) * y);
        y = s->avg_data[ch][f];
        break;
    }

    switch (s->mode) {
    case LINE:
        if (*prev_y == -1) {
            *prev_y = y;
        }
        if (y <= *prev_y) {
            for (x = sx + 1; x < sx + bsize && x < w; x++)
                draw_dot(out, x, y, fg);
            for (i = y; i <= *prev_y; i++)
                draw_dot(out, sx, i, fg);
        } else {
            for (i = *prev_y; i <= y; i++)
                draw_dot(out, sx, i, fg);
            for (x = sx + 1; x < sx + bsize && x < w; x++)
                draw_dot(out, x, i - 1, fg);
        }
        *prev_y = y;
        break;
    case BAR:
        for (x = sx; x < sx + bsize && x < w; x++)
            for (i = y; i < end; i++)
                draw_dot(out, x, i, fg);
        break;
    case DOT:
        for (x = sx; x < sx + bsize && x < w; x++)
            draw_dot(out, x, y, fg);
        break;
    }
}

static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
    const int win_size = s->win_size;
    char *colors, *color, *saveptr = NULL;
    AVFrame *out;
    int ch, n;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    for (n = 0; n < outlink->h; n++)
        memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);

    /* fill FFT input with the number of samples available */
    for (ch = 0; ch < s->nb_channels; ch++) {
        const float *p = (float *)in->extended_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
        for (; n < win_size; n++) {
            s->fft_data[ch][n].re = 0;
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run FFT on each samples set */
    for (ch = 0; ch < s->nb_channels; ch++) {
        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }

#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))

    colors = av_strdup(s->colors);
    if (!colors) {
        av_frame_free(&out);
        return AVERROR(ENOMEM);
    }

    for (ch = 0; ch < s->nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_y = -1, f;
        double a;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
        plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);

        for (f = 1; f < s->nb_freq; f++) {
            a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);

            plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
        }
    }

    av_free(colors);
    out->pts = in->pts;
    return ff_filter_frame(outlink, out);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ShowFreqsContext *s = ctx->priv;
    AVFrame *fin = NULL;
    int consumed = 0;
    int ret = 0;

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts - av_audio_fifo_size(s->fifo);

    av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples);
    while (av_audio_fifo_size(s->fifo) >= s->win_size) {
        fin = ff_get_audio_buffer(inlink, s->win_size);
        if (!fin) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        fin->pts = s->pts + consumed;
        consumed += s->hop_size;
        ret = av_audio_fifo_peek(s->fifo, (void **)fin->extended_data, s->win_size);
        if (ret < 0)
            goto fail;

        ret = plot_freqs(inlink, fin);
        av_frame_free(&fin);
        av_audio_fifo_drain(s->fifo, s->hop_size);
        if (ret < 0)
            goto fail;
    }

fail:
    s->pts = AV_NOPTS_VALUE;
    av_frame_free(&fin);
    av_frame_free(&in);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ShowFreqsContext *s = ctx->priv;
    int i;

    av_fft_end(s->fft);
    for (i = 0; i < s->nb_channels; i++) {
        if (s->fft_data)
            av_freep(&s->fft_data[i]);
        if (s->avg_data)
            av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    av_freep(&s->window_func_lut);
    av_audio_fifo_free(s->fifo);
}

static const AVFilterPad showfreqs_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showfreqs_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_showfreqs = {
    .name          = "showfreqs",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a frequencies video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowFreqsContext),
    .inputs        = showfreqs_inputs,
    .outputs       = showfreqs_outputs,
    .priv_class    = &showfreqs_class,
};