avf_showspectrum.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2012 Clément Bœsch
3  * Copyright (c) 2013 Rudolf Polzer <divverent@xonotic.org>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * audio to spectrum (video) transmedia filter, based on ffplay rdft showmode
25  * (by Michael Niedermayer) and lavfi/avf_showwaves (by Stefano Sabatini).
26  */
27 
28 #include <math.h>
29 
30 #include "libavcodec/avfft.h"
31 #include "libavutil/avassert.h"
33 #include "libavutil/opt.h"
34 #include "avfilter.h"
35 #include "internal.h"
36 
40 
41 typedef struct {
42  const AVClass *class;
43  int w, h;
48  int sliding; ///< 1 if sliding mode, 0 otherwise
49  enum DisplayMode mode; ///< channel display mode
50  enum ColorMode color_mode; ///< display color scheme
52  float saturation; ///< color saturation multiplier
53  int xpos; ///< x position (current column)
54  RDFTContext *rdft; ///< Real Discrete Fourier Transform context
55  int rdft_bits; ///< number of bits (RDFT window size = 1<<rdft_bits)
56  FFTSample **rdft_data; ///< bins holder for each (displayed) channels
57  int filled; ///< number of samples (per channel) filled in current rdft_buffer
58  int consumed; ///< number of samples (per channel) consumed from the input frame
59  float *window_func_lut; ///< Window function LUT
60  float *combine_buffer; ///< color combining buffer (3 * h items)
62 
/* Helpers for the option table below. */
#define OFFSET(x) offsetof(ShowSpectrumContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options of the showspectrum filter.
 * Note: "s" is an alias of "size"; AV_OPT_TYPE_IMAGE_SIZE writes two
 * consecutive ints (w, h) at the given offset, hence OFFSET(w) for both. */
static const AVOption showspectrum_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "640x512"}, 0, 0, FLAGS },
    { "slide", "set sliding mode", OFFSET(sliding), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { "mode", "set channel display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=COMBINED}, COMBINED, NB_MODES-1, FLAGS, "mode" },
        { "combined", "combined mode", 0, AV_OPT_TYPE_CONST, {.i64=COMBINED}, 0, 0, FLAGS, "mode" },
        { "separate", "separate mode", 0, AV_OPT_TYPE_CONST, {.i64=SEPARATE}, 0, 0, FLAGS, "mode" },
    { "color", "set channel coloring", OFFSET(color_mode), AV_OPT_TYPE_INT, {.i64=CHANNEL}, CHANNEL, NB_CLMODES-1, FLAGS, "color" },
        { "channel", "separate color for each channel", 0, AV_OPT_TYPE_CONST, {.i64=CHANNEL}, 0, 0, FLAGS, "color" },
        { "intensity", "intensity based coloring", 0, AV_OPT_TYPE_CONST, {.i64=INTENSITY}, 0, 0, FLAGS, "color" },
    /* default display scale is sqrt, not linear */
    { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" },
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" },
        { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, FLAGS, "scale" },
        { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
        { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
    /* negative saturation values invert the chroma */
    { "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(showspectrum);
86 
87 static const struct {
88  float a, y, u, v;
90  { 0, 0, 0, 0 },
91  { 0.13, .03587126228984074, .1573300977624594, -.02548747583751842 },
92  { 0.30, .18572281794568020, .1772436246393981, .17475554840414750 },
93  { 0.60, .28184980583656130, -.1593064119945782, .47132074554608920 },
94  { 0.73, .65830621175547810, -.3716070802232764, .24352759331252930 },
95  { 0.78, .76318535758242900, -.4307467689263783, .16866496622310430 },
96  { 0.91, .95336363636363640, -.2045454545454546, .03313636363636363 },
97  { 1, 1, 0, 0 }
98 };
99 
100 static av_cold void uninit(AVFilterContext *ctx)
101 {
102  ShowSpectrumContext *showspectrum = ctx->priv;
103  int i;
104 
105  av_freep(&showspectrum->combine_buffer);
106  av_rdft_end(showspectrum->rdft);
107  for (i = 0; i < showspectrum->nb_display_channels; i++)
108  av_freep(&showspectrum->rdft_data[i]);
109  av_freep(&showspectrum->rdft_data);
110  av_freep(&showspectrum->window_func_lut);
111  av_frame_free(&showspectrum->outpicref);
112 }
113 
115 {
118  AVFilterLink *inlink = ctx->inputs[0];
119  AVFilterLink *outlink = ctx->outputs[0];
121  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE };
122 
123  /* set input audio formats */
124  formats = ff_make_format_list(sample_fmts);
125  if (!formats)
126  return AVERROR(ENOMEM);
127  ff_formats_ref(formats, &inlink->out_formats);
128 
129  layouts = ff_all_channel_layouts();
130  if (!layouts)
131  return AVERROR(ENOMEM);
132  ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
133 
134  formats = ff_all_samplerates();
135  if (!formats)
136  return AVERROR(ENOMEM);
137  ff_formats_ref(formats, &inlink->out_samplerates);
138 
139  /* set output video format */
140  formats = ff_make_format_list(pix_fmts);
141  if (!formats)
142  return AVERROR(ENOMEM);
143  ff_formats_ref(formats, &outlink->in_formats);
144 
145  return 0;
146 }
147 
148 static int config_output(AVFilterLink *outlink)
149 {
150  AVFilterContext *ctx = outlink->src;
151  AVFilterLink *inlink = ctx->inputs[0];
152  ShowSpectrumContext *showspectrum = ctx->priv;
153  int i, rdft_bits, win_size, h;
154 
155  outlink->w = showspectrum->w;
156  outlink->h = showspectrum->h;
157 
158  h = (showspectrum->mode == COMBINED) ? outlink->h : outlink->h / inlink->channels;
159  showspectrum->channel_height = h;
160 
161  /* RDFT window size (precision) according to the requested output frame height */
162  for (rdft_bits = 1; 1 << rdft_bits < 2 * h; rdft_bits++);
163  win_size = 1 << rdft_bits;
164 
165  /* (re-)configuration if the video output changed (or first init) */
166  if (rdft_bits != showspectrum->rdft_bits) {
167  size_t rdft_size, rdft_listsize;
168  AVFrame *outpicref;
169 
170  av_rdft_end(showspectrum->rdft);
171  showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C);
172  showspectrum->rdft_bits = rdft_bits;
173 
174  /* RDFT buffers: x2 for each (display) channel buffer.
175  * Note: we use free and malloc instead of a realloc-like function to
176  * make sure the buffer is aligned in memory for the FFT functions. */
177  for (i = 0; i < showspectrum->nb_display_channels; i++)
178  av_freep(&showspectrum->rdft_data[i]);
179  av_freep(&showspectrum->rdft_data);
180  showspectrum->nb_display_channels = inlink->channels;
181 
182  if (av_size_mult(sizeof(*showspectrum->rdft_data),
183  showspectrum->nb_display_channels, &rdft_listsize) < 0)
184  return AVERROR(EINVAL);
185  if (av_size_mult(sizeof(**showspectrum->rdft_data),
186  win_size, &rdft_size) < 0)
187  return AVERROR(EINVAL);
188  showspectrum->rdft_data = av_malloc(rdft_listsize);
189  if (!showspectrum->rdft_data)
190  return AVERROR(ENOMEM);
191  for (i = 0; i < showspectrum->nb_display_channels; i++) {
192  showspectrum->rdft_data[i] = av_malloc(rdft_size);
193  if (!showspectrum->rdft_data[i])
194  return AVERROR(ENOMEM);
195  }
196  showspectrum->filled = 0;
197 
198  /* pre-calc windowing function (hann here) */
199  showspectrum->window_func_lut =
200  av_realloc_f(showspectrum->window_func_lut, win_size,
201  sizeof(*showspectrum->window_func_lut));
202  if (!showspectrum->window_func_lut)
203  return AVERROR(ENOMEM);
204  for (i = 0; i < win_size; i++)
205  showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
206 
207  /* prepare the initial picref buffer (black frame) */
208  av_frame_free(&showspectrum->outpicref);
209  showspectrum->outpicref = outpicref =
210  ff_get_video_buffer(outlink, outlink->w, outlink->h);
211  if (!outpicref)
212  return AVERROR(ENOMEM);
213  outlink->sample_aspect_ratio = (AVRational){1,1};
214  memset(outpicref->data[0], 0, outlink->h * outpicref->linesize[0]);
215  memset(outpicref->data[1], 128, outlink->h * outpicref->linesize[1]);
216  memset(outpicref->data[2], 128, outlink->h * outpicref->linesize[2]);
217  }
218 
219  if (showspectrum->xpos >= outlink->w)
220  showspectrum->xpos = 0;
221 
222  showspectrum->combine_buffer =
223  av_realloc_f(showspectrum->combine_buffer, outlink->h * 3,
224  sizeof(*showspectrum->combine_buffer));
225 
226  av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d RDFT window size:%d\n",
227  showspectrum->w, showspectrum->h, win_size);
228  return 0;
229 }
230 
231 inline static int push_frame(AVFilterLink *outlink)
232 {
233  ShowSpectrumContext *showspectrum = outlink->src->priv;
234 
235  showspectrum->xpos++;
236  if (showspectrum->xpos >= outlink->w)
237  showspectrum->xpos = 0;
238  showspectrum->filled = 0;
239  showspectrum->req_fullfilled = 1;
240 
241  return ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref));
242 }
243 
244 static int request_frame(AVFilterLink *outlink)
245 {
246  ShowSpectrumContext *showspectrum = outlink->src->priv;
247  AVFilterLink *inlink = outlink->src->inputs[0];
248  int ret;
249 
250  showspectrum->req_fullfilled = 0;
251  do {
252  ret = ff_request_frame(inlink);
253  } while (!showspectrum->req_fullfilled && ret >= 0);
254 
255  if (ret == AVERROR_EOF && showspectrum->outpicref)
256  push_frame(outlink);
257  return ret;
258 }
259 
/**
 * Feed up to nb_samples input samples (per channel) into the RDFT window;
 * when the window is full, run the RDFT for every displayed channel,
 * convert the magnitudes to YUV, draw one spectrum column into the output
 * picture and push it downstream.
 *
 * @param inlink     input audio link (sample data is read per channel from
 *                   insamples->extended_data, i.e. planar s16 is assumed —
 *                   set by query_formats)
 * @param insamples  input audio frame; samples are read starting at
 *                   showspectrum->consumed
 * @param nb_samples number of samples still available in insamples
 * @return number of samples consumed (>= 0), or a negative error code
 *         returned by push_frame()
 */
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples)
{
    int ret;
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowSpectrumContext *showspectrum = ctx->priv;
    AVFrame *outpicref = showspectrum->outpicref;

    /* nb_freq contains the power of two superior or equal to the output image
     * height (or half the RDFT window size) */
    const int nb_freq = 1 << (showspectrum->rdft_bits - 1);
    const int win_size = nb_freq << 1;
    /* magnitude normalization: RDFT gain (sqrt(nb_freq)) times the s16
     * full-scale value (32768) */
    const double w = 1. / (sqrt(nb_freq) * 32768.);

    int ch, plane, n, y;
    const int start = showspectrum->filled;
    /* never take more than what is left to complete the current window */
    const int add_samples = FFMIN(win_size - start, nb_samples);

    /* fill RDFT input with the number of samples available */
    for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
        const int16_t *p = (int16_t *)insamples->extended_data[ch];

        p += showspectrum->consumed;
        /* apply the precomputed window function while copying */
        for (n = 0; n < add_samples; n++)
            showspectrum->rdft_data[ch][start + n] = p[n] * showspectrum->window_func_lut[start + n];
    }
    showspectrum->filled += add_samples;

    /* complete RDFT window size? */
    if (showspectrum->filled == win_size) {

        /* channel height */
        int h = showspectrum->channel_height;

        /* run RDFT on each samples set */
        for (ch = 0; ch < showspectrum->nb_display_channels; ch++)
            av_rdft_calc(showspectrum->rdft, showspectrum->rdft_data[ch]);

        /* fill a new spectrum column */
#define RE(y, ch) showspectrum->rdft_data[ch][2 * y + 0]
#define IM(y, ch) showspectrum->rdft_data[ch][2 * y + 1]
#define MAGNITUDE(y, ch) hypot(RE(y, ch), IM(y, ch))

        /* initialize buffer for combining to black (Y=0, U=V=neutral) */
        for (y = 0; y < outlink->h; y++) {
            showspectrum->combine_buffer[3 * y    ] = 0;
            showspectrum->combine_buffer[3 * y + 1] = 127.5;
            showspectrum->combine_buffer[3 * y + 2] = 127.5;
        }

        for (ch = 0; ch < showspectrum->nb_display_channels; ch++) {
            /* per-channel YUV contribution scale factors */
            float yf, uf, vf;

            /* decide color range */
            switch (showspectrum->mode) {
            case COMBINED:
                // reduce range by channel count
                yf = 256.0f / showspectrum->nb_display_channels;
                switch (showspectrum->color_mode) {
                case INTENSITY:
                    uf = yf;
                    vf = yf;
                    break;
                case CHANNEL:
                    /* adjust saturation for mixed UV coloring */
                    /* this factor is correct for infinite channels, an approximation otherwise */
                    uf = yf * M_PI;
                    vf = yf * M_PI;
                    break;
                default:
                    av_assert0(0);
                }
                break;
            case SEPARATE:
                // full range
                yf = 256.0f;
                uf = 256.0f;
                vf = 256.0f;
                break;
            default:
                av_assert0(0);
            }

            if (showspectrum->color_mode == CHANNEL) {
                /* spread channel hues evenly around the UV color circle */
                if (showspectrum->nb_display_channels > 1) {
                    uf *= 0.5 * sin((2 * M_PI * ch) / showspectrum->nb_display_channels);
                    vf *= 0.5 * cos((2 * M_PI * ch) / showspectrum->nb_display_channels);
                } else {
                    /* single channel: no chroma */
                    uf = 0.0f;
                    vf = 0.0f;
                }
            }
            uf *= showspectrum->saturation;
            vf *= showspectrum->saturation;

            /* draw the channel */
            for (y = 0; y < h; y++) {
                /* COMBINED: channels overlap; SEPARATE: stacked bands */
                int row = (showspectrum->mode == COMBINED) ? y : ch * h + y;
                float *out = &showspectrum->combine_buffer[3 * row];

                /* get magnitude */
                float a = w * MAGNITUDE(y, ch);

                /* apply scale */
                switch (showspectrum->scale) {
                case LINEAR:
                    break;
                case SQRT:
                    a = sqrt(a);
                    break;
                case CBRT:
                    a = cbrt(a);
                    break;
                case LOG:
                    a = 1 - log(FFMAX(FFMIN(1, a), 1e-6)) / log(1e-6); // zero = -120dBFS
                    break;
                default:
                    av_assert0(0);
                }

                if (showspectrum->color_mode == INTENSITY) {
                    float y, u, v;
                    int i;

                    /* find the first table entry with intensity >= a ... */
                    for (i = 1; i < sizeof(intensity_color_table) / sizeof(*intensity_color_table) - 1; i++)
                        if (intensity_color_table[i].a >= a)
                            break;
                    // i now is the first item >= the color
                    // now we know to interpolate between item i - 1 and i
                    if (a <= intensity_color_table[i - 1].a) {
                        y = intensity_color_table[i - 1].y;
                        u = intensity_color_table[i - 1].u;
                        v = intensity_color_table[i - 1].v;
                    } else if (a >= intensity_color_table[i].a) {
                        y = intensity_color_table[i].y;
                        u = intensity_color_table[i].u;
                        v = intensity_color_table[i].v;
                    } else {
                        /* ... and interpolate linearly between entries i-1 and i */
                        float start = intensity_color_table[i - 1].a;
                        float end = intensity_color_table[i].a;
                        float lerpfrac = (a - start) / (end - start);
                        y = intensity_color_table[i - 1].y * (1.0f - lerpfrac)
                          + intensity_color_table[i].y * lerpfrac;
                        u = intensity_color_table[i - 1].u * (1.0f - lerpfrac)
                          + intensity_color_table[i].u * lerpfrac;
                        v = intensity_color_table[i - 1].v * (1.0f - lerpfrac)
                          + intensity_color_table[i].v * lerpfrac;
                    }

                    out[0] += y * yf;
                    out[1] += u * uf;
                    out[2] += v * vf;
                } else {
                    out[0] += a * yf;
                    out[1] += a * uf;
                    out[2] += a * vf;
                }
            }
        }

        /* copy to output */
        if (showspectrum->sliding) {
            /* shift the whole picture one pixel to the left and draw the
             * new column in the rightmost position */
            for (plane = 0; plane < 3; plane++) {
                for (y = 0; y < outlink->h; y++) {
                    uint8_t *p = outpicref->data[plane] +
                                 y * outpicref->linesize[plane];
                    memmove(p, p + 1, outlink->w - 1);
                }
            }
            showspectrum->xpos = outlink->w - 1;
        }
        /* write the combined column bottom-up, so low frequencies end up at
         * the bottom of the picture; values are clipped to [0,255] */
        for (plane = 0; plane < 3; plane++) {
            uint8_t *p = outpicref->data[plane] +
                         (outlink->h - 1) * outpicref->linesize[plane] +
                         showspectrum->xpos;
            for (y = 0; y < outlink->h; y++) {
                *p = rint(FFMAX(0, FFMIN(showspectrum->combine_buffer[3 * y + plane], 255)));
                p -= outpicref->linesize[plane];
            }
        }

        /* timestamp the column at the position of the first consumed sample */
        outpicref->pts = insamples->pts +
            av_rescale_q(showspectrum->consumed,
                         (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        ret = push_frame(outlink);
        if (ret < 0)
            return ret;
    }

    return add_samples;
}
452 
453 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
454 {
455  AVFilterContext *ctx = inlink->dst;
456  ShowSpectrumContext *showspectrum = ctx->priv;
457  int ret = 0, left_samples = insamples->nb_samples;
458 
459  showspectrum->consumed = 0;
460  while (left_samples) {
461  int ret = plot_spectrum_column(inlink, insamples, left_samples);
462  if (ret < 0)
463  break;
464  showspectrum->consumed += ret;
465  left_samples -= ret;
466  }
467 
468  av_frame_free(&insamples);
469  return ret;
470 }
471 
473  {
474  .name = "default",
475  .type = AVMEDIA_TYPE_AUDIO,
476  .filter_frame = filter_frame,
477  },
478  { NULL }
479 };
480 
482  {
483  .name = "default",
484  .type = AVMEDIA_TYPE_VIDEO,
485  .config_props = config_output,
486  .request_frame = request_frame,
487  },
488  { NULL }
489 };
490 
492  .name = "showspectrum",
493  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
494  .uninit = uninit,
495  .query_formats = query_formats,
496  .priv_size = sizeof(ShowSpectrumContext),
497  .inputs = showspectrum_inputs,
498  .outputs = showspectrum_outputs,
499  .priv_class = &showspectrum_class,
500 };
Definition: start.py:1
float v
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static double rint(double x)
Definition: libm.h:141
AVOption.
Definition: opt.h:251
void * av_realloc_f(void *ptr, size_t nelem, size_t elsize)
Allocate or reallocate a block of memory.
Definition: mem.c:168
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
static const struct @107 intensity_color_table[]
float * window_func_lut
Window function LUT.
static int query_formats(AVFilterContext *ctx)
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
int sliding
1 if sliding mode, 0 otherwise
RDFTContext * rdft
Real Discrete Fourier Transform context.
static const AVFilterPad showspectrum_outputs[]
output residual component w
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
mode
Definition: f_perms.c:27
static const AVOption showspectrum_options[]
AVOptions.
end end
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
DisplayMode
void ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
Add *ref as a new reference to f.
Definition: formats.c:427
static int request_frame(AVFilterLink *outlink)
FFTSample ** rdft_data
bins holder for each (displayed) channels
#define AVERROR_EOF
End of file.
Definition: error.h:55
integer sqrt
Definition: avutil.txt:2
A filter pad used for either input or output.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:130
static av_always_inline double cbrt(double x)
Definition: libm.h:52
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
Definition: avfilter.h:545
float saturation
color saturation multiplier
float * combine_buffer
color combining buffer (3 * h items)
simple assert() macros that are a bit more flexible than ISO C assert().
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
#define FFMAX(a, b)
Definition: common.h:56
float FFTSample
Definition: avfft.h:35
enum DisplayScale scale
void av_rdft_calc(RDFTContext *s, FFTSample *data)
#define AV_LOG_VERBOSE
Definition: log.h:157
struct AVRational AVRational
rational number numerator/denominator
audio channel layout utility functions
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples)
#define FFMIN(a, b)
Definition: common.h:58
float y
static const AVFilterPad showspectrum_inputs[]
static int push_frame(AVFilterLink *outlink)
ret
Definition: avfilter.c:821
static int av_size_mult(size_t a, size_t b, size_t *r)
Multiply two size_t values checking for overflow.
Definition: mem.h:204
ColorMode
int filled
number of samples (per channel) filled in current rdft_buffer
#define FLAGS
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
float u
#define MAGNITUDE(y, ch)
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
AVFilterChannelLayouts * ff_all_channel_layouts(void)
Construct an empty AVFilterChannelLayouts/AVFilterFormats struct – representing any channel layout (...
Definition: formats.c:402
float a
A list of supported channel layouts.
Definition: formats.h:85
AVFrame * av_frame_clone(AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:317
NULL
Definition: eval.c:55
enum DisplayMode mode
channel display mode
void ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:432
static int config_output(AVFilterLink *outlink)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
FFT functions.
AVFilter avfilter_avf_showspectrum
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
Describe the class of an AVClass context structure.
Definition: log.h:50
int consumed
number of samples (per channel) consumed from the input frame
Filter definition.
Definition: avfilter.h:436
AVFILTER_DEFINE_CLASS(showspectrum)
synthesis window for stochastic i
rational number numerator/denominator
Definition: rational.h:43
const char * name
filter name
Definition: avfilter.h:437
offset must point to two consecutive integers
Definition: opt.h:230
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:396
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
static av_cold void uninit(AVFilterContext *ctx)
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common formats
Definition: swscale.txt:33
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
AVSampleFormat
Audio Sample Formats.
Definition: samplefmt.h:49
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:82
Same thing on a dB scale
int rdft_bits
number of bits (RDFT window size = 1<<rdft_bits)
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:524
signed 16 bits, planar
Definition: samplefmt.h:58
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:700
#define OFFSET(x)
void INT64 start
Definition: avisynth_c.h:594
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
#define M_PI
Definition: mathematics.h:46
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:319
DisplayScale
internal API functions
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:117
int xpos
x position (current column)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:127
for(j=16;j >0;--j)
enum ColorMode color_mode
display color scheme
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)