f_select.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * filter for selecting which frame passes in the filterchain
24  */
25 
26 #include "libavutil/avstring.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/fifo.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/opt.h"
31 #include "avfilter.h"
32 #include "audio.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 
37 #if CONFIG_AVCODEC
38 #include "libavcodec/dsputil.h"
39 #endif
40 
/* Names of the variables available in the selection expression, in the
 * order matched by enum var_name below (required by av_expr_parse()). */
static const char *const var_names[] = {
    "TB",                ///< timebase of the input stream

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp of the current frame expressed in seconds (pts * TB)
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    /* constants for comparison with pict_type */
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",

    "interlace_type",    ///< the frame interlace type
    /* constants for comparison with interlace_type */
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "consumed_samples_n",///< number of samples consumed by the filter (only audio)
    "samples_n",         ///< number of samples in the current frame (only audio)
    "sample_rate",       ///< sample rate (only audio)

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    "scene",             ///< scene change score (video with scene detection only)

    NULL
};
83 
/* One enumerator per entry of var_names above — the two lists must stay in
 * the same order, since av_expr_parse() resolves names by index.
 * NOTE(review): the enumerator list was lost in extraction and has been
 * reconstructed from var_names[] and the uses in config_input()/select_frame(). */
enum var_name {
    VAR_TB,

    VAR_PTS,
    VAR_START_PTS,
    VAR_PREV_PTS,
    VAR_PREV_SELECTED_PTS,

    VAR_T,
    VAR_START_T,
    VAR_PREV_T,
    VAR_PREV_SELECTED_T,

    VAR_PICT_TYPE,
    VAR_PICT_TYPE_I,
    VAR_PICT_TYPE_P,
    VAR_PICT_TYPE_B,
    VAR_PICT_TYPE_S,
    VAR_PICT_TYPE_SI,
    VAR_PICT_TYPE_SP,
    VAR_PICT_TYPE_BI,

    VAR_INTERLACE_TYPE,
    VAR_INTERLACE_TYPE_P,
    VAR_INTERLACE_TYPE_T,
    VAR_INTERLACE_TYPE_B,

    VAR_CONSUMED_SAMPLES_N,
    VAR_SAMPLES_N,
    VAR_SAMPLE_RATE,

    VAR_N,
    VAR_SELECTED_N,
    VAR_PREV_SELECTED_N,

    VAR_KEY,
    VAR_POS,

    VAR_SCENE,

    VAR_VARS_NB
};
126 
127 typedef struct {
128  const AVClass *class;
129  char *expr_str;
131  double var_values[VAR_VARS_NB];
132  int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
133 #if CONFIG_AVCODEC
134  AVCodecContext *avctx; ///< codec context required for the DSPContext (scene detect only)
135  DSPContext c; ///< context providing optimized SAD methods (scene detect only)
136  double prev_mafd; ///< previous MAFD (scene detect only)
137 #endif
138  AVFrame *prev_picref; ///< previous frame (scene detect only)
139  double select;
140  int select_out; ///< mark the selected output pad index
142 } SelectContext;
143 
#define OFFSET(x) offsetof(SelectContext, x)
/* Declare the AVOption table for a select/aselect filter instance.
 * "expr"/"e" set the selection expression (default "1": pass everything),
 * "outputs"/"n" set the number of dynamically created output pads. */
#define DEFINE_OPTIONS(filt_name, FLAGS) \
static const AVOption filt_name##_options[] = { \
    { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { NULL } \
}
153 
154 static int request_frame(AVFilterLink *outlink);
155 
156 static av_cold int init(AVFilterContext *ctx)
157 {
158  SelectContext *select = ctx->priv;
159  int i, ret;
160 
161  if ((ret = av_expr_parse(&select->expr, select->expr_str,
162  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
163  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
164  select->expr_str);
165  return ret;
166  }
167  select->do_scene_detect = !!strstr(select->expr_str, "scene");
168 
169  for (i = 0; i < select->nb_outputs; i++) {
170  AVFilterPad pad = { 0 };
171 
172  pad.name = av_asprintf("output%d", i);
173  if (!pad.name)
174  return AVERROR(ENOMEM);
175  pad.type = ctx->filter->inputs[0].type;
177  ff_insert_outpad(ctx, i, &pad);
178  }
179 
180  return 0;
181 }
182 
/* Values stored in VAR_INTERLACE_TYPE, compared against the expression
 * constants PROGRESSIVE/TOPFIRST/BOTTOMFIRST. */
#define INTERLACE_TYPE_P 0
#define INTERLACE_TYPE_T 1
#define INTERLACE_TYPE_B 2
186 
187 static int config_input(AVFilterLink *inlink)
188 {
189  SelectContext *select = inlink->dst->priv;
190 
191  select->var_values[VAR_N] = 0.0;
192  select->var_values[VAR_SELECTED_N] = 0.0;
193 
194  select->var_values[VAR_TB] = av_q2d(inlink->time_base);
195 
196  select->var_values[VAR_PREV_PTS] = NAN;
199  select->var_values[VAR_PREV_T] = NAN;
200  select->var_values[VAR_START_PTS] = NAN;
201  select->var_values[VAR_START_T] = NAN;
202 
208 
212 
213  select->var_values[VAR_PICT_TYPE] = NAN;
214  select->var_values[VAR_INTERLACE_TYPE] = NAN;
215  select->var_values[VAR_SCENE] = NAN;
217  select->var_values[VAR_SAMPLES_N] = NAN;
218 
219  select->var_values[VAR_SAMPLE_RATE] =
220  inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
221 
222 #if CONFIG_AVCODEC
223  if (select->do_scene_detect) {
224  select->avctx = avcodec_alloc_context3(NULL);
225  if (!select->avctx)
226  return AVERROR(ENOMEM);
227  avpriv_dsputil_init(&select->c, select->avctx);
228  }
229 #endif
230  return 0;
231 }
232 
#if CONFIG_AVCODEC
/**
 * Compute a scene-change score in [0,1] by comparing the current frame
 * against the previously seen one with 8x8 SAD blocks.
 *
 * Returns 0 when no comparable previous frame exists (first frame, or
 * dimension/stride change); in all cases a clone of the current frame is
 * stored as the new comparison reference.
 */
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFrame *prev_picref = select->prev_picref;

    /* only compare frames with identical geometry and stride */
    if (prev_picref &&
        frame->height == prev_picref->height &&
        frame->width == prev_picref->width &&
        frame->linesize[0] == prev_picref->linesize[0]) {
        int x, y, nb_sad = 0;
        int64_t sad = 0;
        double mafd, diff;
        uint8_t *p1 = frame->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int linesize = frame->linesize[0];

        /* accumulate 8x8 SADs over plane 0; width*3 assumes 3 bytes per
         * pixel (packed 24bpp RGB as negotiated by query_formats) —
         * TODO(review): confirm against the negotiated pixel formats */
        for (y = 0; y < frame->height - 8; y += 8) {
            for (x = 0; x < frame->width*3 - 8; x += 8) {
                sad += select->c.sad[1](select, p1 + x, p2 + x,
                                        linesize, 8);
                nb_sad += 8 * 8;  /* count of compared bytes, for averaging */
            }
            p1 += 8 * linesize;
            p2 += 8 * linesize;
        }
        emms_c();  /* reset x86 FPU state after possible MMX SAD */
        /* mean absolute frame difference, smoothed by taking the min of the
         * value and its change since the last frame, scaled into [0,1] */
        mafd = nb_sad ? sad / nb_sad : 0;
        diff = fabs(mafd - select->prev_mafd);
        ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        av_frame_free(&prev_picref);
    }
    /* keep a reference to the current frame for the next comparison */
    select->prev_picref = av_frame_clone(frame);
    return ret;
}
#endif
271 
/* Convert between double expression values and int64_t timestamps,
 * mapping NAN <-> AV_NOPTS_VALUE. */
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
274 
275 static void select_frame(AVFilterContext *ctx, AVFrame *frame)
276 {
277  SelectContext *select = ctx->priv;
278  AVFilterLink *inlink = ctx->inputs[0];
279  double res;
280 
281  if (isnan(select->var_values[VAR_START_PTS]))
282  select->var_values[VAR_START_PTS] = TS2D(frame->pts);
283  if (isnan(select->var_values[VAR_START_T]))
284  select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
285 
286  select->var_values[VAR_PTS] = TS2D(frame->pts);
287  select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
288  select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
289 
290  switch (inlink->type) {
291  case AVMEDIA_TYPE_AUDIO:
292  select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
293  break;
294 
295  case AVMEDIA_TYPE_VIDEO:
296  select->var_values[VAR_INTERLACE_TYPE] =
299  select->var_values[VAR_PICT_TYPE] = frame->pict_type;
300 #if CONFIG_AVCODEC
301  if (select->do_scene_detect) {
302  char buf[32];
303  select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
304  // TODO: document metadata
305  snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
306  av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
307  }
308 #endif
309  break;
310  }
311 
312  select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
313  av_log(inlink->dst, AV_LOG_DEBUG,
314  "n:%f pts:%f t:%f key:%d",
315  select->var_values[VAR_N],
316  select->var_values[VAR_PTS],
317  select->var_values[VAR_T],
318  (int)select->var_values[VAR_KEY]);
319 
320  switch (inlink->type) {
321  case AVMEDIA_TYPE_VIDEO:
322  av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
325  select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
327  select->var_values[VAR_SCENE]);
328  break;
329  case AVMEDIA_TYPE_AUDIO:
330  av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%d",
331  (int)select->var_values[VAR_SAMPLES_N],
332  (int)select->var_values[VAR_CONSUMED_SAMPLES_N]);
333  break;
334  }
335 
336  if (res == 0) {
337  select->select_out = -1; /* drop */
338  } else if (isnan(res) || res < 0) {
339  select->select_out = 0; /* first output */
340  } else {
341  select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
342  }
343 
344  av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
345 
346  if (res) {
347  select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
349  select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
350  select->var_values[VAR_SELECTED_N] += 1.0;
351  if (inlink->type == AVMEDIA_TYPE_AUDIO)
352  select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
353  }
354 
355  select->var_values[VAR_N] += 1.0;
356  select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
357  select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
358 }
359 
360 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
361 {
362  AVFilterContext *ctx = inlink->dst;
363  SelectContext *select = ctx->priv;
364 
365  select_frame(ctx, frame);
366  if (select->select)
367  return ff_filter_frame(ctx->outputs[select->select_out], frame);
368 
369  av_frame_free(&frame);
370  return 0;
371 }
372 
373 static int request_frame(AVFilterLink *outlink)
374 {
375  AVFilterContext *ctx = outlink->src;
376  SelectContext *select = ctx->priv;
377  AVFilterLink *inlink = outlink->src->inputs[0];
378  int out_no = FF_OUTLINK_IDX(outlink);
379 
380  do {
381  int ret = ff_request_frame(inlink);
382  if (ret < 0)
383  return ret;
384  } while (select->select_out != out_no);
385 
386  return 0;
387 }
388 
/**
 * Release everything owned by the filter instance: the parsed expression,
 * the dynamically allocated output pad names and, when scene detection was
 * active, the reference frame and codec context.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i;

    av_expr_free(select->expr);
    select->expr = NULL;

    /* pad names were av_asprintf()ed in init() */
    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);

#if CONFIG_AVCODEC
    if (select->do_scene_detect) {
        av_frame_free(&select->prev_picref);
        if (select->avctx) {
            avcodec_close(select->avctx);
            av_freep(&select->avctx);
        }
    }
#endif
}
410 
412 {
413  SelectContext *select = ctx->priv;
414 
415  if (!select->do_scene_detect) {
416  return ff_default_query_formats(ctx);
417  } else {
418  static const enum AVPixelFormat pix_fmts[] = {
421  };
423  }
424  return 0;
425 }
426 
427 #if CONFIG_ASELECT_FILTER
428 
430 AVFILTER_DEFINE_CLASS(aselect);
431 
432 static av_cold int aselect_init(AVFilterContext *ctx)
433 {
434  SelectContext *select = ctx->priv;
435  int ret;
436 
437  if ((ret = init(ctx)) < 0)
438  return ret;
439 
440  if (select->do_scene_detect) {
441  av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
442  return AVERROR(EINVAL);
443  }
444 
445  return 0;
446 }
447 
/* Single audio input pad; frames are examined by filter_frame(). */
static const AVFilterPad avfilter_af_aselect_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .get_audio_buffer = ff_null_get_audio_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};
458 
459 AVFilter avfilter_af_aselect = {
460  .name = "aselect",
461  .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
462  .init = aselect_init,
463  .uninit = uninit,
464  .priv_size = sizeof(SelectContext),
465  .inputs = avfilter_af_aselect_inputs,
466  .priv_class = &aselect_class,
468 };
469 #endif /* CONFIG_ASELECT_FILTER */
470 
471 #if CONFIG_SELECT_FILTER
472 
474 AVFILTER_DEFINE_CLASS(select);
475 
476 static av_cold int select_init(AVFilterContext *ctx)
477 {
478  SelectContext *select = ctx->priv;
479  int ret;
480 
481  if ((ret = init(ctx)) < 0)
482  return ret;
483 
484  if (select->do_scene_detect && !CONFIG_AVCODEC) {
485  av_log(ctx, AV_LOG_ERROR, "Scene detection is not available without libavcodec.\n");
486  return AVERROR(EINVAL);
487  }
488 
489  return 0;
490 }
491 
/* Single video input pad; frames are examined by filter_frame(). */
static const AVFilterPad avfilter_vf_select_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};
502 
503 AVFilter avfilter_vf_select = {
504  .name = "select",
505  .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
506  .init = select_init,
507  .uninit = uninit,
508  .query_formats = query_formats,
509 
510  .priv_size = sizeof(SelectContext),
511  .priv_class = &select_class,
512 
513  .inputs = avfilter_vf_select_inputs,
515 };
516 #endif /* CONFIG_SELECT_FILTER */
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static av_cold void uninit(AVFilterContext *ctx)
Definition: f_select.c:389
external API header
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:70
#define AV_OPT_FLAG_AUDIO_PARAM
Definition: opt.h:284
#define INTERLACE_TYPE_P
Definition: f_select.c:183
AVFrame * ff_null_get_video_buffer(AVFilterLink *link, int w, int h)
Definition: video.c:35
enum AVMediaType type
AVFilterPad type.
#define FF_OUTLINK_IDX(link)
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:640
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
Switching Intra.
Definition: avutil.h:220
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
AVFilterPad * output_pads
array of output pads
Definition: avfilter.h:538
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
AVOptions.
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
Definition: f_select.c:275
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
#define NAN
Definition: math.h:7
#define emms_c()
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
Definition: eval.c:140
int nb_outputs
Definition: f_select.c:141
#define INTERLACE_TYPE_T
Definition: f_select.c:184
char * expr_str
Definition: f_select.c:129
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:430
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
frame
Definition: stft.m:14
A filter pad used for either input or output.
Discrete Time axis x
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
static int query_formats(AVFilterContext *ctx)
Definition: f_select.c:411
int width
width and height of the video frame
Definition: frame.h:122
AVDictionary ** avpriv_frame_get_metadatap(AVFrame *frame)
Definition: frame.c:49
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:287
double var_values[VAR_VARS_NB]
Definition: f_select.c:131
unsigned nb_outputs
number of output pads
Definition: avfilter.h:543
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
Definition: avfilter.h:545
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
int select_out
mark the selected output pad index
Definition: f_select.c:140
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:112
static int config_input(AVFilterLink *inlink)
Definition: f_select.c:187
common internal API header
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
#define FFMIN(a, b)
Definition: common.h:58
var_name
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
ret
Definition: avfilter.c:821
static void ff_insert_outpad(AVFilterContext *f, unsigned index, AVFilterPad *p)
Insert a new output pad for the filter.
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:71
#define diff(a, as, b, bs)
Definition: vf_phase.c:80
AVFrame * ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
get_audio_buffer() handler for filters which simply pass audio along
Definition: audio.c:36
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
Definition: f_select.c:360
const AVFilterPad * inputs
NULL terminated list of inputs. NULL if none.
Definition: avfilter.h:445
AVFrame * av_frame_clone(AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:317
int ff_default_query_formats(AVFilterContext *ctx)
Definition: formats.c:567
NULL
Definition: eval.c:55
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:302
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:285
main external API structure.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
a very simple circular buffer FIFO implementation
void * buf
Definition: avisynth_c.h:594
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:62
Definition: f_select.c:92
Switching Predicted.
Definition: avutil.h:221
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
#define TS2D(ts)
Definition: f_select.c:273
const char * name
filter name
Definition: avfilter.h:437
#define snprintf
Definition: snprintf.h:34
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
static int request_frame(AVFilterLink *outlink)
Definition: f_select.c:373
static int flags
Definition: cpu.c:23
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
int do_scene_detect
1 if the expression requires scene detection variables, 0 otherwise
Definition: f_select.c:132
#define CONFIG_AVCODEC
Definition: config.h:356
AVFrame * prev_picref
previous frame (scene detect only)
Definition: f_select.c:138
#define DEFINE_OPTIONS(filt_name, FLAGS)
Definition: f_select.c:145
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
static double c[64]
static av_cold int init(AVFilterContext *ctx)
Definition: f_select.c:156
Bi-dir predicted.
Definition: avutil.h:218
function y
Definition: D.m:1
DSP utils.
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:275
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:691
#define AVFILTER_DEFINE_CLASS(fname)
AVExpr * expr
Definition: f_select.c:130
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
int(* request_frame)(AVFilterLink *link)
Frame request callback.
An instance of a filter.
Definition: avfilter.h:524
static const char *const var_names[]
Definition: f_select.c:41
int height
Definition: frame.h:122
av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2932
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:319
#define INTERLACE_TYPE_B
Definition: f_select.c:185
internal API functions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
double select
Definition: f_select.c:139
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:127
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:527
Predicted.
Definition: avutil.h:217
DSPContext.
Definition: dsputil.h:127
simple arithmetic expression evaluator