vf_blend.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/imgutils.h"
22 #include "libavutil/eval.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/pixfmt.h"
25 #include "avfilter.h"
26 #include "bufferqueue.h"
27 #include "formats.h"
28 #include "internal.h"
29 #include "video.h"
30 
/* Indices of the two input pads in ctx->inputs[]. */
#define TOP    0
#define BOTTOM 1
33 
34 enum BlendMode {
61 };
62 
/* Variables available in the c*_expr / all_expr user expressions; the
 * order must match the VAR_* indices used by blend_expr() and blend_frame(). */
static const char *const var_names[] = { "X", "Y", "W", "H", "SW", "SH", "T", "N", "A", "B", "TOP", "BOTTOM", NULL };
65 
66 typedef struct FilterParams {
69  double opacity;
71  char *expr_str;
72  void (*blend)(const uint8_t *top, int top_linesize,
73  const uint8_t *bottom, int bottom_linesize,
74  uint8_t *dst, int dst_linesize,
75  int width, int height, struct FilterParams *param);
76 } FilterParams;
77 
78 typedef struct {
79  const AVClass *class;
80  struct FFBufQueue queue_top;
81  struct FFBufQueue queue_bottom;
82  int hsub, vsub; ///< chroma subsampling values
84  int framenum;
85  char *all_expr;
86  enum BlendMode all_mode;
87  double all_opacity;
88 
90 } BlendContext;
91 
/* Shorthands for the option table below. */
#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
94 
/* Option table: per-component blend mode, user expression and opacity,
 * plus "all_*" variants that override every component at once (see init()).
 * The AV_OPT_TYPE_CONST entries below are the named values of the "mode"
 * options. */
static const AVOption blend_options[] = {
    /* per-component and global blend mode selection */
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},
    /* -1 means "not set"; any value >= 0 overrides the per-component modes */
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},
    /* named constants selectable for any of the "mode" options above */
    { "addition",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION},   0, 0, FLAGS, "mode" },
    { "and",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND},        0, 0, FLAGS, "mode" },
    { "average",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE},    0, 0, FLAGS, "mode" },
    { "burn",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN},       0, 0, FLAGS, "mode" },
    { "darken",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN},     0, 0, FLAGS, "mode" },
    { "difference", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE}, 0, 0, FLAGS, "mode" },
    { "divide",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE},     0, 0, FLAGS, "mode" },
    { "dodge",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE},      0, 0, FLAGS, "mode" },
    { "exclusion",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION},  0, 0, FLAGS, "mode" },
    { "hardlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT},  0, 0, FLAGS, "mode" },
    { "lighten",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN},    0, 0, FLAGS, "mode" },
    { "multiply",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY},   0, 0, FLAGS, "mode" },
    { "negation",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION},   0, 0, FLAGS, "mode" },
    { "normal",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL},     0, 0, FLAGS, "mode" },
    { "or",         "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR},         0, 0, FLAGS, "mode" },
    { "overlay",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY},    0, 0, FLAGS, "mode" },
    { "phoenix",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX},    0, 0, FLAGS, "mode" },
    { "pinlight",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT},   0, 0, FLAGS, "mode" },
    { "reflect",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT},    0, 0, FLAGS, "mode" },
    { "screen",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN},     0, 0, FLAGS, "mode" },
    { "softlight",  "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT},  0, 0, FLAGS, "mode" },
    { "subtract",   "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT},   0, 0, FLAGS, "mode" },
    { "vividlight", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT}, 0, 0, FLAGS, "mode" },
    { "xor",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR},        0, 0, FLAGS, "mode" },
    /* user expressions; a set expression overrides the component's mode */
    { "c0_expr", "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c1_expr", "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c2_expr", "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "c3_expr", "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
    /* per-component and global opacity (all_opacity < 1 overrides, see init()) */
    { "c0_opacity", "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c1_opacity", "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c2_opacity", "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "c3_opacity", "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    { NULL },
};
137 
139 
/**
 * "normal" blend mode: copy the top plane verbatim into the destination.
 * Note that opacity is NOT applied here, unlike the DEFINE_BLEND-generated
 * modes which mix the expression result with the top sample per pixel.
 */
static void blend_normal(const uint8_t *top, int top_linesize,
                         const uint8_t *bottom, int bottom_linesize,
                         uint8_t *dst, int dst_linesize,
                         int width, int height, FilterParams *param)
{
    av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, height);
}
147 
/**
 * Generate a per-plane blend function named blend_<name>.
 *
 * For every pixel, "expr" (which may use the A/B macros below to refer to
 * the current top/bottom samples) is evaluated and mixed with the top
 * sample according to the per-plane opacity:
 *
 *     dst = top + (expr - top) * opacity
 *
 * so opacity == 1 gives the pure blend result and opacity == 0 leaves the
 * top input unchanged.
 */
#define DEFINE_BLEND(name, expr) \
static void blend_## name(const uint8_t *top, int top_linesize, \
                          const uint8_t *bottom, int bottom_linesize, \
                          uint8_t *dst, int dst_linesize, \
                          int width, int height, FilterParams *param) \
{ \
    double opacity = param->opacity; \
    int i, j; \
 \
    for (i = 0; i < height; i++) { \
        for (j = 0; j < width; j++) { \
            dst[j] = top[j] + ((expr) - top[j]) * opacity; \
        } \
        dst    += dst_linesize; \
        top    += top_linesize; \
        bottom += bottom_linesize; \
    } \
}
166 
/* Shorthands for the current top/bottom samples inside DEFINE_BLEND
 * expressions; they rely on the loop index "j" of the generated function. */
#define A top[j]
#define B bottom[j]

/* Helper expressions shared by several blend modes. Every macro argument
 * is fully parenthesized so that compound arguments such as
 * MULTIPLY(2, A, B), BURN(A, 2 * B) or DODGE(A, 2 * (B - 128)) expand
 * safely regardless of operator precedence. */
#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
#define SCREEN(x, a, b)   (255 - (x) * (((255 - (a)) * (255 - (b))) / 255))
#define BURN(a, b)        (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
#define DODGE(a, b)       (((a) == 255) ? (a) : FFMIN(255, ((b) << 8) / (255 - (a))))
174 
175 DEFINE_BLEND(addition, FFMIN(255, A + B))
176 DEFINE_BLEND(average, (A + B) / 2)
177 DEFINE_BLEND(subtract, FFMAX(0, A - B))
178 DEFINE_BLEND(multiply, MULTIPLY(1, A, B))
179 DEFINE_BLEND(negation, 255 - FFABS(255 - A - B))
180 DEFINE_BLEND(difference, FFABS(A - B))
181 DEFINE_BLEND(screen, SCREEN(1, A, B))
182 DEFINE_BLEND(overlay, (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
183 DEFINE_BLEND(hardlight, (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
184 DEFINE_BLEND(darken, FFMIN(A, B))
185 DEFINE_BLEND(lighten, FFMAX(A, B))
186 DEFINE_BLEND(divide, ((float)A / ((float)B) * 255))
187 DEFINE_BLEND(dodge, DODGE(A, B))
188 DEFINE_BLEND(burn, BURN(A, B))
189 DEFINE_BLEND(softlight, (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255): B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5)/255))
190 DEFINE_BLEND(exclusion, A + B - 2 * A * B / 255)
191 DEFINE_BLEND(pinlight, (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
192 DEFINE_BLEND(phoenix, FFMIN(A, B) - FFMAX(A, B) + 255)
193 DEFINE_BLEND(reflect, (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
194 DEFINE_BLEND(and, A & B)
195 DEFINE_BLEND(or, A | B)
196 DEFINE_BLEND(xor, A ^ B)
197 DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
198 
199 static void blend_expr(const uint8_t *top, int top_linesize,
200  const uint8_t *bottom, int bottom_linesize,
201  uint8_t *dst, int dst_linesize,
202  int width, int height,
203  FilterParams *param)
204 {
205  AVExpr *e = param->e;
206  double *values = param->values;
207  int y, x;
208 
209  for (y = 0; y < height; y++) {
210  values[VAR_Y] = y;
211  for (x = 0; x < width; x++) {
212  values[VAR_X] = x;
213  values[VAR_TOP] = values[VAR_A] = top[x];
214  values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
215  dst[x] = av_expr_eval(e, values, NULL);
216  }
217  dst += dst_linesize;
218  top += top_linesize;
219  bottom += bottom_linesize;
220  }
221 }
222 
/**
 * Filter initialization: resolve the effective per-component mode and
 * opacity (the "all_*" options override the per-component ones when set),
 * select the matching blend callback, and compile any user expression,
 * which takes precedence over the mode-based callback.
 *
 * Returns 0 on success, a negative AVERROR on allocation or parse failure.
 */
static av_cold int init(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int ret, plane;

    for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
        FilterParams *param = &b->params[plane];

        /* all_mode defaults to -1 ("unset"); all_opacity defaults to 1 */
        if (b->all_mode >= 0)
            param->mode = b->all_mode;
        if (b->all_opacity < 1)
            param->opacity = b->all_opacity;

        switch (param->mode) {
        case BLEND_ADDITION:   param->blend = blend_addition;   break;
        case BLEND_AND:        param->blend = blend_and;        break;
        case BLEND_AVERAGE:    param->blend = blend_average;    break;
        case BLEND_BURN:       param->blend = blend_burn;       break;
        case BLEND_DARKEN:     param->blend = blend_darken;     break;
        case BLEND_DIFFERENCE: param->blend = blend_difference; break;
        case BLEND_DIVIDE:     param->blend = blend_divide;     break;
        case BLEND_DODGE:      param->blend = blend_dodge;      break;
        case BLEND_EXCLUSION:  param->blend = blend_exclusion;  break;
        case BLEND_HARDLIGHT:  param->blend = blend_hardlight;  break;
        case BLEND_LIGHTEN:    param->blend = blend_lighten;    break;
        case BLEND_MULTIPLY:   param->blend = blend_multiply;   break;
        case BLEND_NEGATION:   param->blend = blend_negation;   break;
        case BLEND_NORMAL:     param->blend = blend_normal;     break;
        case BLEND_OR:         param->blend = blend_or;         break;
        case BLEND_OVERLAY:    param->blend = blend_overlay;    break;
        case BLEND_PHOENIX:    param->blend = blend_phoenix;    break;
        case BLEND_PINLIGHT:   param->blend = blend_pinlight;   break;
        case BLEND_REFLECT:    param->blend = blend_reflect;    break;
        case BLEND_SCREEN:     param->blend = blend_screen;     break;
        case BLEND_SOFTLIGHT:  param->blend = blend_softlight;  break;
        case BLEND_SUBTRACT:   param->blend = blend_subtract;   break;
        case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
        case BLEND_XOR:        param->blend = blend_xor;        break;
        }

        /* all_expr supplies a default expression for components without one */
        if (b->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(b->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        }
        if (param->expr_str) {
            ret = av_expr_parse(&param->e, param->expr_str, var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            if (ret < 0)
                return ret;
            /* an expression overrides the mode selected above */
            param->blend = blend_expr;
        }
    }

    return 0;
}
279 
281 {
282  static const enum AVPixelFormat pix_fmts[] = {
287  };
288 
290  return 0;
291 }
292 
293 static int config_output(AVFilterLink *outlink)
294 {
295  AVFilterContext *ctx = outlink->src;
296  AVFilterLink *toplink = ctx->inputs[TOP];
297  AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
298 
299  if (toplink->format != bottomlink->format) {
300  av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
301  return AVERROR(EINVAL);
302  }
303  if (toplink->w != bottomlink->w ||
304  toplink->h != bottomlink->h ||
305  toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
306  toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
307  av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
308  "(size %dx%d, SAR %d:%d) do not match the corresponding "
309  "second input link %s parameters (%dx%d, SAR %d:%d)\n",
310  ctx->input_pads[TOP].name, toplink->w, toplink->h,
311  toplink->sample_aspect_ratio.num,
312  toplink->sample_aspect_ratio.den,
313  ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
314  bottomlink->sample_aspect_ratio.num,
315  bottomlink->sample_aspect_ratio.den);
316  return AVERROR(EINVAL);
317  }
318 
319  outlink->w = toplink->w;
320  outlink->h = bottomlink->h;
321  outlink->time_base = toplink->time_base;
322  outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
323  outlink->frame_rate = toplink->frame_rate;
324  return 0;
325 }
326 
327 static int config_input_top(AVFilterLink *inlink)
328 {
329  BlendContext *b = inlink->dst->priv;
330  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
331 
332  b->hsub = pix_desc->log2_chroma_w;
333  b->vsub = pix_desc->log2_chroma_h;
334  return 0;
335 }
336 
/* Free the per-component parsed expressions.
 * NOTE(review): this view of the source is missing two lines here (doc
 * lines 342-343); the top/bottom frame queues presumably get discarded
 * there (ff_bufqueue_discard_all) -- confirm against the full source. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
        av_expr_free(b->params[i].e);
}
348 
349 static int request_frame(AVFilterLink *outlink)
350 {
351  AVFilterContext *ctx = outlink->src;
352  BlendContext *b = ctx->priv;
353  int in, ret;
354 
355  b->frame_requested = 1;
356  while (b->frame_requested) {
357  in = ff_bufqueue_peek(&b->queue_top, 0) ? BOTTOM : TOP;
358  ret = ff_request_frame(ctx->inputs[in]);
359  if (ret < 0)
360  return ret;
361  }
362  return 0;
363 }
364 
365 static void blend_frame(AVFilterContext *ctx,
366  AVFrame *top_buf,
367  AVFrame *bottom_buf,
368  AVFrame *dst_buf)
369 {
370  BlendContext *b = ctx->priv;
371  AVFilterLink *inlink = ctx->inputs[0];
372  FilterParams *param;
373  int plane;
374 
375  for (plane = 0; dst_buf->data[plane]; plane++) {
376  int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
377  int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
378  int outw = dst_buf->width >> hsub;
379  int outh = dst_buf->height >> vsub;
380  uint8_t *dst = dst_buf->data[plane];
381  uint8_t *top = top_buf->data[plane];
382  uint8_t *bottom = bottom_buf->data[plane];
383 
384  param = &b->params[plane];
385  param->values[VAR_N] = b->framenum++;
386  param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
387  param->values[VAR_W] = outw;
388  param->values[VAR_H] = outh;
389  param->values[VAR_SW] = outw / dst_buf->width;
390  param->values[VAR_SH] = outh / dst_buf->height;
391  param->blend(top, top_buf->linesize[plane],
392  bottom, bottom_buf->linesize[plane],
393  dst, dst_buf->linesize[plane], outw, outh, param);
394  }
395 }
396 
397 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
398 {
399  AVFilterContext *ctx = inlink->dst;
400  AVFilterLink *outlink = ctx->outputs[0];
401  BlendContext *b = ctx->priv;
402 
403  int ret = 0;
404  int is_bottom = (inlink == ctx->inputs[BOTTOM]);
405  struct FFBufQueue *queue =
406  (is_bottom ? &b->queue_bottom : &b->queue_top);
407  ff_bufqueue_add(ctx, queue, buf);
408 
409  while (1) {
410  AVFrame *top_buf, *bottom_buf, *out_buf;
411 
412  if (!ff_bufqueue_peek(&b->queue_top, 0) ||
413  !ff_bufqueue_peek(&b->queue_bottom, 0)) break;
414 
415  top_buf = ff_bufqueue_get(&b->queue_top);
416  bottom_buf = ff_bufqueue_get(&b->queue_bottom);
417 
418  out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
419  if (!out_buf) {
420  return AVERROR(ENOMEM);
421  }
422  av_frame_copy_props(out_buf, top_buf);
423 
424  b->frame_requested = 0;
425  blend_frame(ctx, top_buf, bottom_buf, out_buf);
426  ret = ff_filter_frame(ctx->outputs[0], out_buf);
427  av_frame_free(&top_buf);
428  av_frame_free(&bottom_buf);
429  }
430  return ret;
431 }
432 
/* Input pads: both feed filter_frame(); only "top" installs
 * config_input_top, which caches the chroma subsampling shifts. */
static const AVFilterPad blend_inputs[] = {
    {
        .name         = "top",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input_top,
        .filter_frame = filter_frame,
    },{
        .name         = "bottom",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
446 
/* Single video output; request_frame() drives the two inputs. */
static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
456 
458  .name = "blend",
459  .description = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
460  .init = init,
461  .uninit = uninit,
462  .priv_size = sizeof(BlendContext),
464  .inputs = blend_inputs,
465  .outputs = blend_outputs,
466  .priv_class = &blend_class,
467 };
this is essentially always true and is there for self documentation without modifying the data in between WRITE and PRESERVE permissions are about sharing the same buffer between several filters to avoid expensive copies without them doing conflicting changes on the data The REUSE and REUSE2 permissions are about special memory for direct rendering For example a buffer directly allocated in video memory must not modified once it is displayed on screen
AVExpr * e
Definition: vf_blend.c:70
static AVFrame * ff_bufqueue_get(struct FFBufQueue *queue)
Get the first buffer from the queue and remove it.
Definition: bufferqueue.h:98
static const AVOption blend_options[]
Definition: vf_blend.c:95
Definition: vf_blend.c:64
static const char *const var_names[]
Definition: vf_blend.c:63
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
double opacity
Definition: vf_blend.c:69
AVOption.
Definition: opt.h:251
AVFILTER_DEFINE_CLASS(blend)
enum BlendMode all_mode
Definition: vf_blend.c:86
static void blend_frame(AVFilterContext *ctx, AVFrame *top_buf, AVFrame *bottom_buf, AVFrame *dst_buf)
Definition: vf_blend.c:365
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
misc image utilities
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
static int divide(int a, int b)
Definition: sonic.c:78
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
int num
numerator
Definition: rational.h:44
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
#define DODGE(a, b)
Definition: vf_blend.c:173
#define FF_ARRAY_ELEMS(a)
#define BURN(a, b)
Definition: vf_blend.c:172
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:640
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_blend.c:337
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
Structure holding the queue.
Definition: bufferqueue.h:49
int frame_requested
Definition: vf_blend.c:83
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:105
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
char * expr_str
Definition: vf_blend.c:71
#define av_cold
Definition: attributes.h:78
mode
Definition: f_perms.c:27
AVOptions.
int framenum
Definition: vf_blend.c:84
#define b
Definition: input.c:42
Definition: vf_blend.c:64
#define NAN
Definition: math.h:7
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:159
Definition: eval.c:140
#define OFFSET(x)
Definition: vf_blend.c:92
static int request_frame(AVFilterLink *outlink)
Definition: vf_blend.c:349
enum BlendMode mode
Definition: vf_blend.c:67
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:69
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range ...
Definition: pixfmt.h:104
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:81
#define BOTTOM
Definition: vf_blend.c:32
static int config_output(AVFilterLink *outlink)
Definition: vf_blend.c:293
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
FilterParams params[4]
Definition: vf_blend.c:89
Definition: vf_blend.c:64
A filter pad used for either input or output.
Discrete Time axis x
AVFilterPad * input_pads
array of input pads
Definition: avfilter.h:531
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:219
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
Definition: vf_blend.c:397
int width
width and height of the video frame
Definition: frame.h:122
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
Definition: avfilter.h:545
#define FLAGS
Definition: vf_blend.c:93
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
AVFilter avfilter_vf_blend
Definition: vf_blend.c:457
#define FFMAX(a, b)
Definition: common.h:56
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
#define TOP
Definition: vf_blend.c:31
BlendMode
Definition: vf_blend.c:34
AVFrame * queue[FF_BUFQUEUE_SIZE]
Definition: bufferqueue.h:50
#define FFMIN(a, b)
Definition: common.h:58
void(* blend)(const uint8_t *top, int top_linesize, const uint8_t *bottom, int bottom_linesize, uint8_t *dst, int dst_linesize, int width, int height, struct FilterParams *param)
Definition: vf_blend.c:72
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
Definition: vf_blend.c:64
ret
Definition: avfilter.c:821
#define FFABS(a)
Definition: common.h:53
static av_cold int init(AVFilterContext *ctx)
Definition: vf_blend.c:223
#define SCREEN(x, a, b)
Definition: vf_blend.c:171
#define B
Definition: vf_blend.c:168
struct FFBufQueue queue_bottom
Definition: vf_blend.c:81
static void ff_bufqueue_discard_all(struct FFBufQueue *queue)
Unref and remove all buffers from the queue.
Definition: bufferqueue.h:111
NULL
Definition: eval.c:55
static int width
Definition: tests/utils.c:158
typedef void(RENAME(mix_any_func_type))
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:302
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:220
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
double values[VAR_VARS_NB]
Definition: vf_blend.c:68
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:218
or(d) export
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
static const AVFilterPad blend_inputs[]
Definition: vf_blend.c:433
void * buf
Definition: avisynth_c.h:594
filter data
Definition: mlp.h:74
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:74
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
const char * name
filter name
Definition: avfilter.h:437
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
static void blend_normal(const uint8_t *top, int top_linesize, const uint8_t *bottom, int bottom_linesize, uint8_t *dst, int dst_linesize, int width, int height, FilterParams *param)
Definition: vf_blend.c:140
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
struct FilterParams FilterParams
struct FFBufQueue queue_top
Definition: vf_blend.c:80
const char const char * params
Definition: avisynth_c.h:675
char * all_expr
Definition: vf_blend.c:85
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
Y , 8bpp.
Definition: pixfmt.h:76
int vsub
chroma subsampling values
Definition: vf_blend.c:82
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
double all_opacity
Definition: vf_blend.c:87
#define A
Definition: vf_blend.c:167
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:82
int den
denominator
Definition: rational.h:45
function y
Definition: D.m:1
pixel format definitions
Definition: vf_blend.c:64
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:691
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
static int config_input_top(AVFilterLink *inlink)
Definition: vf_blend.c:327
An instance of a filter.
Definition: avfilter.h:524
Definition: vf_blend.c:64
int height
Definition: frame.h:122
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:103
static void ff_bufqueue_add(void *log, struct FFBufQueue *queue, AVFrame *buf)
Add a buffer to the queue.
Definition: bufferqueue.h:71
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
Definition: avfilter.c:319
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:242
internal API functions
#define MULTIPLY(x, a, b)
Definition: vf_blend.c:170
static const AVFilterPad blend_outputs[]
Definition: vf_blend.c:447
Definition: vf_blend.c:64
Definition: vf_blend.c:64
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
static int query_formats(AVFilterContext *ctx)
Definition: vf_blend.c:280
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:190
#define DEFINE_BLEND(name, expr)
Definition: vf_blend.c:148
simple arithmetic expression evaluator
static AVFrame * ff_bufqueue_peek(struct FFBufQueue *queue, unsigned index)
Get a buffer from the queue without altering it.
Definition: bufferqueue.h:87