vf_fieldmatch.c
1 /*
2  * Copyright (c) 2012 Fredrik Mellbin
3  * Copyright (c) 2013 Clément Bœsch
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Fieldmatching filter, ported from the VFM filter (VapourSynth) by Clément.
25  * Fredrik Mellbin is the author of the VIVTC/VFM filter, which is itself a
26  * light clone of the TIVTC/TFM (AviSynth) filter written by Kevin Stone
27  * (tritical), the original author.
28  *
29  * @see http://bengal.missouri.edu/~kes25c/
30  * @see http://www.vapoursynth.com/about/
31  */
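/* This filter only performs the field matching step of an inverse telecine:
 * frames it cannot match cleanly are flagged as interlaced (see
 * filter_frame() below), so a full IVTC chain typically pairs it with a
 * deinterlacer and a decimation filter. A sketch of such a filtergraph, with
 * illustrative (not mandatory) option values:
 *
 *   fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
 */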
32 
33 #include <inttypes.h>
34 
35 #include "libavutil/avassert.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/timestamp.h"
39 #include "avfilter.h"
40 #include "internal.h"
41 
42 #define INPUT_MAIN 0
43 #define INPUT_CLEANSRC 1
44 
45 enum fieldmatch_parity {
46     FM_PARITY_AUTO   = -1,
47     FM_PARITY_BOTTOM =  0,
48     FM_PARITY_TOP    =  1,
49 };
50 
51 enum matching_mode {
52     MODE_PC,
53     MODE_PC_N,
54     MODE_PC_U,
55     MODE_PC_N_UB,
56     MODE_PCN,
57     MODE_PCN_UB,
58     NB_MODE
59 };
60 
61 enum comb_matching_mode {
62     COMBMATCH_NONE,
63     COMBMATCH_SC,
64     COMBMATCH_FULL,
65     NB_COMBMATCH
66 };
67 
68 enum comb_dbg {
69     COMBDBG_NONE,
70     COMBDBG_PCN,
71     COMBDBG_PCNUB,
72     NB_COMBDBG
73 };
74 
75 typedef struct {
76  const AVClass *class;
77 
78  AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
79  AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
80  int64_t frame_count; ///< output frame counter
81  int got_frame[2]; ///< frame request flag for each input stream
82  int hsub, vsub; ///< chroma subsampling values
83  uint32_t eof; ///< bitmask for end of stream
84  int64_t lastscdiff;
85  int64_t lastn;
86 
87  /* options */
88  int order;
89  int ppsrc;
90  enum matching_mode mode;
91  int field;
92  int mchroma;
93  int y0, y1;
94  int64_t scthresh;
95  double scthresh_flt;
96  enum comb_matching_mode combmatch;
97  int combdbg;
98  int cthresh;
99  int chroma;
100  int blockx, blocky;
101  int combpel;
102 
103  /* misc buffers */
104  uint8_t *map_data[4];
105  int map_linesize[4];
106  uint8_t *cmask_data[4];
107  int cmask_linesize[4];
108  int *c_array;
109  int tpitchy, tpitchuv;
110  uint8_t *tbuffer;
111 } FieldMatchContext;
112 
113 #define OFFSET(x) offsetof(FieldMatchContext, x)
114 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
115 
116 static const AVOption fieldmatch_options[] = {
117  { "order", "specify the assumed field order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "order" },
118  { "auto", "auto detect parity", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "order" },
119  { "bff", "assume bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "order" },
120  { "tff", "assume top field first", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "order" },
121  { "mode", "set the matching mode or strategy to use", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_PC_N}, MODE_PC, NB_MODE-1, FLAGS, "mode" },
122  { "pc", "2-way match (p/c)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC}, INT_MIN, INT_MAX, FLAGS, "mode" },
123  { "pc_n", "2-way match + 3rd match on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N}, INT_MIN, INT_MAX, FLAGS, "mode" },
124  { "pc_u", "2-way match + 3rd match (same order) on combed (p/c + u)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_U}, INT_MIN, INT_MAX, FLAGS, "mode" },
125  { "pc_n_ub", "2-way match + 3rd match on combed + 4th/5th matches if still combed (p/c + u + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PC_N_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
126  { "pcn", "3-way match (p/c/n)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN}, INT_MIN, INT_MAX, FLAGS, "mode" },
127  { "pcn_ub", "3-way match + 4th/5th matches on combed (p/c/n + u/b)", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PCN_UB}, INT_MIN, INT_MAX, FLAGS, "mode" },
128  { "ppsrc", "mark main input as a pre-processed input and activate clean source input stream", OFFSET(ppsrc), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
129  { "field", "set the field to match from", OFFSET(field), AV_OPT_TYPE_INT, {.i64=FM_PARITY_AUTO}, -1, 1, FLAGS, "field" },
130  { "auto", "automatic (same value as 'order')", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_AUTO}, INT_MIN, INT_MAX, FLAGS, "field" },
131  { "bottom", "bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field" },
132  { "top", "top field", 0, AV_OPT_TYPE_CONST, {.i64=FM_PARITY_TOP}, INT_MIN, INT_MAX, FLAGS, "field" },
133  { "mchroma", "set whether or not chroma is included during the match comparisons", OFFSET(mchroma), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
134  { "y0", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y0), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
135  { "y1", "define an exclusion band which excludes the lines between y0 and y1 from the field matching decision", OFFSET(y1), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
136  { "scthresh", "set scene change detection threshold", OFFSET(scthresh_flt), AV_OPT_TYPE_DOUBLE, {.dbl=12}, 0, 100, FLAGS },
137  { "combmatch", "set combmatching mode", OFFSET(combmatch), AV_OPT_TYPE_INT, {.i64=COMBMATCH_SC}, COMBMATCH_NONE, NB_COMBMATCH-1, FLAGS, "combmatching" },
138  { "none", "disable combmatching", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_NONE}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
139  { "sc", "enable combmatching only on scene change", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_SC}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
140  { "full", "enable combmatching all the time", 0, AV_OPT_TYPE_CONST, {.i64=COMBMATCH_FULL}, INT_MIN, INT_MAX, FLAGS, "combmatching" },
141  { "combdbg", "enable comb debug", OFFSET(combdbg), AV_OPT_TYPE_INT, {.i64=COMBDBG_NONE}, COMBDBG_NONE, NB_COMBDBG-1, FLAGS, "dbglvl" },
142  { "none", "no forced calculation", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_NONE}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
143  { "pcn", "calculate p/c/n", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCN}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
144  { "pcnub", "calculate p/c/n/u/b", 0, AV_OPT_TYPE_CONST, {.i64=COMBDBG_PCNUB}, INT_MIN, INT_MAX, FLAGS, "dbglvl" },
145  { "cthresh", "set the area combing threshold used for combed frame detection", OFFSET(cthresh), AV_OPT_TYPE_INT, {.i64= 9}, -1, 0xff, FLAGS },
146  { "chroma", "set whether or not chroma is considered in the combed frame decision", OFFSET(chroma), AV_OPT_TYPE_INT, {.i64= 0}, 0, 1, FLAGS },
147  { "blockx", "set the x-axis size of the window used during combed frame detection", OFFSET(blockx), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
148  { "blocky", "set the y-axis size of the window used during combed frame detection", OFFSET(blocky), AV_OPT_TYPE_INT, {.i64=16}, 4, 1<<9, FLAGS },
149  { "combpel", "set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed", OFFSET(combpel), AV_OPT_TYPE_INT, {.i64=80}, 0, INT_MAX, FLAGS },
150  { NULL }
151 };
152 
153 AVFILTER_DEFINE_CLASS(fieldmatch);
154 
155 static int get_width(const FieldMatchContext *fm, const AVFrame *f, int plane)
156 {
157  return plane ? f->width >> fm->hsub : f->width;
158 }
159 
160 static int get_height(const FieldMatchContext *fm, const AVFrame *f, int plane)
161 {
162  return plane ? f->height >> fm->vsub : f->height;
163 }
164 
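/* Sum of absolute luma differences between two frames; used by filter_frame()
 * as the scene change metric and compared against fm->scthresh (see
 * config_input() for how the scthresh option is scaled). */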
165 static int64_t luma_abs_diff(const AVFrame *f1, const AVFrame *f2)
166 {
167  int x, y;
168  const uint8_t *srcp1 = f1->data[0];
169  const uint8_t *srcp2 = f2->data[0];
170  const int src1_linesize = f1->linesize[0];
171  const int src2_linesize = f2->linesize[0];
172  const int width = f1->width;
173  const int height = f1->height;
174  int64_t acc = 0;
175 
176  for (y = 0; y < height; y++) {
177  for (x = 0; x < width; x++)
178  acc += abs(srcp1[x] - srcp2[x]);
179  srcp1 += src1_linesize;
180  srcp2 += src2_linesize;
181  }
182  return acc;
183 }
184 
185 static void fill_buf(uint8_t *data, int w, int h, int linesize, uint8_t v)
186 {
187  int y;
188 
189  for (y = 0; y < h; y++) {
190  memset(data, v, w);
191  data += linesize;
192  }
193 }
194 
195 static int calc_combed_score(const FieldMatchContext *fm, const AVFrame *src)
196 {
197  int x, y, plane, max_v = 0;
198  const int cthresh = fm->cthresh;
199  const int cthresh6 = cthresh * 6;
200 
201  for (plane = 0; plane < (fm->chroma ? 3 : 1); plane++) {
202  const uint8_t *srcp = src->data[plane];
203  const int src_linesize = src->linesize[plane];
204  const int width = get_width (fm, src, plane);
205  const int height = get_height(fm, src, plane);
206  uint8_t *cmkp = fm->cmask_data[plane];
207  const int cmk_linesize = fm->cmask_linesize[plane];
208 
209  if (cthresh < 0) {
210  fill_buf(cmkp, width, height, cmk_linesize, 0xff);
211  continue;
212  }
213  fill_buf(cmkp, width, height, cmk_linesize, 0);
214 
215  /* [1 -3 4 -3 1] vertical filter */
216 #define FILTER(xm2, xm1, xp1, xp2) \
217  abs( 4 * srcp[x] \
218  -3 * (srcp[x + (xm1)*src_linesize] + srcp[x + (xp1)*src_linesize]) \
219  + (srcp[x + (xm2)*src_linesize] + srcp[x + (xp2)*src_linesize])) > cthresh6
220 
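 /* A pixel is flagged as combed when it differs from both of its vertical
  * neighbours by more than cthresh (the s1/s2 checks below; only one
  * neighbour exists on the first and last lines) and the [1 -3 4 -3 1]
  * kernel above responds with more than 6*cthresh. The kernel cancels out on
  * a linear vertical gradient (1-3+4-3+1 = 0) but reacts strongly to the
  * alternating line pattern produced by a field mismatch. */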
221  /* first line */
222  for (x = 0; x < width; x++) {
223  const int s1 = abs(srcp[x] - srcp[x + src_linesize]);
224  if (s1 > cthresh && FILTER(2, 1, 1, 2))
225  cmkp[x] = 0xff;
226  }
227  srcp += src_linesize;
228  cmkp += cmk_linesize;
229 
230  /* second line */
231  for (x = 0; x < width; x++) {
232  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
233  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
234  if (s1 > cthresh && s2 > cthresh && FILTER(2, -1, 1, 2))
235  cmkp[x] = 0xff;
236  }
237  srcp += src_linesize;
238  cmkp += cmk_linesize;
239 
240  /* all lines minus first two and last two */
241  for (y = 2; y < height-2; y++) {
242  for (x = 0; x < width; x++) {
243  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
244  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
245  if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, 2))
246  cmkp[x] = 0xff;
247  }
248  srcp += src_linesize;
249  cmkp += cmk_linesize;
250  }
251 
252  /* before-last line */
253  for (x = 0; x < width; x++) {
254  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
255  const int s2 = abs(srcp[x] - srcp[x + src_linesize]);
256  if (s1 > cthresh && s2 > cthresh && FILTER(-2, -1, 1, -2))
257  cmkp[x] = 0xff;
258  }
259  srcp += src_linesize;
260  cmkp += cmk_linesize;
261 
262  /* last line */
263  for (x = 0; x < width; x++) {
264  const int s1 = abs(srcp[x] - srcp[x - src_linesize]);
265  if (s1 > cthresh && FILTER(-2, -1, -1, -2))
266  cmkp[x] = 0xff;
267  }
268  }
269 
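 /* Propagate chroma combing into the luma mask: any flagged chroma sample
  * with at least one flagged neighbour marks the corresponding 2x2 block of
  * luma pixels (plus one extra row above or below depending on line parity),
  * so chroma-only combing still contributes to the block scores computed
  * below. The 2x2 mapping corresponds to 4:2:0-style subsampling. */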
270  if (fm->chroma) {
271  uint8_t *cmkp = fm->cmask_data[0];
272  uint8_t *cmkpU = fm->cmask_data[1];
273  uint8_t *cmkpV = fm->cmask_data[2];
274  const int width = src->width >> fm->hsub;
275  const int height = src->height >> fm->vsub;
276  const int cmk_linesize = fm->cmask_linesize[0] << 1;
277  const int cmk_linesizeUV = fm->cmask_linesize[2];
278  uint8_t *cmkpp = cmkp - (cmk_linesize>>1);
279  uint8_t *cmkpn = cmkp + (cmk_linesize>>1);
280  uint8_t *cmkpnn = cmkp + cmk_linesize;
281  for (y = 1; y < height - 1; y++) {
282  cmkpp += cmk_linesize;
283  cmkp += cmk_linesize;
284  cmkpn += cmk_linesize;
285  cmkpnn += cmk_linesize;
286  cmkpV += cmk_linesizeUV;
287  cmkpU += cmk_linesizeUV;
288  for (x = 1; x < width - 1; x++) {
289 #define HAS_FF_AROUND(p, lz) (p[x-1 - lz] == 0xff || p[x - lz] == 0xff || p[x+1 - lz] == 0xff || \
290  p[x-1 ] == 0xff || p[x+1 ] == 0xff || \
291  p[x-1 + lz] == 0xff || p[x + lz] == 0xff || p[x+1 + lz] == 0xff)
292  if ((cmkpV[x] == 0xff && HAS_FF_AROUND(cmkpV, cmk_linesizeUV)) ||
293  (cmkpU[x] == 0xff && HAS_FF_AROUND(cmkpU, cmk_linesizeUV))) {
294  ((uint16_t*)cmkp)[x] = 0xffff;
295  ((uint16_t*)cmkpn)[x] = 0xffff;
296  if (y&1) ((uint16_t*)cmkpp)[x] = 0xffff;
297  else ((uint16_t*)cmkpnn)[x] = 0xffff;
298  }
299  }
300  }
301  }
302 
303  {
304  const int blockx = fm->blockx;
305  const int blocky = fm->blocky;
306  const int xhalf = blockx/2;
307  const int yhalf = blocky/2;
308  const int cmk_linesize = fm->cmask_linesize[0];
309  const uint8_t *cmkp = fm->cmask_data[0] + cmk_linesize;
310  const int width = src->width;
311  const int height = src->height;
312  const int xblocks = ((width+xhalf)/blockx) + 1;
313  const int xblocks4 = xblocks<<2;
314  const int yblocks = ((height+yhalf)/blocky) + 1;
315  int *c_array = fm->c_array;
316  const int arraysize = (xblocks*yblocks)<<2;
317  int heighta = (height/(blocky/2))*(blocky/2);
318  const int widtha = (width /(blockx/2))*(blockx/2);
319  if (heighta == height)
320  heighta = height - yhalf;
321  memset(c_array, 0, arraysize * sizeof(*c_array));
322 
323 #define C_ARRAY_ADD(v) do { \
324  const int box1 = (x / blockx) * 4; \
325  const int box2 = ((x + xhalf) / blockx) * 4; \
326  c_array[temp1 + box1 ] += v; \
327  c_array[temp1 + box2 + 1] += v; \
328  c_array[temp2 + box1 + 2] += v; \
329  c_array[temp2 + box2 + 3] += v; \
330 } while (0)
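/* Block-based scoring: the frame is covered by blockx x blocky windows that
 * overlap by half a block in each direction. temp1/temp2 select the two
 * vertically overlapping rows of windows and box1/box2 the two horizontally
 * overlapping columns, so each combed pixel (flagged together with the
 * pixels directly above and below it) is counted in the four windows that
 * contain it; the final score is the maximum count over all windows. */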
331 
332 #define VERTICAL_HALF(y_start, y_end) do { \
333  for (y = y_start; y < y_end; y++) { \
334  const int temp1 = (y / blocky) * xblocks4; \
335  const int temp2 = ((y + yhalf) / blocky) * xblocks4; \
336  for (x = 0; x < width; x++) \
337  if (cmkp[x - cmk_linesize] == 0xff && \
338  cmkp[x ] == 0xff && \
339  cmkp[x + cmk_linesize] == 0xff) \
340  C_ARRAY_ADD(1); \
341  cmkp += cmk_linesize; \
342  } \
343 } while (0)
344 
345  VERTICAL_HALF(1, yhalf);
346 
347  for (y = yhalf; y < heighta; y += yhalf) {
348  const int temp1 = (y / blocky) * xblocks4;
349  const int temp2 = ((y + yhalf) / blocky) * xblocks4;
350 
351  for (x = 0; x < widtha; x += xhalf) {
352  const uint8_t *cmkp_tmp = cmkp + x;
353  int u, v, sum = 0;
354  for (u = 0; u < yhalf; u++) {
355  for (v = 0; v < xhalf; v++)
356  if (cmkp_tmp[v - cmk_linesize] == 0xff &&
357  cmkp_tmp[v ] == 0xff &&
358  cmkp_tmp[v + cmk_linesize] == 0xff)
359  sum++;
360  cmkp_tmp += cmk_linesize;
361  }
362  if (sum)
363  C_ARRAY_ADD(sum);
364  }
365 
366  for (x = widtha; x < width; x++) {
367  const uint8_t *cmkp_tmp = cmkp + x;
368  int u, sum = 0;
369  for (u = 0; u < yhalf; u++) {
370  if (cmkp_tmp[-cmk_linesize] == 0xff &&
371  cmkp_tmp[ 0] == 0xff &&
372  cmkp_tmp[ cmk_linesize] == 0xff)
373  sum++;
374  cmkp_tmp += cmk_linesize;
375  }
376  if (sum)
377  C_ARRAY_ADD(sum);
378  }
379 
380  cmkp += cmk_linesize * yhalf;
381  }
382 
383  VERTICAL_HALF(heighta, height - 1);
384 
385  for (x = 0; x < arraysize; x++)
386  if (c_array[x] > max_v)
387  max_v = c_array[x];
388  }
389  return max_v;
390 }
391 
392 // the secret is that tbuffer is an interlaced, offset subset of all the lines
393 static void build_abs_diff_mask(const uint8_t *prvp, int prv_linesize,
394  const uint8_t *nxtp, int nxt_linesize,
395  uint8_t *tbuffer, int tbuf_linesize,
396  int width, int height)
397 {
398  int y, x;
399 
400  prvp -= prv_linesize;
401  nxtp -= nxt_linesize;
402  for (y = 0; y < height; y++) {
403  for (x = 0; x < width; x++)
404  tbuffer[x] = FFABS(prvp[x] - nxtp[x]);
405  prvp += prv_linesize;
406  nxtp += nxt_linesize;
407  tbuffer += tbuf_linesize;
408  }
409 }
410 
411 /**
412  * Build a map of which pixels differ a lot and which differ only a little
413  */
414 static void build_diff_map(FieldMatchContext *fm,
415                            const uint8_t *prvp, int prv_linesize,
416  const uint8_t *nxtp, int nxt_linesize,
417  uint8_t *dstp, int dst_linesize, int height,
418  int width, int plane)
419 {
420  int x, y, u, diff, count;
421  int tpitch = plane ? fm->tpitchuv : fm->tpitchy;
422  const uint8_t *dp = fm->tbuffer + tpitch;
423 
424  build_abs_diff_mask(prvp, prv_linesize, nxtp, nxt_linesize,
425  fm->tbuffer, tpitch, width, height>>1);
426 
427  for (y = 2; y < height - 2; y += 2) {
428  for (x = 1; x < width - 1; x++) {
429  diff = dp[x];
430  if (diff > 3) {
431  for (count = 0, u = x-1; u < x+2 && count < 2; u++) {
432  count += dp[u-tpitch] > 3;
433  count += dp[u ] > 3;
434  count += dp[u+tpitch] > 3;
435  }
436  if (count > 1) {
437  dstp[x] = 1;
438  if (diff > 19) {
439  int upper = 0, lower = 0;
440  for (count = 0, u = x-1; u < x+2 && count < 6; u++) {
441  if (dp[u-tpitch] > 19) { count++; upper = 1; }
442  if (dp[u ] > 19) count++;
443  if (dp[u+tpitch] > 19) { count++; lower = 1; }
444  }
445  if (count > 3) {
446  if (upper && lower) {
447  dstp[x] |= 1<<1;
448  } else {
449  int upper2 = 0, lower2 = 0;
450  for (u = FFMAX(x-4,0); u < FFMIN(x+5,width); u++) {
451  if (y != 2 && dp[u-2*tpitch] > 19) upper2 = 1;
452  if ( dp[u- tpitch] > 19) upper = 1;
453  if ( dp[u+ tpitch] > 19) lower = 1;
454  if (y != height-4 && dp[u+2*tpitch] > 19) lower2 = 1;
455  }
456  if ((upper && (lower || upper2)) ||
457  (lower && (upper || lower2)))
458  dstp[x] |= 1<<1;
459  else if (count > 5)
460  dstp[x] |= 1<<2;
461  }
462  }
463  }
464  }
465  }
466  }
467  dp += tpitch;
468  dstp += dst_linesize;
469  }
470 }
471 
472 enum { mP, mC, mN, mB, mU };
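/* The five match candidates above index the combs[]/gen_frames[] arrays:
 * 'c' keeps the current frame as is; 'p' and 'n' take the matched field from
 * the previous/next frame and the opposite field from the current frame;
 * 'u' and 'b' do the reverse, keeping the matched field of the current frame
 * and borrowing the opposite field from the next/previous frame (see
 * create_weave_frame()). */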
473 
474 static int get_field_base(int match, int field)
475 {
476  return match < 3 ? 2 - field : 1 + field;
477 }
478 
479 static AVFrame *select_frame(FieldMatchContext *fm, int match)
480 {
481  if (match == mP || match == mB) return fm->prv;
482  else if (match == mN || match == mU) return fm->nxt;
483  else /* match == mC */ return fm->src;
484 }
485 
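/* Decide between two match candidates. build_diff_map() writes a per-pixel
 * bitmask: bit 0 marks pixels whose temporal difference neighbourhood exceeds
 * a small threshold, bit 1 marks large differences extending both above and
 * below the current line, bit 2 marks clusters of large differences without
 * that two-sided structure. Guided by those bits, the loop below roughly
 * accumulates, for each candidate, how much the weaved field pair would
 * deviate from a [1 4 1] vertical interpolation of the current field
 * (accumPc/accumNc for ordinary areas, accumPm/accumNm and accumPml/accumNml
 * for "motion" areas); the candidate with the smaller deviation wins, with
 * the motion accumulators taking precedence when they are large and clearly
 * favour one side. */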
486 static int compare_fields(FieldMatchContext *fm, int match1, int match2, int field)
487 {
488  int plane, ret;
489  uint64_t accumPc = 0, accumPm = 0, accumPml = 0;
490  uint64_t accumNc = 0, accumNm = 0, accumNml = 0;
491  int norm1, norm2, mtn1, mtn2;
492  float c1, c2, mr;
493  const AVFrame *src = fm->src;
494 
495  for (plane = 0; plane < (fm->mchroma ? 3 : 1); plane++) {
496  int x, y, temp1, temp2, fbase;
497  const AVFrame *prev, *next;
498  uint8_t *mapp = fm->map_data[plane];
499  int map_linesize = fm->map_linesize[plane];
500  const uint8_t *srcp = src->data[plane];
501  const int src_linesize = src->linesize[plane];
502  const int srcf_linesize = src_linesize << 1;
503  int prv_linesize, nxt_linesize;
504  int prvf_linesize, nxtf_linesize;
505  const int width = get_width (fm, src, plane);
506  const int height = get_height(fm, src, plane);
507  const int y0a = fm->y0 >> (plane != 0);
508  const int y1a = fm->y1 >> (plane != 0);
509  const int startx = (plane == 0 ? 8 : 4);
510  const int stopx = width - startx;
511  const uint8_t *srcpf, *srcf, *srcnf;
512  const uint8_t *prvpf, *prvnf, *nxtpf, *nxtnf;
513 
514  fill_buf(mapp, width, height, map_linesize, 0);
515 
516  /* match1 */
517  fbase = get_field_base(match1, field);
518  srcf = srcp + (fbase + 1) * src_linesize;
519  srcpf = srcf - srcf_linesize;
520  srcnf = srcf + srcf_linesize;
521  mapp = mapp + fbase * map_linesize;
522  prev = select_frame(fm, match1);
523  prv_linesize = prev->linesize[plane];
524  prvf_linesize = prv_linesize << 1;
525  prvpf = prev->data[plane] + fbase * prv_linesize; // previous frame, previous field
526  prvnf = prvpf + prvf_linesize; // previous frame, next field
527 
528  /* match2 */
529  fbase = get_field_base(match2, field);
530  next = select_frame(fm, match2);
531  nxt_linesize = next->linesize[plane];
532  nxtf_linesize = nxt_linesize << 1;
533  nxtpf = next->data[plane] + fbase * nxt_linesize; // next frame, previous field
534  nxtnf = nxtpf + nxtf_linesize; // next frame, next field
535 
536  map_linesize <<= 1;
537  if ((match1 >= 3 && field == 1) || (match1 < 3 && field != 1))
538  build_diff_map(fm, prvpf, prvf_linesize, nxtpf, nxtf_linesize,
539  mapp, map_linesize, height, width, plane);
540  else
541  build_diff_map(fm, prvnf, prvf_linesize, nxtnf, nxtf_linesize,
542  mapp + map_linesize, map_linesize, height, width, plane);
543 
544  for (y = 2; y < height - 2; y += 2) {
545  if (y0a == y1a || y < y0a || y > y1a) {
546  for (x = startx; x < stopx; x++) {
547  if (mapp[x] > 0 || mapp[x + map_linesize] > 0) {
548  temp1 = srcpf[x] + (srcf[x] << 2) + srcnf[x]; // [1 4 1]
549 
550  temp2 = abs(3 * (prvpf[x] + prvnf[x]) - temp1);
551  if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
552  accumPc += temp2;
553  if (temp2 > 42) {
554  if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
555  accumPm += temp2;
556  if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
557  accumPml += temp2;
558  }
559 
560  temp2 = abs(3 * (nxtpf[x] + nxtnf[x]) - temp1);
561  if (temp2 > 23 && ((mapp[x]&1) || (mapp[x + map_linesize]&1)))
562  accumNc += temp2;
563  if (temp2 > 42) {
564  if ((mapp[x]&2) || (mapp[x + map_linesize]&2))
565  accumNm += temp2;
566  if ((mapp[x]&4) || (mapp[x + map_linesize]&4))
567  accumNml += temp2;
568  }
569  }
570  }
571  }
572  prvpf += prvf_linesize;
573  prvnf += prvf_linesize;
574  srcpf += srcf_linesize;
575  srcf += srcf_linesize;
576  srcnf += srcf_linesize;
577  nxtpf += nxtf_linesize;
578  nxtnf += nxtf_linesize;
579  mapp += map_linesize;
580  }
581  }
582 
583  if (accumPm < 500 && accumNm < 500 && (accumPml >= 500 || accumNml >= 500) &&
584  FFMAX(accumPml,accumNml) > 3*FFMIN(accumPml,accumNml)) {
585  accumPm = accumPml;
586  accumNm = accumNml;
587  }
588 
589  norm1 = (int)((accumPc / 6.0f) + 0.5f);
590  norm2 = (int)((accumNc / 6.0f) + 0.5f);
591  mtn1 = (int)((accumPm / 6.0f) + 0.5f);
592  mtn2 = (int)((accumNm / 6.0f) + 0.5f);
593  c1 = ((float)FFMAX(norm1,norm2)) / ((float)FFMAX(FFMIN(norm1,norm2),1));
594  c2 = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMIN(mtn1, mtn2), 1));
595  mr = ((float)FFMAX(mtn1, mtn2)) / ((float)FFMAX(FFMAX(norm1,norm2),1));
596  if (((mtn1 >= 500 || mtn2 >= 500) && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1)) ||
597  ((mtn1 >= 1000 || mtn2 >= 1000) && (mtn1*3 < mtn2*2 || mtn2*3 < mtn1*2)) ||
598  ((mtn1 >= 2000 || mtn2 >= 2000) && (mtn1*5 < mtn2*4 || mtn2*5 < mtn1*4)) ||
599  ((mtn1 >= 4000 || mtn2 >= 4000) && c2 > c1))
600  ret = mtn1 > mtn2 ? match2 : match1;
601  else if (mr > 0.005 && FFMAX(mtn1, mtn2) > 150 && (mtn1*2 < mtn2*1 || mtn2*2 < mtn1*1))
602  ret = mtn1 > mtn2 ? match2 : match1;
603  else
604  ret = norm1 > norm2 ? match2 : match1;
605  return ret;
606 }
607 
608 static void copy_fields(const FieldMatchContext *fm, AVFrame *dst,
609  const AVFrame *src, int field)
610 {
611  int plane;
612  for (plane = 0; plane < 4 && src->data[plane]; plane++)
613  av_image_copy_plane(dst->data[plane] + field*dst->linesize[plane], dst->linesize[plane] << 1,
614  src->data[plane] + field*src->linesize[plane], src->linesize[plane] << 1,
615  get_width(fm, src, plane), get_height(fm, src, plane) / 2);
616 }
617 
618 static AVFrame *create_weave_frame(AVFilterContext *ctx, int match, int field,
619  const AVFrame *prv, AVFrame *src, const AVFrame *nxt)
620 {
621  AVFrame *dst;
622  FieldMatchContext *fm = ctx->priv;
623 
624  if (match == mC) {
625  dst = av_frame_clone(src);
626  } else {
627  AVFilterLink *outlink = ctx->outputs[0];
628 
629  dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
630  if (!dst)
631  return NULL;
632  av_frame_copy_props(dst, src);
633 
634  switch (match) {
635  case mP: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, prv, field); break;
636  case mN: copy_fields(fm, dst, src, 1-field); copy_fields(fm, dst, nxt, field); break;
637  case mB: copy_fields(fm, dst, src, field); copy_fields(fm, dst, prv, 1-field); break;
638  case mU: copy_fields(fm, dst, src, field); copy_fields(fm, dst, nxt, 1-field); break;
639  default: av_assert0(0);
640  }
641  }
642  return dst;
643 }
644 
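/* Re-check a match against an alternative candidate using the combed scores.
 * Scores are computed lazily and cached in combs[], generating the weaved
 * frame on demand; the alternative m2 is only preferred when it is clearly
 * less combed than m1 (3x lower, or 2x lower while m1 exceeds combpel), the
 * score difference is at least 30 and m2 itself stays below combpel. */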
645 static int checkmm(AVFilterContext *ctx, int *combs, int m1, int m2,
646  AVFrame **gen_frames, int field)
647 {
648  const FieldMatchContext *fm = ctx->priv;
649 
650 #define LOAD_COMB(mid) do { \
651  if (combs[mid] < 0) { \
652  if (!gen_frames[mid]) \
653  gen_frames[mid] = create_weave_frame(ctx, mid, field, \
654  fm->prv, fm->src, fm->nxt); \
655  combs[mid] = calc_combed_score(fm, gen_frames[mid]); \
656  } \
657 } while (0)
658 
659  LOAD_COMB(m1);
660  LOAD_COMB(m2);
661 
662  if ((combs[m2] * 3 < combs[m1] || (combs[m2] * 2 < combs[m1] && combs[m1] > fm->combpel)) &&
663  abs(combs[m2] - combs[m1]) >= 30 && combs[m2] < fm->combpel)
664  return m2;
665  else
666  return m1;
667 }
668 
669 static const int fxo0m[] = { mP, mC, mN, mB, mU };
670 static const int fxo1m[] = { mN, mC, mP, mU, mB };
671 
672 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
673 {
674  AVFilterContext *ctx = inlink->dst;
675  AVFilterLink *outlink = ctx->outputs[0];
676  FieldMatchContext *fm = ctx->priv;
677  int combs[] = { -1, -1, -1, -1, -1 };
678  int order, field, i, match, sc = 0;
679  const int *fxo;
680  AVFrame *gen_frames[] = { NULL, NULL, NULL, NULL, NULL };
681  AVFrame *dst;
682 
683  /* update frames queue(s) */
684 #define SLIDING_FRAME_WINDOW(prv, src, nxt) do { \
685  if (prv != src) /* 2nd loop exception (1st has prv==src and we don't want to lose src) */ \
686  av_frame_free(&prv); \
687  prv = src; \
688  src = nxt; \
689  if (in) \
690  nxt = in; \
691  if (!prv) \
692  prv = src; \
693  if (!prv) /* received only one frame at that point */ \
694  return 0; \
695  av_assert0(prv && src && nxt); \
696 } while (0)
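 /* The window slides one frame per input: the first input frame is only
  * buffered (the macro returns 0), the second makes prv==src so the first
  * frame is processed with a duplicated "previous" frame, and during the EOF
  * flush (in == NULL) the last frame is processed with src == nxt. Output
  * therefore lags input by one frame. */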
697  if (FF_INLINK_IDX(inlink) == INPUT_MAIN) {
698  SLIDING_FRAME_WINDOW(fm->prv, fm->src, fm->nxt);
699  fm->got_frame[INPUT_MAIN] = 1;
700  } else {
701  SLIDING_FRAME_WINDOW(fm->prv2, fm->src2, fm->nxt2);
702  fm->got_frame[INPUT_CLEANSRC] = 1;
703  }
704  if (!fm->got_frame[INPUT_MAIN] || (fm->ppsrc && !fm->got_frame[INPUT_CLEANSRC]))
705  return 0;
706  fm->got_frame[INPUT_MAIN] = fm->got_frame[INPUT_CLEANSRC] = 0;
707  in = fm->src;
708 
709  /* parity */
710  order = fm->order != FM_PARITY_AUTO ? fm->order : (in->interlaced_frame ? in->top_field_first : 1);
711  field = fm->field != FM_PARITY_AUTO ? fm->field : order;
712  av_assert0(order == 0 || order == 1 || field == 0 || field == 1);
713  fxo = field ^ order ? fxo1m : fxo0m;
714 
715  /* debug mode: we generate all the field combinations and their associated
716  * combed score. XXX: inject as frame metadata? */
717  if (fm->combdbg) {
718  for (i = 0; i < FF_ARRAY_ELEMS(combs); i++) {
719  if (i > mN && fm->combdbg == COMBDBG_PCN)
720  break;
721  gen_frames[i] = create_weave_frame(ctx, i, field, fm->prv, fm->src, fm->nxt);
722  if (!gen_frames[i])
723  return AVERROR(ENOMEM);
724  combs[i] = calc_combed_score(fm, gen_frames[i]);
725  }
726  av_log(ctx, AV_LOG_INFO, "COMBS: %3d %3d %3d %3d %3d\n",
727  combs[0], combs[1], combs[2], combs[3], combs[4]);
728  } else {
729  gen_frames[mC] = av_frame_clone(fm->src);
730  if (!gen_frames[mC])
731  return AVERROR(ENOMEM);
732  }
733 
734  /* p/c selection and optional 3-way p/c/n matches */
735  match = compare_fields(fm, fxo[mC], fxo[mP], field);
736  if (fm->mode == MODE_PCN || fm->mode == MODE_PCN_UB)
737  match = compare_fields(fm, match, fxo[mN], field);
738 
739  /* scene change check */
740  if (fm->combmatch == COMBMATCH_SC) {
741  if (fm->lastn == fm->frame_count - 1) {
742  if (fm->lastscdiff > fm->scthresh)
743  sc = 1;
744  } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
745  sc = 1;
746  }
747 
748  if (!sc) {
749  fm->lastn = fm->frame_count;
750  fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
751  sc = fm->lastscdiff > fm->scthresh;
752  }
753  }
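 /* Note: lastn/lastscdiff cache the src-vs-nxt SAD computed above, so when
  * the next frame becomes the current one the prv-vs-src comparison can be
  * reused instead of being recomputed. */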
754 
755  if (fm->combmatch == COMBMATCH_FULL || (fm->combmatch == COMBMATCH_SC && sc)) {
756  switch (fm->mode) {
757  /* 2-way p/c matches */
758  case MODE_PC:
759  match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
760  break;
761  case MODE_PC_N:
762  match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
763  break;
764  case MODE_PC_U:
765  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
766  break;
767  case MODE_PC_N_UB:
768  match = checkmm(ctx, combs, match, fxo[mN], gen_frames, field);
769  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
770  match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
771  break;
772  /* 3-way p/c/n matches */
773  case MODE_PCN:
774  match = checkmm(ctx, combs, match, match == fxo[mP] ? fxo[mC] : fxo[mP], gen_frames, field);
775  break;
776  case MODE_PCN_UB:
777  match = checkmm(ctx, combs, match, fxo[mU], gen_frames, field);
778  match = checkmm(ctx, combs, match, fxo[mB], gen_frames, field);
779  break;
780  default:
781  av_assert0(0);
782  }
783  }
784 
785  /* get output frame and drop the others */
786  if (fm->ppsrc) {
787  /* field matching was based on a filtered/post-processed input; we now
788   * pick the untouched fields from the clean source */
789  dst = create_weave_frame(ctx, match, field, fm->prv2, fm->src2, fm->nxt2);
790  } else {
791  if (!gen_frames[match]) { // XXX: is that possible?
792  dst = create_weave_frame(ctx, match, field, fm->prv, fm->src, fm->nxt);
793  } else {
794  dst = gen_frames[match];
795  gen_frames[match] = NULL;
796  }
797  }
798  if (!dst)
799  return AVERROR(ENOMEM);
800  for (i = 0; i < FF_ARRAY_ELEMS(gen_frames); i++)
801  av_frame_free(&gen_frames[i]);
802 
803  /* mark the frame we are unable to match properly as interlaced so a proper
804   * de-interlacer can take over */
805  dst->interlaced_frame = combs[match] >= fm->combpel;
806  if (dst->interlaced_frame) {
807  av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
808  fm->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
809  dst->top_field_first = field;
810  }
811  fm->frame_count++;
812 
813  av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
814  " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
815  fm->combpel, match, dst->interlaced_frame ? "YES" : "NO");
816 
817  return ff_filter_frame(outlink, dst);
818 }
819 
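/* Frame requests: pull from the main input and, when ppsrc is set, from the
 * clean source as well. When an input hits EOF, the corresponding bit of
 * fm->eof is set and a NULL frame is pushed through filter_frame() to flush
 * the last buffered frame; request_frame() reports EOF once every active
 * input has been flushed. */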
820 static int request_inlink(AVFilterContext *ctx, int lid)
821 {
822  int ret = 0;
823  FieldMatchContext *fm = ctx->priv;
824 
825  if (!fm->got_frame[lid]) {
826  AVFilterLink *inlink = ctx->inputs[lid];
827  ret = ff_request_frame(inlink);
828  if (ret == AVERROR_EOF) { // flushing
829  fm->eof |= 1 << lid;
830  ret = filter_frame(inlink, NULL);
831  }
832  }
833  return ret;
834 }
835 
836 static int request_frame(AVFilterLink *outlink)
837 {
838  int ret;
839  AVFilterContext *ctx = outlink->src;
840  FieldMatchContext *fm = ctx->priv;
841  const uint32_t eof_mask = 1<<INPUT_MAIN | fm->ppsrc<<INPUT_CLEANSRC;
842 
843  if ((fm->eof & eof_mask) == eof_mask) // flush done?
844  return AVERROR_EOF;
845  if ((ret = request_inlink(ctx, INPUT_MAIN)) < 0)
846  return ret;
847  if (fm->ppsrc && (ret = request_inlink(ctx, INPUT_CLEANSRC)) < 0)
848  return ret;
849  return 0;
850 }
851 
852 static int query_formats(AVFilterContext *ctx)
853 {
854     // TODO: second input source can support >8bit depth
855     static const enum AVPixelFormat pix_fmts[] = {
856         AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
857         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P,
858         AV_PIX_FMT_NONE
859     };
860     ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
861     return 0;
862 }
863 
864 static int config_input(AVFilterLink *inlink)
865 {
866  int ret;
867  AVFilterContext *ctx = inlink->dst;
868  FieldMatchContext *fm = ctx->priv;
869  const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
870  const int w = inlink->w;
871  const int h = inlink->h;
872 
873  fm->scthresh = (int64_t)((w * h * 255.0 * fm->scthresh_flt) / 100.0);
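 /* e.g. with the default scthresh=12 and 720x480 input the threshold is
  * 0.12 * 720 * 480 * 255 ~= 10.6M, compared against the luma SAD returned
  * by luma_abs_diff(). */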
874 
875  if ((ret = av_image_alloc(fm->map_data, fm->map_linesize, w, h, inlink->format, 32)) < 0 ||
876  (ret = av_image_alloc(fm->cmask_data, fm->cmask_linesize, w, h, inlink->format, 32)) < 0)
877  return ret;
878 
879  fm->hsub = pix_desc->log2_chroma_w;
880  fm->vsub = pix_desc->log2_chroma_h;
881 
882  fm->tpitchy = FFALIGN(w, 16);
883  fm->tpitchuv = FFALIGN(w >> 1, 16);
884 
885  fm->tbuffer = av_malloc(h/2 * fm->tpitchy);
886  fm->c_array = av_malloc((((w + fm->blockx/2)/fm->blockx)+1) *
887  (((h + fm->blocky/2)/fm->blocky)+1) *
888  4 * sizeof(*fm->c_array));
889  if (!fm->tbuffer || !fm->c_array)
890  return AVERROR(ENOMEM);
891 
892  return 0;
893 }
894 
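/* The number of inputs depends on the ppsrc option: init always inserts the
 * "main" pad and adds a second "clean_src" pad only when ppsrc is set, which
 * is why the filter declares AVFILTER_FLAG_DYNAMIC_INPUTS and leaves
 * .inputs at NULL. */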
895 static av_cold int fieldmatch_init(AVFilterContext *ctx)
896 {
897  const FieldMatchContext *fm = ctx->priv;
898  AVFilterPad pad = {
899  .name = av_strdup("main"),
900  .type = AVMEDIA_TYPE_VIDEO,
901  .filter_frame = filter_frame,
902  .config_props = config_input,
903  };
904 
905  if (!pad.name)
906  return AVERROR(ENOMEM);
907  ff_insert_inpad(ctx, INPUT_MAIN, &pad);
908 
909  if (fm->ppsrc) {
910  pad.name = av_strdup("clean_src");
911  pad.config_props = NULL;
912  if (!pad.name)
913  return AVERROR(ENOMEM);
914  ff_insert_inpad(ctx, INPUT_CLEANSRC, &pad);
915  }
916 
917  if ((fm->blockx & (fm->blockx - 1)) ||
918  (fm->blocky & (fm->blocky - 1))) {
919  av_log(ctx, AV_LOG_ERROR, "blockx and blocky settings must be power of two\n");
920  return AVERROR(EINVAL);
921  }
922 
923  if (fm->combpel > fm->blockx * fm->blocky) {
924  av_log(ctx, AV_LOG_ERROR, "Combed pixel should not be larger than blockx x blocky\n");
925  return AVERROR(EINVAL);
926  }
927 
928  return 0;
929 }
930 
931 static av_cold void fieldmatch_uninit(AVFilterContext *ctx)
932 {
933  int i;
934  FieldMatchContext *fm = ctx->priv;
935 
936  if (fm->prv != fm->src)
937  av_frame_free(&fm->prv);
938  if (fm->nxt != fm->src)
939  av_frame_free(&fm->nxt);
940  av_frame_free(&fm->src);
941  av_freep(&fm->map_data[0]);
942  av_freep(&fm->cmask_data[0]);
943  av_freep(&fm->tbuffer);
944  av_freep(&fm->c_array);
945  for (i = 0; i < ctx->nb_inputs; i++)
946  av_freep(&ctx->input_pads[i].name);
947 }
948 
949 static int config_output(AVFilterLink *outlink)
950 {
951  AVFilterContext *ctx = outlink->src;
952  const FieldMatchContext *fm = ctx->priv;
953  const AVFilterLink *inlink =
954  ctx->inputs[fm->ppsrc ? INPUT_CLEANSRC : INPUT_MAIN];
955 
956  outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
957  outlink->time_base = inlink->time_base;
958  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
959  outlink->frame_rate = inlink->frame_rate;
960  outlink->w = inlink->w;
961  outlink->h = inlink->h;
962  return 0;
963 }
964 
965 static const AVFilterPad fieldmatch_outputs[] = {
966  {
967  .name = "default",
968  .type = AVMEDIA_TYPE_VIDEO,
969  .request_frame = request_frame,
970  .config_props = config_output,
971  },
972  { NULL }
973 };
974 
975 AVFilter avfilter_vf_fieldmatch = {
976  .name = "fieldmatch",
977  .description = NULL_IF_CONFIG_SMALL("Field matching for inverse telecine."),
978  .query_formats = query_formats,
979  .priv_size = sizeof(FieldMatchContext),
980  .init = fieldmatch_init,
981  .uninit = fieldmatch_uninit,
982  .inputs = NULL,
983  .outputs = fieldmatch_outputs,
984  .priv_class = &fieldmatch_class,
985  .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
986 };