vf_kerndeint.c
/*
 * Copyright (c) 2012 Jeremy Tran
 * Copyright (c) 2004 Tobias Diedrich
 * Copyright (c) 2003 Donald A. Graft
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Kernel Deinterlacer
 * Ported from MPlayer libmpcodecs/vf_kerndeint.c.
 */

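/*
 * A minimal usage sketch (not part of the original source): assuming the
 * filter is registered under the name "kerndeint", its options can be set
 * with the usual libavfilter key=value syntax, e.g.
 *
 *   ffmpeg -i interlaced.mpg -vf kerndeint=thresh=10:sharp=1 progressive.mpg
 */
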
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    int            frame; ///< frame count, starting from 0
    int            thresh, map, order, sharp, twoway;
    int            vsub;
    int            is_packed_rgb;
    uint8_t       *tmp_data    [4]; ///< temporary plane data buffer
    int            tmp_linesize[4]; ///< temporary plane byte linesize
    int            tmp_bwidth  [4]; ///< temporary plane byte width
} KerndeintContext;

#define OFFSET(x) offsetof(KerndeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption kerndeint_options[] = {
    { "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
    { "map",    "set the map",       OFFSET(map),    AV_OPT_TYPE_INT, {.i64=0},  0,   1, FLAGS },
    { "order",  "set the order",     OFFSET(order),  AV_OPT_TYPE_INT, {.i64=0},  0,   1, FLAGS },
    { "sharp",  "enable sharpening", OFFSET(sharp),  AV_OPT_TYPE_INT, {.i64=0},  0,   1, FLAGS },
    { "twoway", "enable twoway",     OFFSET(twoway), AV_OPT_TYPE_INT, {.i64=0},  0,   1, FLAGS },
    { NULL }
};
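
/*
 * Option semantics, as used by filter_frame() below:
 *  - thresh: per-pixel difference against the previous frame that must be
 *            exceeded before a pixel is re-interpolated (0 means always);
 *  - map:    instead of interpolating, paint detected pixels white
 *            (235/128 for YUV, 0xff bytes for packed RGB) for debugging;
 *  - order:  selects which field is kept as-is and which is rebuilt;
 *  - sharp:  use the longer floating-point kernel instead of the integer one;
 *  - twoway: also mix in taps from the current frame, not only the previous.
 */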

AVFILTER_DEFINE_CLASS(kerndeint);

static av_cold void uninit(AVFilterContext *ctx)
{
    KerndeintContext *kerndeint = ctx->priv;

    av_free(kerndeint->tmp_data[0]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUYV422,
        AV_PIX_FMT_ARGB, AV_PIX_FMT_0RGB,
        AV_PIX_FMT_ABGR, AV_PIX_FMT_0BGR,
        AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB0,
        AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

static int config_props(AVFilterLink *inlink)
{
    KerndeintContext *kerndeint = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    kerndeint->is_packed_rgb = desc->flags & PIX_FMT_RGB;
    kerndeint->vsub = desc->log2_chroma_h;

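    /* The temporary planes keep a copy of the previous input frame, so that
     * filter_frame() can compare each pixel against the previous frame and
     * decide whether to weave or to interpolate. */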
    ret = av_image_alloc(kerndeint->tmp_data, kerndeint->tmp_linesize,
                         inlink->w, inlink->h, inlink->format, 16);
    if (ret < 0)
        return ret;
    memset(kerndeint->tmp_data[0], 0, ret);

    if ((ret = av_image_fill_linesizes(kerndeint->tmp_bwidth, inlink->format, inlink->w)) < 0)
        return ret;

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
    KerndeintContext *kerndeint = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outpic;
    const uint8_t *prvp;   ///< Previous field's pixel line number n
    const uint8_t *prvpp;  ///< Previous field's pixel line number (n - 1)
    const uint8_t *prvpn;  ///< Previous field's pixel line number (n + 1)
    const uint8_t *prvppp; ///< Previous field's pixel line number (n - 2)
    const uint8_t *prvpnn; ///< Previous field's pixel line number (n + 2)
    const uint8_t *prvp4p; ///< Previous field's pixel line number (n - 4)
    const uint8_t *prvp4n; ///< Previous field's pixel line number (n + 4)

    const uint8_t *srcp;   ///< Current field's pixel line number n
    const uint8_t *srcpp;  ///< Current field's pixel line number (n - 1)
    const uint8_t *srcpn;  ///< Current field's pixel line number (n + 1)
    const uint8_t *srcppp; ///< Current field's pixel line number (n - 2)
    const uint8_t *srcpnn; ///< Current field's pixel line number (n + 2)
    const uint8_t *srcp3p; ///< Current field's pixel line number (n - 3)
    const uint8_t *srcp3n; ///< Current field's pixel line number (n + 3)
    const uint8_t *srcp4p; ///< Current field's pixel line number (n - 4)
    const uint8_t *srcp4n; ///< Current field's pixel line number (n + 4)

    uint8_t *dstp, *dstp_saved;
    const uint8_t *srcp_saved;

    int src_linesize, psrc_linesize, dst_linesize, bwidth;
    int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++;
    double valf;

    const int thresh = kerndeint->thresh;
    const int order  = kerndeint->order;
    const int map    = kerndeint->map;
    const int sharp  = kerndeint->sharp;
    const int twoway = kerndeint->twoway;

    const int is_packed_rgb = kerndeint->is_packed_rgb;

    outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!outpic) {
        av_frame_free(&inpic);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(outpic, inpic);
    outpic->interlaced_frame = 0;

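    /* Process each plane in two passes: first weave the kept field straight
     * into the output, then rebuild the other field line by line, choosing
     * per pixel between the previous frame and an interpolation kernel. */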
    for (plane = 0; plane < 4 && inpic->data[plane]; plane++) {
        h = plane == 0 ? inlink->h : inlink->h >> kerndeint->vsub;
        bwidth = kerndeint->tmp_bwidth[plane];

        srcp = srcp_saved = inpic->data[plane];
        src_linesize      = inpic->linesize[plane];
        psrc_linesize     = kerndeint->tmp_linesize[plane];
        dstp = dstp_saved = outpic->data[plane];
        dst_linesize      = outpic->linesize[plane];
        srcp              = srcp_saved + (1 - order) * src_linesize;
        dstp              = dstp_saved + (1 - order) * dst_linesize;

        for (y = 0; y < h; y += 2) {
            memcpy(dstp, srcp, bwidth);
            srcp += 2 * src_linesize;
            dstp += 2 * dst_linesize;
        }

        // Copy through the lines that will be missed below.
        memcpy(dstp_saved + order * dst_linesize,           srcp_saved + (1 - order) * src_linesize,     bwidth);
        memcpy(dstp_saved + (2 + order) * dst_linesize,     srcp_saved + (3 - order) * src_linesize,     bwidth);
        memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, bwidth);
        memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, bwidth);

        /* For the other field choose adaptively between using the previous field
           or the interpolant from the current field. */
        prvp   = kerndeint->tmp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize;
        prvpp  = prvp - psrc_linesize;
        prvppp = prvp - 2 * psrc_linesize;
        prvp4p = prvp - 4 * psrc_linesize;
        prvpn  = prvp + psrc_linesize;
        prvpnn = prvp + 2 * psrc_linesize;
        prvp4n = prvp + 4 * psrc_linesize;

        srcp   = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize;
        srcpp  = srcp - src_linesize;
        srcppp = srcp - 2 * src_linesize;
        srcp3p = srcp - 3 * src_linesize;
        srcp4p = srcp - 4 * src_linesize;

        srcpn  = srcp + src_linesize;
        srcpnn = srcp + 2 * src_linesize;
        srcp3n = srcp + 3 * src_linesize;
        srcp4n = srcp + 4 * src_linesize;

        dstp   = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize;

        for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) {
            for (x = 0; x < bwidth; x++) {
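                /* A pixel counts as "moving" when it or one of its vertical
                 * neighbours differs from the previous frame by more than
                 * thresh; moving pixels are re-interpolated (or highlighted
                 * when map is set), static pixels are copied from the source. */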
                if (thresh == 0 || n == 0 ||
                    (abs((int)prvp[x]  - (int)srcp[x])  > thresh) ||
                    (abs((int)prvpp[x] - (int)srcpp[x]) > thresh) ||
                    (abs((int)prvpn[x] - (int)srcpn[x]) > thresh)) {
                    if (map) {
                        g = x & ~3;

                        if (is_packed_rgb) {
                            AV_WB32(dstp + g, 0xffffffff);
                            x = g + 3;
                        } else if (inlink->format == AV_PIX_FMT_YUYV422) {
                            // y <- 235, u <- 128, y <- 235, v <- 128
                            AV_WB32(dstp + g, 0xeb80eb80);
                            x = g + 3;
                        } else {
                            dstp[x] = plane == 0 ? 235 : 128;
                        }
                    } else {
                        if (is_packed_rgb) {
                            hi = 255;
                            lo = 0;
                        } else if (inlink->format == AV_PIX_FMT_YUYV422) {
                            hi = x & 1 ? 240 : 235;
                            lo = 16;
                        } else {
                            hi = plane == 0 ? 235 : 240;
                            lo = 16;
                        }

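                        /* Interpolation kernel: sharp selects the longer
                         * floating-point FIR, otherwise a simple integer
                         * (8, 2, -1) / 16 kernel is used; twoway additionally
                         * pulls taps from the current frame, not only the
                         * previous one. */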
                        if (sharp) {
                            if (twoway) {
                                valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
                                       + 0.170 * ((int)srcp[x] + (int)prvp[x])
                                       - 0.116 * ((int)srcppp[x] + (int)srcpnn[x] + (int)prvppp[x] + (int)prvpnn[x])
                                       - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
                                       + 0.031 * ((int)srcp4p[x] + (int)srcp4n[x] + (int)prvp4p[x] + (int)prvp4n[x]);
                            } else {
                                valf = + 0.526 * ((int)srcpp[x] + (int)srcpn[x])
                                       + 0.170 * ((int)prvp[x])
                                       - 0.116 * ((int)prvppp[x] + (int)prvpnn[x])
                                       - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
                                       + 0.031 * ((int)prvp4p[x] + (int)prvp4p[x]);
                            }
                            dstp[x] = av_clip(valf, lo, hi);
                        } else {
                            if (twoway) {
                                val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)srcp[x] + (int)prvp[x])
                                       - (int)(srcppp[x]) - (int)(srcpnn[x])
                                       - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
                            } else {
                                val = (8 * ((int)srcpp[x] + (int)srcpn[x]) + 2 * ((int)prvp[x])
                                       - (int)(prvppp[x]) - (int)(prvpnn[x])) >> 4;
                            }
                            dstp[x] = av_clip(val, lo, hi);
                        }
                    }
                } else {
                    dstp[x] = srcp[x];
                }
            }
            prvp   += 2 * psrc_linesize;
            prvpp  += 2 * psrc_linesize;
            prvppp += 2 * psrc_linesize;
            prvpn  += 2 * psrc_linesize;
            prvpnn += 2 * psrc_linesize;
            prvp4p += 2 * psrc_linesize;
            prvp4n += 2 * psrc_linesize;
            srcp   += 2 * src_linesize;
            srcpp  += 2 * src_linesize;
            srcppp += 2 * src_linesize;
            srcp3p += 2 * src_linesize;
            srcp4p += 2 * src_linesize;
            srcpn  += 2 * src_linesize;
            srcpnn += 2 * src_linesize;
            srcp3n += 2 * src_linesize;
            srcp4n += 2 * src_linesize;
            dstp   += 2 * dst_linesize;
        }

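        // Save the current plane as the "previous frame" reference for the next call.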
        srcp = inpic->data[plane];
        dstp = kerndeint->tmp_data[plane];
        av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, bwidth, h);
    }

    av_frame_free(&inpic);
    return ff_filter_frame(outlink, outpic);
}

static const AVFilterPad kerndeint_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad kerndeint_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};


AVFilter avfilter_vf_kerndeint = {
    .name          = "kerndeint",
    .description   = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
    .priv_size     = sizeof(KerndeintContext),
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = kerndeint_inputs,
    .outputs       = kerndeint_outputs,

    .priv_class    = &kerndeint_class,
};