vf_hqdn3d.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Daniel Moreno <comac AT comac DOT darktech DOT org>
3  * Copyright (c) 2010 Baptiste Coudurier
4  * Copyright (c) 2012 Loren Merritt
5  *
6  * This file is part of FFmpeg, ported from MPlayer.
7  *
8  * FFmpeg is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21  */
22 
23 /**
24  * @file
25  * high quality 3d video denoiser, ported from MPlayer
26  * libmpcodecs/vf_hqdn3d.c.
27  */
28 
29 #include <float.h>
30 
31 #include "config.h"
32 #include "libavutil/common.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/intreadwrite.h"
35 #include "libavutil/opt.h"
36 
37 #include "avfilter.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "video.h"
41 #include "vf_hqdn3d.h"
42 
/* Number of fractional LUT bits per pixel difference: 16-bit input uses a
 * finer (8-bit) table, everything else a 4-bit one. `depth` must be a
 * compile-time constant at each use site (see the denoise() dispatcher). */
#define LUT_BITS (depth==16 ? 8 : 4)
/* Load pixel x from src, scaled up to 16-bit precision with half-unit
 * rounding bias. 8-bit reads a byte; deeper formats read an aligned u16. */
#define LOAD(x) (((depth == 8 ? src[x] : AV_RN16A(src + (x) * 2)) << (16 - depth))\
                 + (((1 << (16 - depth)) - 1) >> 1))
/* Store a 16-bit intermediate back to dst at the native bit depth. */
#define STORE(x,val) (depth == 8 ? dst[x] = (val) >> (16 - depth) : \
                      AV_WN16A(dst + (x) * 2, (val) >> (16 - depth)))

/**
 * One IIR low-pass step: pull `cur` toward `prev` by the precomputed
 * correction for their difference.
 *
 * @param prev  previous (already filtered) sample, 16-bit scale
 * @param cur   current sample, 16-bit scale
 * @param coef  correction LUT, pre-offset so it can be indexed by the
 *              signed difference (see precalc_coefs())
 * @param depth bit depth, compile-time constant (selects LUT_BITS)
 * @return      filtered sample
 */
static inline uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
{
    int d = (prev - cur) >> (8 - LUT_BITS);
    return cur + coef[d];
}
55 
58  uint16_t *frame_ant,
59  int w, int h, int sstride, int dstride,
60  int16_t *temporal, int depth)
61 {
62  long x, y;
63  uint32_t tmp;
64 
65  temporal += 256 << LUT_BITS;
66 
67  for (y = 0; y < h; y++) {
68  for (x = 0; x < w; x++) {
69  frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal, depth);
70  STORE(x, tmp);
71  }
72  src += sstride;
73  dst += dstride;
74  frame_ant += w;
75  }
76 }
77 
79 static void denoise_spatial(HQDN3DContext *hqdn3d,
81  uint16_t *line_ant, uint16_t *frame_ant,
82  int w, int h, int sstride, int dstride,
83  int16_t *spatial, int16_t *temporal, int depth)
84 {
85  long x, y;
86  uint32_t pixel_ant;
87  uint32_t tmp;
88 
89  spatial += 256 << LUT_BITS;
90  temporal += 256 << LUT_BITS;
91 
92  /* First line has no top neighbor. Only left one for each tmp and
93  * last frame */
94  pixel_ant = LOAD(0);
95  for (x = 0; x < w; x++) {
96  line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial, depth);
97  frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
98  STORE(x, tmp);
99  }
100 
101  for (y = 1; y < h; y++) {
102  src += sstride;
103  dst += dstride;
104  frame_ant += w;
105  if (hqdn3d->denoise_row[depth]) {
106  hqdn3d->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
107  continue;
108  }
109  pixel_ant = LOAD(0);
110  for (x = 0; x < w-1; x++) {
111  line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
112  pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial, depth);
113  frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
114  STORE(x, tmp);
115  }
116  line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
117  frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
118  STORE(x, tmp);
119  }
120 }
121 
123 static void denoise_depth(HQDN3DContext *hqdn3d,
124  uint8_t *src, uint8_t *dst,
125  uint16_t *line_ant, uint16_t **frame_ant_ptr,
126  int w, int h, int sstride, int dstride,
127  int16_t *spatial, int16_t *temporal, int depth)
128 {
129  // FIXME: For 16bit depth, frame_ant could be a pointer to the previous
130  // filtered frame rather than a separate buffer.
131  long x, y;
132  uint16_t *frame_ant = *frame_ant_ptr;
133  if (!frame_ant) {
134  uint8_t *frame_src = src;
135  *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
136  for (y = 0; y < h; y++, src += sstride, frame_ant += w)
137  for (x = 0; x < w; x++)
138  frame_ant[x] = LOAD(x);
139  src = frame_src;
140  frame_ant = *frame_ant_ptr;
141  }
142 
143  if (spatial[0])
144  denoise_spatial(hqdn3d, src, dst, line_ant, frame_ant,
145  w, h, sstride, dstride, spatial, temporal, depth);
146  else
147  denoise_temporal(src, dst, frame_ant,
148  w, h, sstride, dstride, temporal, depth);
149 }
150 
/* Dispatch to denoise_depth() with a compile-time-constant depth argument so
 * LUT_BITS/LOAD/STORE specialize per bit depth. Depths other than those
 * negotiated in query_formats() fall through and do nothing. */
#define denoise(...) \
    switch (hqdn3d->depth) {\
        case  8: denoise_depth(__VA_ARGS__,  8); break;\
        case  9: denoise_depth(__VA_ARGS__,  9); break;\
        case 10: denoise_depth(__VA_ARGS__, 10); break;\
        case 16: denoise_depth(__VA_ARGS__, 16); break;\
    }
158 
159 static int16_t *precalc_coefs(double dist25, int depth)
160 {
161  int i;
162  double gamma, simil, C;
163  int16_t *ct = av_malloc((512<<LUT_BITS)*sizeof(int16_t));
164  if (!ct)
165  return NULL;
166 
167  gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);
168 
169  for (i = -255<<LUT_BITS; i <= 255<<LUT_BITS; i++) {
170  double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
171  simil = 1.0 - FFABS(f) / 255.0;
172  C = pow(simil, gamma) * 256.0 * f;
173  ct[(256<<LUT_BITS)+i] = lrint(C);
174  }
175 
176  ct[0] = !!dist25;
177  return ct;
178 }
179 
180 #define PARAM1_DEFAULT 4.0
181 #define PARAM2_DEFAULT 3.0
182 #define PARAM3_DEFAULT 6.0
183 
184 static int init(AVFilterContext *ctx)
185 {
186  HQDN3DContext *hqdn3d = ctx->priv;
187 
188  if (!hqdn3d->strength[LUMA_SPATIAL])
190  if (!hqdn3d->strength[CHROMA_SPATIAL])
192  if (!hqdn3d->strength[LUMA_TMP])
194  if (!hqdn3d->strength[CHROMA_TMP])
195  hqdn3d->strength[CHROMA_TMP] = hqdn3d->strength[LUMA_TMP] * hqdn3d->strength[CHROMA_SPATIAL] / hqdn3d->strength[LUMA_SPATIAL];
196 
197  av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
198  hqdn3d->strength[LUMA_SPATIAL], hqdn3d->strength[CHROMA_SPATIAL],
199  hqdn3d->strength[LUMA_TMP], hqdn3d->strength[CHROMA_TMP]);
200 
201  return 0;
202 }
203 
204 static void uninit(AVFilterContext *ctx)
205 {
206  HQDN3DContext *hqdn3d = ctx->priv;
207 
208  av_freep(&hqdn3d->coefs[0]);
209  av_freep(&hqdn3d->coefs[1]);
210  av_freep(&hqdn3d->coefs[2]);
211  av_freep(&hqdn3d->coefs[3]);
212  av_freep(&hqdn3d->line);
213  av_freep(&hqdn3d->frame_prev[0]);
214  av_freep(&hqdn3d->frame_prev[1]);
215  av_freep(&hqdn3d->frame_prev[2]);
216 }
217 
219 {
220  static const enum AVPixelFormat pix_fmts[] = {
241  };
242 
244 
245  return 0;
246 }
247 
248 static int config_input(AVFilterLink *inlink)
249 {
250  HQDN3DContext *hqdn3d = inlink->dst->priv;
251  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
252  int i;
253 
254  hqdn3d->hsub = desc->log2_chroma_w;
255  hqdn3d->vsub = desc->log2_chroma_h;
256  hqdn3d->depth = desc->comp[0].depth_minus1+1;
257 
258  hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
259  if (!hqdn3d->line)
260  return AVERROR(ENOMEM);
261 
262  for (i = 0; i < 4; i++) {
263  hqdn3d->coefs[i] = precalc_coefs(hqdn3d->strength[i], hqdn3d->depth);
264  if (!hqdn3d->coefs[i])
265  return AVERROR(ENOMEM);
266  }
267 
268  if (ARCH_X86)
269  ff_hqdn3d_init_x86(hqdn3d);
270 
271  return 0;
272 }
273 
274 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
275 {
276  HQDN3DContext *hqdn3d = inlink->dst->priv;
277  AVFilterLink *outlink = inlink->dst->outputs[0];
278 
279  AVFrame *out;
280  int direct, c;
281 
282  if (av_frame_is_writable(in)) {
283  direct = 1;
284  out = in;
285  } else {
286  direct = 0;
287  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
288  if (!out) {
289  av_frame_free(&in);
290  return AVERROR(ENOMEM);
291  }
292 
293  av_frame_copy_props(out, in);
294  }
295 
296  for (c = 0; c < 3; c++) {
297  denoise(hqdn3d, in->data[c], out->data[c],
298  hqdn3d->line, &hqdn3d->frame_prev[c],
299  in->width >> (!!c * hqdn3d->hsub),
300  in->height >> (!!c * hqdn3d->vsub),
301  in->linesize[c], out->linesize[c],
302  hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
303  }
304 
305  if (!direct)
306  av_frame_free(&in);
307 
308  return ff_filter_frame(outlink, out);
309 }
310 
311 #define OFFSET(x) offsetof(HQDN3DContext, x)
312 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
313 static const AVOption options[] = {
314  { "luma_spatial", "spatial luma strength", OFFSET(strength[LUMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
315  { "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
316  { "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
317  { "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
318  { NULL },
319 };
320 
321 static const AVClass hqdn3d_class = {
322  .class_name = "hqdn3d",
323  .item_name = av_default_item_name,
324  .option = options,
325  .version = LIBAVUTIL_VERSION_INT,
326 };
327 
329  {
330  .name = "default",
331  .type = AVMEDIA_TYPE_VIDEO,
332  .config_props = config_input,
333  .filter_frame = filter_frame,
334  },
335  { NULL }
336 };
337 
338 
340  {
341  .name = "default",
342  .type = AVMEDIA_TYPE_VIDEO
343  },
344  { NULL }
345 };
346 
348  .name = "hqdn3d",
349  .description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
350 
351  .priv_size = sizeof(HQDN3DContext),
352  .priv_class = &hqdn3d_class,
353  .init = init,
354  .uninit = uninit,
356 
357  .inputs = avfilter_vf_hqdn3d_inputs,
358 
359  .outputs = avfilter_vf_hqdn3d_outputs,
360 };
static const AVOption options[]
Definition: vf_hqdn3d.c:313
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
Definition: pixfmt.h:158
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
AVOption.
Definition: opt.h:251
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:151
#define C
static const AVFilterPad avfilter_vf_hqdn3d_inputs[]
Definition: vf_hqdn3d.c:328
static const AVFilterPad avfilter_vf_hqdn3d_outputs[]
Definition: vf_hqdn3d.c:339
av_default_item_name
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
#define LUMA_SPATIAL
Definition: vf_hqdn3d.h:42
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
Definition: pixfmt.h:154
Sinusoidal phase f
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
#define FLAGS
Definition: vf_hqdn3d.c:312
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
Definition: pixfmt.h:148
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
uint16_t * line
Definition: vf_hqdn3d.h:34
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:128
void(* denoise_row[17])(uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, ptrdiff_t w, int16_t *spatial, int16_t *temporal)
Definition: vf_hqdn3d.h:39
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
void ff_hqdn3d_init_x86(HQDN3DContext *hqdn3d)
output residual component w
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
set threshold d
const char * name
Pad name.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:55
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:86
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
uint16_t * frame_prev[3]
Definition: vf_hqdn3d.h:35
AVOptions.
#define AV_NE(be, le)
Definition: common.h:44
#define PARAM1_DEFAULT
Definition: vf_hqdn3d.c:180
static av_always_inline void denoise_temporal(uint8_t *src, uint8_t *dst, uint16_t *frame_ant, int w, int h, int sstride, int dstride, int16_t *temporal, int depth)
Definition: vf_hqdn3d.c:57
static int init(AVFilterContext *ctx)
Definition: vf_hqdn3d.c:184
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range ...
Definition: pixfmt.h:104
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:81
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
#define CHROMA_SPATIAL
Definition: vf_hqdn3d.h:44
A filter pad used for either input or output.
planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
Definition: pixfmt.h:149
Discrete Time axis x
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:157
uint16_t depth_minus1
number of bits in the component minus 1
Definition: pixdesc.h:43
int width
width and height of the video frame
Definition: frame.h:122
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
#define LOAD(x)
Definition: vf_hqdn3d.c:44
#define ARCH_X86
Definition: config.h:35
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
Definition: pixfmt.h:133
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int16_t * coefs[4]
Definition: vf_hqdn3d.h:33
void * priv
private data for use by the filter
Definition: avfilter.h:545
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:153
double strength[4]
Definition: vf_hqdn3d.h:36
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:361
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
Definition: pixfmt.h:131
static int config_input(AVFilterLink *inlink)
Definition: vf_hqdn3d.c:248
int depth
Definition: v4l.c:62
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
#define AV_LOG_VERBOSE
Definition: log.h:157
#define FFMIN(a, b)
Definition: common.h:58
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
static av_always_inline void denoise_spatial(HQDN3DContext *hqdn3d, uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t *frame_ant, int w, int h, int sstride, int dstride, int16_t *spatial, int16_t *temporal, int depth)
Definition: vf_hqdn3d.c:79
#define FFABS(a)
Definition: common.h:53
static av_always_inline uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
Definition: vf_hqdn3d.c:50
planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
Definition: pixfmt.h:150
planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:159
LIBAVUTIL_VERSION_INT
Definition: eval.c:55
#define OFFSET(x)
Definition: vf_hqdn3d.c:311
NULL
Definition: eval.c:55
planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
Definition: pixfmt.h:129
AVS_Value src
Definition: avisynth_c.h:523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
#define LUMA_TMP
Definition: vf_hqdn3d.h:43
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:74
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
#define LUT_BITS
Definition: vf_hqdn3d.c:43
#define CHROMA_TMP
Definition: vf_hqdn3d.h:45
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
static void uninit(AVFilterContext *ctx)
Definition: vf_hqdn3d.c:204
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
const char * name
filter name
Definition: avfilter.h:437
static av_always_inline void denoise_depth(HQDN3DContext *hqdn3d, uint8_t *src, uint8_t *dst, uint16_t *line_ant, uint16_t **frame_ant_ptr, int w, int h, int sstride, int dstride, int16_t *spatial, int16_t *temporal, int depth)
Definition: vf_hqdn3d.c:123
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
#define PARAM3_DEFAULT
Definition: vf_hqdn3d.c:182
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_hqdn3d.c:274
AVFilter avfilter_vf_hqdn3d
Definition: vf_hqdn3d.c:347
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:155
planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
Definition: pixfmt.h:132
#define PARAM2_DEFAULT
Definition: vf_hqdn3d.c:181
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
common internal and external API header
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
static double c[64]
planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:130
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:82
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:75
function y
Definition: D.m:1
static const AVClass hqdn3d_class
Definition: vf_hqdn3d.c:321
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
An instance of a filter.
Definition: avfilter.h:524
#define denoise(...)
Definition: vf_hqdn3d.c:151
int height
Definition: frame.h:122
static int16_t * precalc_coefs(double dist25, int depth)
Definition: vf_hqdn3d.c:159
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:103
#define av_always_inline
Definition: attributes.h:41
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
internal API functions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
static int query_formats(AVFilterContext *ctx)
Definition: vf_hqdn3d.c:218
planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
Definition: pixfmt.h:156
#define STORE(x, val)
Definition: vf_hqdn3d.c:46
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
Definition: pixfmt.h:152