vf_lut.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Compute a look-up table for binding the input value to the output
24  * value, and apply it to input video.
25  */
26 
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/pixdesc.h"
31 #include "avfilter.h"
32 #include "drawutils.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 
37 static const char *const var_names[] = {
38  "w", ///< width of the input video
39  "h", ///< height of the input video
40  "val", ///< input value for the pixel
41  "maxval", ///< max value for the pixel
42  "minval", ///< min value for the pixel
43  "negval", ///< negated value
44  "clipval",
45  NULL
46 };
47 
/* Indexes into LutContext.var_values[]; must stay in sync with var_names[]. */
enum var_name {
    VAR_W,
    VAR_H,
    VAR_VAL,
    VAR_MAXVAL,
    VAR_MINVAL,
    VAR_NEGVAL,
    VAR_CLIPVAL,
    VAR_VARS_NB
};
58 
59 typedef struct {
60  const AVClass *class;
61  uint8_t lut[4][256]; ///< lookup table for each component
62  char *comp_expr_str[4];
63  AVExpr *comp_expr[4];
64  int hsub, vsub;
65  double var_values[VAR_VARS_NB];
66  int is_rgb, is_yuv;
67  int step;
68  int negate_alpha; /* only used by negate */
69 } LutContext;
70 
/* Component indexes for min[]/max[]; Y/R share slot 0, etc. */
#define Y 0
#define U 1
#define V 2
#define R 0
#define G 1
#define B 2
#define A 3

#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
81 
82 static const AVOption options[] = {
83  { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
84  { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
85  { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
86  { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
87  { "y", "set Y expression", OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
88  { "u", "set U expression", OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
89  { "v", "set V expression", OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
90  { "r", "set R expression", OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
91  { "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
92  { "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
93  { "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
94  { NULL },
95 };
96 
97 static av_cold void uninit(AVFilterContext *ctx)
98 {
99  LutContext *lut = ctx->priv;
100  int i;
101 
102  for (i = 0; i < 4; i++) {
103  av_expr_free(lut->comp_expr[i]);
104  lut->comp_expr[i] = NULL;
105  av_freep(&lut->comp_expr_str[i]);
106  }
107 }
108 
#define YUV_FORMATS                                         \
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,  \
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,  \
    AV_PIX_FMT_YUVA420P,                                    \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
    AV_PIX_FMT_YUVJ440P

#define RGB_FORMATS                             \
    AV_PIX_FMT_ARGB,  AV_PIX_FMT_RGBA,          \
    AV_PIX_FMT_ABGR,  AV_PIX_FMT_BGRA,          \
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24
124 
126 {
127  LutContext *lut = ctx->priv;
128 
129  const enum AVPixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts :
131 
133  return 0;
134 }
135 
136 /**
137  * Clip value val in the minval - maxval range.
138  */
139 static double clip(void *opaque, double val)
140 {
141  LutContext *lut = opaque;
142  double minval = lut->var_values[VAR_MINVAL];
143  double maxval = lut->var_values[VAR_MAXVAL];
144 
145  return av_clip(val, minval, maxval);
146 }
147 
148 /**
149  * Compute gamma correction for value val, assuming the minval-maxval
150  * range, val is clipped to a value contained in the same interval.
151  */
152 static double compute_gammaval(void *opaque, double gamma)
153 {
154  LutContext *lut = opaque;
155  double val = lut->var_values[VAR_CLIPVAL];
156  double minval = lut->var_values[VAR_MINVAL];
157  double maxval = lut->var_values[VAR_MAXVAL];
158 
159  return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval)+minval;
160 }
161 
162 static double (* const funcs1[])(void *, double) = {
163  (void *)clip,
164  (void *)compute_gammaval,
165  NULL
166 };
167 
168 static const char * const funcs1_names[] = {
169  "clip",
170  "gammaval",
171  NULL
172 };
173 
174 static int config_props(AVFilterLink *inlink)
175 {
176  AVFilterContext *ctx = inlink->dst;
177  LutContext *lut = ctx->priv;
178  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
179  uint8_t rgba_map[4]; /* component index -> RGBA color index map */
180  int min[4], max[4];
181  int val, color, ret;
182 
183  lut->hsub = desc->log2_chroma_w;
184  lut->vsub = desc->log2_chroma_h;
185 
186  lut->var_values[VAR_W] = inlink->w;
187  lut->var_values[VAR_H] = inlink->h;
188 
189  switch (inlink->format) {
190  case AV_PIX_FMT_YUV410P:
191  case AV_PIX_FMT_YUV411P:
192  case AV_PIX_FMT_YUV420P:
193  case AV_PIX_FMT_YUV422P:
194  case AV_PIX_FMT_YUV440P:
195  case AV_PIX_FMT_YUV444P:
196  case AV_PIX_FMT_YUVA420P:
197  min[Y] = min[U] = min[V] = 16;
198  max[Y] = 235;
199  max[U] = max[V] = 240;
200  min[A] = 0; max[A] = 255;
201  break;
202  default:
203  min[0] = min[1] = min[2] = min[3] = 0;
204  max[0] = max[1] = max[2] = max[3] = 255;
205  }
206 
207  lut->is_yuv = lut->is_rgb = 0;
208  if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1;
209  else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1;
210 
211  if (lut->is_rgb) {
212  ff_fill_rgba_map(rgba_map, inlink->format);
213  lut->step = av_get_bits_per_pixel(desc) >> 3;
214  }
215 
216  for (color = 0; color < desc->nb_components; color++) {
217  double res;
218  int comp = lut->is_rgb ? rgba_map[color] : color;
219 
220  /* create the parsed expression */
221  ret = av_expr_parse(&lut->comp_expr[color], lut->comp_expr_str[color],
222  var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
223  if (ret < 0) {
224  av_log(ctx, AV_LOG_ERROR,
225  "Error when parsing the expression '%s' for the component %d and color %d.\n",
226  lut->comp_expr_str[comp], comp, color);
227  return AVERROR(EINVAL);
228  }
229 
230  /* compute the lut */
231  lut->var_values[VAR_MAXVAL] = max[color];
232  lut->var_values[VAR_MINVAL] = min[color];
233 
234  for (val = 0; val < 256; val++) {
235  lut->var_values[VAR_VAL] = val;
236  lut->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
237  lut->var_values[VAR_NEGVAL] =
238  av_clip(min[color] + max[color] - lut->var_values[VAR_VAL],
239  min[color], max[color]);
240 
241  res = av_expr_eval(lut->comp_expr[color], lut->var_values, lut);
242  if (isnan(res)) {
243  av_log(ctx, AV_LOG_ERROR,
244  "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
245  lut->comp_expr_str[color], val, comp);
246  return AVERROR(EINVAL);
247  }
248  lut->lut[comp][val] = av_clip((int)res, min[color], max[color]);
249  av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]);
250  }
251  }
252 
253  return 0;
254 }
255 
256 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
257 {
258  AVFilterContext *ctx = inlink->dst;
259  LutContext *lut = ctx->priv;
260  AVFilterLink *outlink = ctx->outputs[0];
261  AVFrame *out;
262  uint8_t *inrow, *outrow, *inrow0, *outrow0;
263  int i, j, plane, direct = 0;
264 
265  if (av_frame_is_writable(in)) {
266  direct = 1;
267  out = in;
268  } else {
269  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
270  if (!out) {
271  av_frame_free(&in);
272  return AVERROR(ENOMEM);
273  }
274  av_frame_copy_props(out, in);
275  }
276 
277  if (lut->is_rgb) {
278  /* packed */
279  inrow0 = in ->data[0];
280  outrow0 = out->data[0];
281 
282  for (i = 0; i < in->height; i ++) {
283  int w = inlink->w;
284  const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut;
285  inrow = inrow0;
286  outrow = outrow0;
287  for (j = 0; j < w; j++) {
288  switch (lut->step) {
289  case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
290  case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
291  case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
292  default: outrow[0] = tab[0][inrow[0]];
293  }
294  outrow += lut->step;
295  inrow += lut->step;
296  }
297  inrow0 += in ->linesize[0];
298  outrow0 += out->linesize[0];
299  }
300  } else {
301  /* planar */
302  for (plane = 0; plane < 4 && in->data[plane]; plane++) {
303  int vsub = plane == 1 || plane == 2 ? lut->vsub : 0;
304  int hsub = plane == 1 || plane == 2 ? lut->hsub : 0;
305 
306  inrow = in ->data[plane];
307  outrow = out->data[plane];
308 
309  for (i = 0; i < (in->height + (1<<vsub) - 1)>>vsub; i ++) {
310  const uint8_t *tab = lut->lut[plane];
311  int w = (inlink->w + (1<<hsub) - 1)>>hsub;
312  for (j = 0; j < w; j++)
313  outrow[j] = tab[inrow[j]];
314  inrow += in ->linesize[plane];
315  outrow += out->linesize[plane];
316  }
317  }
318  }
319 
320  if (!direct)
321  av_frame_free(&in);
322 
323  return ff_filter_frame(outlink, out);
324 }
325 
326 static const AVFilterPad inputs[] = {
327  { .name = "default",
328  .type = AVMEDIA_TYPE_VIDEO,
329  .filter_frame = filter_frame,
330  .config_props = config_props,
331  },
332  { .name = NULL}
333 };
334 static const AVFilterPad outputs[] = {
335  { .name = "default",
336  .type = AVMEDIA_TYPE_VIDEO, },
337  { .name = NULL}
338 };
339 
/* Instantiate one AVFilter sharing the common pads/callbacks; only the
 * name, description, option class and init callback differ per filter. */
#define DEFINE_LUT_FILTER(name_, description_)                          \
    AVFilter avfilter_vf_##name_ = {                                    \
        .name          = #name_,                                        \
        .description   = NULL_IF_CONFIG_SMALL(description_),            \
        .priv_size     = sizeof(LutContext),                            \
        .priv_class    = &name_ ## _class,                              \
                                                                        \
        .init          = name_##_init,                                  \
        .uninit        = uninit,                                        \
        .query_formats = query_formats,                                 \
                                                                        \
        .inputs        = inputs,                                        \
        .outputs       = outputs,                                       \
    }
354 
#if CONFIG_LUT_FILTER

#define lut_options options
AVFILTER_DEFINE_CLASS(lut);

/* Generic lut: colorspace is detected at configuration time. */
static int lut_init(AVFilterContext *ctx)
{
    return 0;
}

DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif
367 
#if CONFIG_LUTYUV_FILTER

#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);

/* lutyuv: restrict format negotiation to YUV pixel formats. */
static int lutyuv_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;

    lut->is_yuv = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif
384 
#if CONFIG_LUTRGB_FILTER

#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);

/* lutrgb: restrict format negotiation to RGB pixel formats. */
static int lutrgb_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;

    lut->is_rgb = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif
401 
#if CONFIG_NEGATE_FILTER

static const AVOption negate_options[] = {
    /* declare the valid range 0..1 so that negate_alpha=1 passes the
     * AVOption range check (a missing range defaults to min=max=0) */
    { "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(negate);

/* negate: preset every component expression to "negval" ("val" for the
 * alpha component unless negate_alpha is set). */
static int negate_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;
    int i;

    av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", lut->negate_alpha);

    for (i = 0; i < 4; i++) {
        lut->comp_expr_str[i] = av_strdup((i == 3 && !lut->negate_alpha) ?
                                          "val" : "negval");
        if (!lut->comp_expr_str[i]) {
            uninit(ctx);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

DEFINE_LUT_FILTER(negate, "Negate input video.");

#endif
char * comp_expr_str[4]
Definition: vf_lut.c:62
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
AVOption.
Definition: opt.h:251
#define G
Definition: vf_lut.c:75
#define A
Definition: vf_lut.c:77
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
external API header
if max(w)>1 w=0.9 *w/max(w)
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
Definition: pixdesc.c:1731
uint8_t lut[4][256]
lookup table for each component
Definition: vf_lut.c:61
#define RGB_FORMATS
Definition: vf_lut.c:116
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: vf_lut.c:256
static const AVOption options[]
Definition: vf_lut.c:82
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
static const char *const funcs1_names[]
Definition: vf_lut.c:168
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:640
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
static enum AVPixelFormat yuv_pix_fmts[]
Definition: vf_lut.c:121
output residual component w
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
const char * name
Pad name.
static const char *const var_names[]
Definition: vf_lut.c:37
#define R
Definition: vf_lut.c:74
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:105
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:77
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
static double(*const funcs1[])(void *, double)
Definition: vf_lut.c:162
Definition: eval.c:140
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
int ff_fmt_is_in(int fmt, const int *fmts)
Tell is a format is contained in the provided list terminated by -1.
Definition: formats.c:254
A filter pad used for either input or output.
#define YUV_FORMATS
Definition: vf_lut.c:109
static enum AVPixelFormat all_pix_fmts[]
Definition: vf_lut.c:123
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
void * priv
private data for use by the filter
Definition: avfilter.h:545
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:361
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
double var_values[VAR_VARS_NB]
Definition: vf_lut.c:65
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:57
int hsub
Definition: vf_lut.c:64
var_name
#define OFFSET(x)
Definition: vf_lut.c:79
ret
Definition: avfilter.c:821
#define U
Definition: vf_lut.c:72
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:33
NULL
Definition: eval.c:55
Definition: vf_lut.c:51
misc drawing utilities
int is_rgb
Definition: vf_lut.c:66
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:302
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:220
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
static const AVFilterPad outputs[]
Definition: vf_lut.c:334
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
static int query_formats(AVFilterContext *ctx)
Definition: vf_lut.c:125
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:74
Describe the class of an AVClass context structure.
Definition: log.h:50
synthesis window for stochastic i
Definition: vf_lut.c:49
int step
Definition: vf_lut.c:67
AVExpr * comp_expr[4]
Definition: vf_lut.c:63
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
#define DEFINE_LUT_FILTER(name_, description_)
Definition: vf_lut.c:340
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
static enum AVPixelFormat rgb_pix_fmts[]
Definition: vf_lut.c:122
static const AVFilterPad inputs[]
Definition: vf_lut.c:326
static int config_props(AVFilterLink *inlink)
Definition: vf_lut.c:174
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_lut.c:97
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:162
#define V
Definition: vf_lut.c:73
common internal and external API header
static double clip(void *opaque, double val)
Clip value val in the minval - maxval range.
Definition: vf_lut.c:139
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
#define Y
Definition: vf_lut.c:71
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:75
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:691
#define AVFILTER_DEFINE_CLASS(fname)
#define FLAGS
Definition: vf_lut.c:80
int negate_alpha
Definition: vf_lut.c:68
int vsub
Definition: vf_lut.c:64
static const struct twinvq_data tab
An instance of a filter.
Definition: avfilter.h:524
static double compute_gammaval(void *opaque, double gamma)
Compute gamma correction for value val, assuming the minval-maxval range, val is clipped to a value c...
Definition: vf_lut.c:152
int height
Definition: frame.h:122
static void comp(unsigned char *dst, int dst_stride, unsigned char *src, int src_stride, int add)
Definition: eamad.c:71
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:103
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
internal API functions
#define B
Definition: vf_lut.c:76
float min
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
int is_yuv
Definition: vf_lut.c:66
Definition: vf_lut.c:50
for(j=16;j >0;--j)
simple arithmetic expression evaluator