vf_boxblur.c
/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Apply a boxblur filter to the input video.
 * Ported from MPlayer libmpcodecs/vf_boxblur.c.
 */

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
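
/* Variable names that may be used in the luma/chroma/alpha radius
 * expressions; config_input() below binds them to the input width and
 * height ("w", "h"), the chroma plane width and height ("cw", "ch") and
 * the horizontal/vertical chroma subsampling factors ("hsub", "vsub"). */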
static const char *const var_names[] = {
    "w",
    "h",
    "cw",
    "ch",
    "hsub",
    "vsub",
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_CW,
    VAR_CH,
    VAR_HSUB,
    VAR_VSUB,
    VARS_NB
};

typedef struct {
    int radius;
    int power;
    char *radius_expr;
} FilterParam;

typedef struct {
    const AVClass *class;
    FilterParam luma_param;
    FilterParam chroma_param;
    FilterParam alpha_param;

    int hsub, vsub;
    int radius[4];
    int power[4];
    uint8_t *temp[2];   ///< temporary buffer used in blur_power()
} BoxBlurContext;

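/* Component indices used to address the per-plane radius[] and power[]
 * arrays: the Y and A planes are processed at full resolution, while the
 * U and V planes use the chroma-subsampled width/height (see filter_frame()). */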
#define Y 0
#define U 1
#define V 2
#define A 3

static av_cold int init(AVFilterContext *ctx)
{
    BoxBlurContext *boxblur = ctx->priv;

    if (!boxblur->luma_param.radius_expr) {
        av_log(ctx, AV_LOG_ERROR, "Luma radius expression is not set.\n");
        return AVERROR(EINVAL);
    }

    /* fill missing params */
    if (!boxblur->chroma_param.radius_expr) {
        boxblur->chroma_param.radius_expr = av_strdup(boxblur->luma_param.radius_expr);
        if (!boxblur->chroma_param.radius_expr)
            return AVERROR(ENOMEM);
    }
    if (boxblur->chroma_param.power < 0)
        boxblur->chroma_param.power = boxblur->luma_param.power;

    if (!boxblur->alpha_param.radius_expr) {
        boxblur->alpha_param.radius_expr = av_strdup(boxblur->luma_param.radius_expr);
        if (!boxblur->alpha_param.radius_expr)
            return AVERROR(ENOMEM);
    }
    if (boxblur->alpha_param.power < 0)
        boxblur->alpha_param.power = boxblur->luma_param.power;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BoxBlurContext *boxblur = ctx->priv;

    av_freep(&boxblur->temp[0]);
    av_freep(&boxblur->temp[1]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUV440P,  AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

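/* config_input() evaluates the radius expressions against the input
 * geometry. For example, with a 640x480 YUV420P input: w=640, h=480,
 * cw=320, ch=240, hsub=2, vsub=2, so a radius expression of "cw/10"
 * evaluates to 32. */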
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    int w = inlink->w, h = inlink->h;
    int cw, ch;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) ||
        !(boxblur->temp[1] = av_malloc(FFMAX(w, h))))
        return AVERROR(ENOMEM);

    boxblur->hsub = desc->log2_chroma_w;
    boxblur->vsub = desc->log2_chroma_h;

    var_values[VAR_W]    = inlink->w;
    var_values[VAR_H]    = inlink->h;
    var_values[VAR_CW]   = cw = w>>boxblur->hsub;
    var_values[VAR_CH]   = ch = h>>boxblur->vsub;
    var_values[VAR_HSUB] = 1<<boxblur->hsub;
    var_values[VAR_VSUB] = 1<<boxblur->vsub;

#define EVAL_RADIUS_EXPR(comp)                                          \
    expr = boxblur->comp##_param.radius_expr;                           \
    ret = av_expr_parse_and_eval(&res, expr, var_names, var_values,     \
                                 NULL, NULL, NULL, NULL, NULL, 0, ctx); \
    boxblur->comp##_param.radius = res;                                 \
    if (ret < 0) {                                                      \
        av_log(NULL, AV_LOG_ERROR,                                      \
               "Error when evaluating " #comp " radius expression '%s'\n", expr); \
        return ret;                                                     \
    }
    EVAL_RADIUS_EXPR(luma);
    EVAL_RADIUS_EXPR(chroma);
    EVAL_RADIUS_EXPR(alpha);

    av_log(ctx, AV_LOG_VERBOSE,
           "luma_radius:%d luma_power:%d "
           "chroma_radius:%d chroma_power:%d "
           "alpha_radius:%d alpha_power:%d "
           "w:%d chroma_w:%d h:%d chroma_h:%d\n",
           boxblur->luma_param  .radius, boxblur->luma_param  .power,
           boxblur->chroma_param.radius, boxblur->chroma_param.power,
           boxblur->alpha_param .radius, boxblur->alpha_param .power,
           w, cw, h, ch);

#define CHECK_RADIUS_VAL(w_, h_, comp)                                  \
    if (boxblur->comp##_param.radius < 0 ||                             \
        2*boxblur->comp##_param.radius > FFMIN(w_, h_)) {               \
        av_log(ctx, AV_LOG_ERROR,                                       \
               "Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
               boxblur->comp##_param.radius, FFMIN(w_, h_)/2);          \
        return AVERROR(EINVAL);                                         \
    }
    CHECK_RADIUS_VAL(w,  h,  luma);
    CHECK_RADIUS_VAL(cw, ch, chroma);
    CHECK_RADIUS_VAL(w,  h,  alpha);

    boxblur->radius[Y] = boxblur->luma_param.radius;
    boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius;
    boxblur->radius[A] = boxblur->alpha_param.radius;

    boxblur->power[Y] = boxblur->luma_param.power;
    boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power;
    boxblur->power[A] = boxblur->alpha_param.power;

    return 0;
}

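/* Note on the normalization in blur() below: instead of dividing the running
 * sum by the box length (2*radius+1) for every pixel, the length is inverted
 * once in 16.16 fixed point. For radius=2 the length is 5 and
 * inv = (65536 + 2) / 5 = 13107, so a sum of 1000 maps to
 * (1000*13107 + 32768) >> 16 = 200, i.e. the rounded average. */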
static inline void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
                        int len, int radius)
{
    /* Naive boxblur would sum source pixels from x-radius .. x+radius
     * for destination pixel x. That would be O(radius*width).
     * If you now look at what source pixels represent 2 consecutive
     * output pixels, then you see they are almost identical and only
     * differ by 2 pixels, like:
     * src0       111111111
     * dst0           1
     * src1        111111111
     * dst1            1
     * src0-src1  1       -1
     * so when you know one output pixel you can find the next by just adding
     * and subtracting 1 input pixel.
     * The following code adopts this faster variant.
     */
    const int length = radius*2 + 1;
    const int inv = ((1<<16) + length/2)/length;
    int x, sum = 0;

    for (x = 0; x < radius; x++)
        sum += src[x*src_step]<<1;
    sum += src[radius*src_step];

    for (x = 0; x <= radius; x++) {
        sum += src[(radius+x)*src_step] - src[(radius-x)*src_step];
        dst[x*dst_step] = (sum*inv + (1<<15))>>16;
    }

    for (; x < len-radius; x++) {
        sum += src[(radius+x)*src_step] - src[(x-radius-1)*src_step];
        dst[x*dst_step] = (sum*inv + (1<<15))>>16;
    }

    for (; x < len; x++) {
        sum += src[(2*len-radius-x-1)*src_step] - src[(x-radius-1)*src_step];
        dst[x*dst_step] = (sum*inv + (1<<15))>>16;
    }
}
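
/* blur_power() applies the 1-D box blur `power` times, ping-ponging between
 * the two temporary line buffers before the result reaches dst; with a zero
 * radius or power the source line is simply copied through unchanged. */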

static inline void blur_power(uint8_t *dst, int dst_step, const uint8_t *src, int src_step,
                              int len, int radius, int power, uint8_t *temp[2])
{
    uint8_t *a = temp[0], *b = temp[1];

    if (radius && power) {
        blur(a, 1, src, src_step, len, radius);
        for (; power > 2; power--) {
            uint8_t *c;
            blur(b, 1, a, 1, len, radius);
            c = a; a = b; b = c;
        }
        if (power > 1) {
            blur(dst, dst_step, a, 1, len, radius);
        } else {
            int i;
            for (i = 0; i < len; i++)
                dst[i*dst_step] = a[i];
        }
    } else {
        int i;
        for (i = 0; i < len; i++)
            dst[i*dst_step] = src[i*src_step];
    }
}

static void hblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
                  int w, int h, int radius, int power, uint8_t *temp[2])
{
    int y;

    if (radius == 0 && dst == src)
        return;

    for (y = 0; y < h; y++)
        blur_power(dst + y*dst_linesize, 1, src + y*src_linesize, 1,
                   w, radius, power, temp);
}

static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize,
                  int w, int h, int radius, int power, uint8_t *temp[2])
{
    int x;

    if (radius == 0 && dst == src)
        return;

    for (x = 0; x < w; x++)
        blur_power(dst + x, dst_linesize, src + x, src_linesize,
                   h, radius, power, temp);
}
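
/* filter_frame() combines a horizontal and a vertical pass per plane: the two
 * 1-D passes together implement the separable 2-D box kernel. The U and V
 * planes use the chroma-subsampled width and height computed below. */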

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int plane;
    int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub;
    int w[4] = { inlink->w, cw, cw, inlink->w };
    int h[4] = { in->height, ch, ch, in->height };

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < 4 && in->data[plane]; plane++)
        hblur(out->data[plane], out->linesize[plane],
              in ->data[plane], in ->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    for (plane = 0; plane < 4 && in->data[plane]; plane++)
        vblur(out->data[plane], out->linesize[plane],
              out->data[plane], out->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

#define OFFSET(x) offsetof(BoxBlurContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

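/* Option table. chroma_* and alpha_* default to unset (NULL radius
 * expression, power -1); init() above fills them in from the corresponding
 * luma values when they are left at their defaults. */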
static const AVOption boxblur_options[] = {
    { "luma_radius", "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
    { "lr",          "Radius of the luma blurring box", OFFSET(luma_param.radius_expr), AV_OPT_TYPE_STRING, {.str="2"}, .flags = FLAGS },
    { "luma_power",  "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },
    { "lp",          "How many times should the boxblur be applied to luma", OFFSET(luma_param.power), AV_OPT_TYPE_INT, {.i64=2}, 0, INT_MAX, .flags = FLAGS },

    { "chroma_radius", "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "cr",            "Radius of the chroma blurring box", OFFSET(chroma_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "chroma_power",  "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "cp",            "How many times should the boxblur be applied to chroma", OFFSET(chroma_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },

    { "alpha_radius", "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "ar",           "Radius of the alpha blurring box", OFFSET(alpha_param.radius_expr), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "alpha_power",  "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },
    { "ap",           "How many times should the boxblur be applied to alpha", OFFSET(alpha_param.power), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, .flags = FLAGS },

    { NULL }
};

AVFILTER_DEFINE_CLASS(boxblur);

static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_boxblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_boxblur = {
    .name          = "boxblur",
    .description   = NULL_IF_CONFIG_SMALL("Blur the input."),
    .priv_size     = sizeof(BoxBlurContext),
    .priv_class    = &boxblur_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = avfilter_vf_boxblur_inputs,
    .outputs       = avfilter_vf_boxblur_outputs,
};
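
/*
 * Illustrative sketch (not part of this filter): one way a libavfilter user
 * might instantiate "boxblur" programmatically, assuming the graph API
 * (avfilter_graph_create_filter()) is available.  The surrounding graph
 * setup (buffer source/sink, linking, avfilter_graph_config()) is omitted,
 * and the "graph" / "blur" names are placeholders chosen for the example.
 */
#if 0
static int create_boxblur(AVFilterGraph *graph, AVFilterContext **blur_ctx)
{
    /* option names match boxblur_options[] above; unset chroma/alpha
     * options are inherited from luma in init() */
    return avfilter_graph_create_filter(blur_ctx,
                                        avfilter_get_by_name("boxblur"),
                                        "blur", "luma_radius=2:luma_power=1",
                                        NULL, graph);
}
#endif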