vf_colorchannelmixer.c — FFmpeg video filter that remixes R/G/B/A color channels using per-channel gain lookup tables.
1 /*
2  * Copyright (c) 2013 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include "libavutil/opt.h"
22 #include "avfilter.h"
23 #include "drawutils.h"
24 #include "formats.h"
25 #include "internal.h"
26 #include "video.h"
27 
28 #define R 0
29 #define G 1
30 #define B 2
31 #define A 3
32 
33 typedef struct {
34  const AVClass *class;
35  double rr, rg, rb, ra;
36  double gr, gg, gb, ga;
37  double br, bg, bb, ba;
38  double ar, ag, ab, aa;
39 
40  int *lut[4][4];
41 
42  int *buffer;
43 
44  uint8_t rgba_map[4];
46 
47 #define OFFSET(x) offsetof(ColorChannelMixerContext, x)
48 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
50  { "rr", "set the red gain for the red channel", OFFSET(rr), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
51  { "rg", "set the green gain for the red channel", OFFSET(rg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
52  { "rb", "set the blue gain for the red channel", OFFSET(rb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
53  { "ra", "set the alpha gain for the red channel", OFFSET(ra), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
54  { "gr", "set the red gain for the green channel", OFFSET(gr), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
55  { "gg", "set the green gain for the green channel", OFFSET(gg), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
56  { "gb", "set the blue gain for the green channel", OFFSET(gb), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
57  { "ga", "set the alpha gain for the green channel", OFFSET(ga), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
58  { "br", "set the red gain for the blue channel", OFFSET(br), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
59  { "bg", "set the green gain for the blue channel", OFFSET(bg), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
60  { "bb", "set the blue gain for the blue channel", OFFSET(bb), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
61  { "ba", "set the alpha gain for the blue channel", OFFSET(ba), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
62  { "ar", "set the red gain for the alpha channel", OFFSET(ar), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
63  { "ag", "set the green gain for the alpha channel", OFFSET(ag), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
64  { "ab", "set the blue gain for the alpha channel", OFFSET(ab), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -2, 2, FLAGS },
65  { "aa", "set the alpha gain for the alpha channel", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -2, 2, FLAGS },
66  { NULL }
67 };
68 
69 AVFILTER_DEFINE_CLASS(colorchannelmixer);
70 
72 {
73  static const enum AVPixelFormat pix_fmts[] = {
82  };
83 
85  return 0;
86 }
87 
88 static int config_output(AVFilterLink *outlink)
89 {
90  AVFilterContext *ctx = outlink->src;
92  int i, j, size, *buffer;
93 
94  switch (outlink->format) {
95  case AV_PIX_FMT_RGB48:
96  case AV_PIX_FMT_BGR48:
97  case AV_PIX_FMT_RGBA64:
98  case AV_PIX_FMT_BGRA64:
99  if (outlink->format == AV_PIX_FMT_RGB48 ||
100  outlink->format == AV_PIX_FMT_RGBA64) {
101  cm->rgba_map[R] = 0;
102  cm->rgba_map[G] = 1;
103  cm->rgba_map[B] = 2;
104  cm->rgba_map[A] = 3;
105  } else {
106  cm->rgba_map[R] = 2;
107  cm->rgba_map[G] = 1;
108  cm->rgba_map[B] = 0;
109  cm->rgba_map[A] = 3;
110  }
111  size = 65536;
112  break;
113  default:
114  ff_fill_rgba_map(cm->rgba_map, outlink->format);
115  size = 256;
116  }
117 
118  cm->buffer = buffer = av_malloc(16 * size * sizeof(*cm->buffer));
119  if (!cm->buffer)
120  return AVERROR(ENOMEM);
121 
122  for (i = 0; i < 4; i++)
123  for (j = 0; j < 4; j++, buffer += size)
124  cm->lut[i][j] = buffer;
125 
126  for (i = 0; i < size; i++) {
127  cm->lut[R][R][i] = i * cm->rr;
128  cm->lut[R][G][i] = i * cm->rg;
129  cm->lut[R][B][i] = i * cm->rb;
130  cm->lut[R][A][i] = i * cm->ra;
131 
132  cm->lut[G][R][i] = i * cm->gr;
133  cm->lut[G][G][i] = i * cm->gg;
134  cm->lut[G][B][i] = i * cm->gb;
135  cm->lut[G][A][i] = i * cm->ga;
136 
137  cm->lut[B][R][i] = i * cm->br;
138  cm->lut[B][G][i] = i * cm->bg;
139  cm->lut[B][B][i] = i * cm->bb;
140  cm->lut[B][A][i] = i * cm->ba;
141 
142  cm->lut[A][R][i] = i * cm->ar;
143  cm->lut[A][G][i] = i * cm->ag;
144  cm->lut[A][B][i] = i * cm->ab;
145  cm->lut[A][A][i] = i * cm->aa;
146  }
147 
148  return 0;
149 }
150 
151 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
152 {
153  AVFilterContext *ctx = inlink->dst;
155  AVFilterLink *outlink = ctx->outputs[0];
156  const uint8_t roffset = cm->rgba_map[R];
157  const uint8_t goffset = cm->rgba_map[G];
158  const uint8_t boffset = cm->rgba_map[B];
159  const uint8_t aoffset = cm->rgba_map[A];
160  const uint8_t *srcrow = in->data[0];
161  uint8_t *dstrow;
162  AVFrame *out;
163  int i, j;
164 
165  if (av_frame_is_writable(in)) {
166  out = in;
167  } else {
168  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
169  if (!out) {
170  av_frame_free(&in);
171  return AVERROR(ENOMEM);
172  }
173  av_frame_copy_props(out, in);
174  }
175 
176  dstrow = out->data[0];
177  switch (outlink->format) {
178  case AV_PIX_FMT_BGR24:
179  case AV_PIX_FMT_RGB24:
180  for (i = 0; i < outlink->h; i++) {
181  const uint8_t *src = srcrow;
182  uint8_t *dst = dstrow;
183 
184  for (j = 0; j < outlink->w * 3; j += 3) {
185  const uint8_t rin = src[j + roffset];
186  const uint8_t gin = src[j + goffset];
187  const uint8_t bin = src[j + boffset];
188 
189  dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
190  cm->lut[R][G][gin] +
191  cm->lut[R][B][bin]);
192  dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
193  cm->lut[G][G][gin] +
194  cm->lut[G][B][bin]);
195  dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
196  cm->lut[B][G][gin] +
197  cm->lut[B][B][bin]);
198  }
199 
200  srcrow += in->linesize[0];
201  dstrow += out->linesize[0];
202  }
203  break;
204  case AV_PIX_FMT_0BGR:
205  case AV_PIX_FMT_0RGB:
206  case AV_PIX_FMT_BGR0:
207  case AV_PIX_FMT_RGB0:
208  for (i = 0; i < outlink->h; i++) {
209  const uint8_t *src = srcrow;
210  uint8_t *dst = dstrow;
211 
212  for (j = 0; j < outlink->w * 4; j += 4) {
213  const uint8_t rin = src[j + roffset];
214  const uint8_t gin = src[j + goffset];
215  const uint8_t bin = src[j + boffset];
216 
217  dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
218  cm->lut[R][G][gin] +
219  cm->lut[R][B][bin]);
220  dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
221  cm->lut[G][G][gin] +
222  cm->lut[G][B][bin]);
223  dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
224  cm->lut[B][G][gin] +
225  cm->lut[B][B][bin]);
226  if (in != out)
227  dst[j + aoffset] = 0;
228  }
229 
230  srcrow += in->linesize[0];
231  dstrow += out->linesize[0];
232  }
233  break;
234  case AV_PIX_FMT_ABGR:
235  case AV_PIX_FMT_ARGB:
236  case AV_PIX_FMT_BGRA:
237  case AV_PIX_FMT_RGBA:
238  for (i = 0; i < outlink->h; i++) {
239  const uint8_t *src = srcrow;
240  uint8_t *dst = dstrow;
241 
242  for (j = 0; j < outlink->w * 4; j += 4) {
243  const uint8_t rin = src[j + roffset];
244  const uint8_t gin = src[j + goffset];
245  const uint8_t bin = src[j + boffset];
246  const uint8_t ain = src[j + aoffset];
247 
248  dst[j + roffset] = av_clip_uint8(cm->lut[R][R][rin] +
249  cm->lut[R][G][gin] +
250  cm->lut[R][B][bin] +
251  cm->lut[R][A][ain]);
252  dst[j + goffset] = av_clip_uint8(cm->lut[G][R][rin] +
253  cm->lut[G][G][gin] +
254  cm->lut[G][B][bin] +
255  cm->lut[G][A][ain]);
256  dst[j + boffset] = av_clip_uint8(cm->lut[B][R][rin] +
257  cm->lut[B][G][gin] +
258  cm->lut[B][B][bin] +
259  cm->lut[B][A][ain]);
260  dst[j + aoffset] = av_clip_uint8(cm->lut[A][R][rin] +
261  cm->lut[A][G][gin] +
262  cm->lut[A][B][bin] +
263  cm->lut[A][A][ain]);
264  }
265 
266  srcrow += in->linesize[0];
267  dstrow += out->linesize[0];
268  }
269  break;
270  case AV_PIX_FMT_BGR48:
271  case AV_PIX_FMT_RGB48:
272  for (i = 0; i < outlink->h; i++) {
273  const uint16_t *src = (const uint16_t *)srcrow;
274  uint16_t *dst = (uint16_t *)dstrow;
275 
276  for (j = 0; j < outlink->w * 3; j += 3) {
277  const uint16_t rin = src[j + roffset];
278  const uint16_t gin = src[j + goffset];
279  const uint16_t bin = src[j + boffset];
280 
281  dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
282  cm->lut[R][G][gin] +
283  cm->lut[R][B][bin]);
284  dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
285  cm->lut[G][G][gin] +
286  cm->lut[G][B][bin]);
287  dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
288  cm->lut[B][G][gin] +
289  cm->lut[B][B][bin]);
290  }
291 
292  srcrow += in->linesize[0];
293  dstrow += out->linesize[0];
294  }
295  break;
296  case AV_PIX_FMT_BGRA64:
297  case AV_PIX_FMT_RGBA64:
298  for (i = 0; i < outlink->h; i++) {
299  const uint16_t *src = (const uint16_t *)srcrow;
300  uint16_t *dst = (uint16_t *)dstrow;
301 
302  for (j = 0; j < outlink->w * 4; j += 4) {
303  const uint16_t rin = src[j + roffset];
304  const uint16_t gin = src[j + goffset];
305  const uint16_t bin = src[j + boffset];
306  const uint16_t ain = src[j + aoffset];
307 
308  dst[j + roffset] = av_clip_uint16(cm->lut[R][R][rin] +
309  cm->lut[R][G][gin] +
310  cm->lut[R][B][bin] +
311  cm->lut[R][A][ain]);
312  dst[j + goffset] = av_clip_uint16(cm->lut[G][R][rin] +
313  cm->lut[G][G][gin] +
314  cm->lut[G][B][bin] +
315  cm->lut[G][A][ain]);
316  dst[j + boffset] = av_clip_uint16(cm->lut[B][R][rin] +
317  cm->lut[B][G][gin] +
318  cm->lut[B][B][bin] +
319  cm->lut[B][A][ain]);
320  dst[j + aoffset] = av_clip_uint16(cm->lut[A][R][rin] +
321  cm->lut[A][G][gin] +
322  cm->lut[A][B][bin] +
323  cm->lut[A][A][ain]);
324  }
325 
326  srcrow += in->linesize[0];
327  dstrow += out->linesize[0];
328  }
329  }
330 
331  if (in != out)
332  av_frame_free(&in);
333  return ff_filter_frame(ctx->outputs[0], out);
334 }
335 
336 static av_cold void uninit(AVFilterContext *ctx)
337 {
339 
340  av_freep(&cm->buffer);
341 }
342 
344  {
345  .name = "default",
346  .type = AVMEDIA_TYPE_VIDEO,
347  .filter_frame = filter_frame,
348  },
349  { NULL }
350 };
351 
353  {
354  .name = "default",
355  .type = AVMEDIA_TYPE_VIDEO,
356  .config_props = config_output,
357  },
358  { NULL }
359 };
360 
362  .name = "colorchannelmixer",
363  .description = NULL_IF_CONFIG_SMALL("Adjust colors by mixing color channels."),
364  .priv_size = sizeof(ColorChannelMixerContext),
365  .priv_class = &colorchannelmixer_class,
366  .uninit = uninit,
368  .inputs = colorchannelmixer_inputs,
369  .outputs = colorchannelmixer_outputs,
370 };
AVFILTER_DEFINE_CLASS(colorchannelmixer)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static int query_formats(AVFilterContext *ctx)
AVOption.
Definition: opt.h:251
AVFilter avfilter_vf_colorchannelmixer
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:70
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:292
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
#define AV_PIX_FMT_BGRA64
Definition: pixfmt.h:293
packed BGR 8:8:8, 32bpp, 0BGR0BGR...
Definition: pixfmt.h:216
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
#define OFFSET(x)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:308
const char * name
Pad name.
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
packed RGB 8:8:8, 32bpp, RGB0RGB0...
Definition: pixfmt.h:215
#define A
AVOptions.
static const AVFilterPad colorchannelmixer_inputs[]
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:98
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:271
void ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:545
#define G
#define cm
Definition: dvbsubdec.c:34
A filter pad used for either input or output.
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static const AVOption colorchannelmixer_options[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:99
void * priv
private data for use by the filter
Definition: avfilter.h:545
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:361
#define AV_PIX_FMT_RGB48
Definition: pixfmt.h:267
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:96
static av_cold void uninit(AVFilterContext *ctx)
int size
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:97
#define B
static const AVFilterPad colorchannelmixer_outputs[]
static int config_output(AVFilterLink *outlink)
#define ra
Definition: regdef.h:57
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:71
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
Definition: drawutils.c:33
NULL
Definition: eval.c:55
AVS_Value src
Definition: avisynth_c.h:523
misc drawing utilities
#define FLAGS
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
packed BGR 8:8:8, 32bpp, BGR0BGR0...
Definition: pixfmt.h:217
const char * name
filter name
Definition: avfilter.h:437
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
#define R
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
the buffer and buffer reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFilterBuffer structures They must not be accessed but through references stored in AVFilterBufferRef structures Several references can point to the same buffer
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
An instance of a filter.
Definition: avfilter.h:524
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
internal API functions
packed RGB 8:8:8, 32bpp, 0RGB0RGB...
Definition: pixfmt.h:214
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
for(j=16;j >0;--j)