vf_gradfun.c
/*
 * Copyright (c) 2010 Nolan Lum <nol888@gmail.com>
 * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * gradfun debanding filter, ported from MPlayer
 * libmpcodecs/vf_gradfun.c
 *
 * Apply a boxblur debanding algorithm (based on the gradfun2db
 * Avisynth filter by prunedtree).
 * For each pixel, if it is within the threshold of the blurred value, move it
 * closer, yielding a smoothed, higher-bitdepth version of all the shallow
 * gradients while leaving detailed areas untouched.
 * Finally, dither the result back to 8 bits.
 */
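
/*
 * Sketch of the per-pixel blend implemented in ff_gradfun_filter_line_c()
 * below: the source pixel and the local blurred average dc are both kept in
 * 8.7 fixed point (7 extra fractional bits). With delta = dc - pix and
 * m = max(0, 127 - |delta| * thresh / 2^16), the correction added to pix is
 * m * m * delta / 2^14. Pixels close to the blurred value (m near 127) are
 * pulled essentially all the way onto the smooth gradient, pixels far from it
 * (m == 0) are left alone, and ordered dithering rounds the result back to
 * 8 bits.
 */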

#include "libavutil/imgutils.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gradfun.h"
#include "internal.h"
#include "video.h"

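/*
 * 8x8 ordered-dither matrix; the entries are even values in 0..126, i.e. they
 * cover one output LSB (1 << 7) of the filter's 8.7 fixed-point domain.
 */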
DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
    {0x00,0x60,0x18,0x78,0x06,0x66,0x1E,0x7E},
    {0x40,0x20,0x58,0x38,0x46,0x26,0x5E,0x3E},
    {0x10,0x70,0x08,0x68,0x16,0x76,0x0E,0x6E},
    {0x50,0x30,0x48,0x28,0x56,0x36,0x4E,0x2E},
    {0x04,0x64,0x1C,0x7C,0x02,0x62,0x1A,0x7A},
    {0x44,0x24,0x5C,0x3C,0x42,0x22,0x5A,0x3A},
    {0x14,0x74,0x0C,0x6C,0x12,0x72,0x0A,0x6A},
    {0x54,0x34,0x4C,0x2C,0x52,0x32,0x4A,0x2A},
};

void ff_gradfun_filter_line_c(uint8_t *dst, const uint8_t *src, const uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
    int x;
    for (x = 0; x < width; dc += x & 1, x++) {
        int pix = src[x] << 7;
        int delta = dc[0] - pix;
        int m = abs(delta) * thresh >> 16;
        m = FFMAX(0, 127 - m);
        m = m * m * delta >> 14;
        pix += m + dithers[x & 7];
        dst[x] = av_clip_uint8(pix >> 7);
    }
}

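/*
 * One half-width line of the 2x-downscaled boxblur. buf1 holds the running
 * column sums of the previous row; each element adds a 2x2 block of source
 * pixels to that sum, the old contents of buf (the sums from the row leaving
 * the vertical window) drop out, and dc ends up holding the column sums over
 * the current vertical window. The horizontal part of the box sum is done
 * afterwards in filter().
 */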
void ff_gradfun_blur_line_c(uint16_t *dc, uint16_t *buf, const uint16_t *buf1, const uint8_t *src, int src_linesize, int width)
{
    int x, v, old;
    for (x = 0; x < width; x++) {
        v = buf1[x] + src[2 * x] + src[2 * x + 1] + src[2 * x + src_linesize] + src[2 * x + 1 + src_linesize];
        old = buf[x];
        buf[x] = v;
        dc[x] = v - old;
    }
}

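/*
 * Filter one plane. ctx->buf is laid out as one padded half-width dc line
 * followed by a ring of r half-width blur lines; source rows enter the ring
 * r lines ahead of the row being filtered, the horizontal box sum is slid
 * across dc, and each pass of the main loop emits two output rows because the
 * blur is computed at half resolution.
 */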
static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int width, int height, int dst_linesize, int src_linesize, int r)
{
    int bstride = FFALIGN(width, 16) / 2;
    int y;
    uint32_t dc_factor = (1 << 21) / (r * r);
    uint16_t *dc = ctx->buf + 16;
    uint16_t *buf = ctx->buf + bstride + 32;
    int thresh = ctx->thresh;

    memset(dc, 0, (bstride + 16) * sizeof(*buf));
    for (y = 0; y < r; y++)
        ctx->blur_line(dc, buf + y * bstride, buf + (y - 1) * bstride, src + 2 * y * src_linesize, src_linesize, width / 2);
    for (;;) {
        if (y < height - r) {
            int mod = ((y + r) / 2) % r;
            uint16_t *buf0 = buf + mod * bstride;
            uint16_t *buf1 = buf + (mod ? mod - 1 : r - 1) * bstride;
            int x, v;
            ctx->blur_line(dc, buf0, buf1, src + (y + r) * src_linesize, src_linesize, width / 2);
            for (x = v = 0; x < r; x++)
                v += dc[x];
            for (; x < width / 2; x++) {
                v += dc[x] - dc[x-r];
                dc[x-r] = v * dc_factor >> 16;
            }
            for (; x < (width + r + 1) / 2; x++)
                dc[x-r] = v * dc_factor >> 16;
            for (x = -r / 2; x < 0; x++)
                dc[x] = dc[0];
        }
        if (y == r) {
            for (y = 0; y < r; y++)
                ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        }
        ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        if (++y >= height) break;
        ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
        if (++y >= height) break;
    }
}

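/*
 * The user-visible "strength" option is documented as the largest change the
 * filter applies to a pixel; init() folds it into the fixed-point threshold
 * thresh = 2^15 / strength used by filter_line, and rounds the radius to an
 * even value in [4, 32].
 */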
static av_cold int init(AVFilterContext *ctx)
{
    GradFunContext *gf = ctx->priv;

    gf->thresh = (1 << 15) / gf->strength;
    gf->radius = av_clip((gf->radius + 1) & ~1, 4, 32);

    gf->blur_line   = ff_gradfun_blur_line_c;
    gf->filter_line = ff_gradfun_filter_line_c;

    if (ARCH_X86)
        ff_gradfun_init_x86(gf);

    av_log(ctx, AV_LOG_VERBOSE, "threshold:%.2f radius:%d\n", gf->strength, gf->radius);

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GradFunContext *gf = ctx->priv;
    av_freep(&gf->buf);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_GRAY8,   AV_PIX_FMT_NV12,
        AV_PIX_FMT_NV21,    AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

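/*
 * Per-link setup: the scratch buffer holds one padded half-width dc line plus
 * radius half-width blur lines (all uint16_t), and the chroma radius is the
 * luma radius scaled by the average chroma subsampling, rounded to an even
 * value in the same 4..32 range.
 */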
static int config_input(AVFilterLink *inlink)
{
    GradFunContext *gf = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

    gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
    if (!gf->buf)
        return AVERROR(ENOMEM);

    gf->chroma_w = -((-inlink->w) >> hsub);
    gf->chroma_h = -((-inlink->h) >> vsub);
    gf->chroma_r = av_clip(((((gf->radius >> hsub) + (gf->radius >> vsub)) / 2) + 1) & ~1, 4, 32);

    return 0;
}

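/*
 * Process one frame: filter in place when the input is writable, otherwise
 * into a fresh buffer; planes too small for the blur radius are copied
 * through unchanged.
 */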
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    GradFunContext *gf = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int p, direct;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (p = 0; p < 4 && in->data[p]; p++) {
        int w = inlink->w;
        int h = inlink->h;
        int r = gf->radius;
        if (p) {
            w = gf->chroma_w;
            h = gf->chroma_h;
            r = gf->chroma_r;
        }

        if (FFMIN(w, h) > 2 * r)
            filter(gf, out->data[p], in->data[p], w, h, out->linesize[p], in->linesize[p], r);
        else if (out->data[p] != in->data[p])
            av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p], w, h);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

#define OFFSET(x) offsetof(GradFunContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption gradfun_options[] = {
    { "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
    { "radius",   "The neighborhood to fit the gradient to.",                          OFFSET(radius),   AV_OPT_TYPE_INT,   { .i64 = 16  }, 4,    32, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(gradfun);

static const AVFilterPad avfilter_vf_gradfun_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_gradfun_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_gradfun = {
    .name          = "gradfun",
    .description   = NULL_IF_CONFIG_SMALL("Debands video quickly using gradients."),
    .priv_size     = sizeof(GradFunContext),
    .priv_class    = &gradfun_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_gradfun_inputs,
    .outputs       = avfilter_vf_gradfun_outputs,
};