vf_libopencv.c
/*
 * Copyright (c) 2010 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libopencv wrapper functions
 */

/* #define DEBUG */

#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

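/**
 * Fill an IplImage header so that it points at the pixel data of the
 * given AVFrame without copying. Only AV_PIX_FMT_GRAY8, AV_PIX_FMT_BGRA
 * and AV_PIX_FMT_BGR24 are handled; other formats leave img untouched.
 */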
static void fill_iplimage_from_frame(IplImage *img, const AVFrame *frame, enum AVPixelFormat pixfmt)
{
    IplImage *tmpimg;
    int depth, channels_nb;

    if      (pixfmt == AV_PIX_FMT_GRAY8) { depth = IPL_DEPTH_8U;  channels_nb = 1; }
    else if (pixfmt == AV_PIX_FMT_BGRA)  { depth = IPL_DEPTH_8U;  channels_nb = 4; }
    else if (pixfmt == AV_PIX_FMT_BGR24) { depth = IPL_DEPTH_8U;  channels_nb = 3; }
    else return;

    tmpimg = cvCreateImageHeader((CvSize){frame->width, frame->height}, depth, channels_nb);
    *img = *tmpimg;
    img->imageData = img->imageDataOrigin = frame->data[0];
    img->dataOrder = IPL_DATA_ORDER_PIXEL;
    img->origin    = IPL_ORIGIN_TL;
    img->widthStep = frame->linesize[0];
}

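/**
 * Copy the data pointer and line size of an IplImage back into the
 * corresponding AVFrame after filtering.
 */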
static void fill_frame_from_iplimage(AVFrame *frame, const IplImage *img, enum AVPixelFormat pixfmt)
{
    frame->linesize[0] = img->widthStep;
    frame->data[0]     = img->imageData;
}

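/**
 * Advertise the pixel formats the wrapper can process: the same set
 * that fill_iplimage_from_frame() knows how to map onto an IplImage.
 */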
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

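/**
 * Per-instance state: the selected libopencv sub-filter name, its
 * parameter string, the callbacks picked at init time and the
 * sub-filter's private data.
 */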
typedef struct {
    const AVClass *class;
    char *name;
    char *params;
    int  (*init)(AVFilterContext *ctx, const char *args);
    void (*uninit)(AVFilterContext *ctx);
    void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
    void *priv;
} OCVContext;

typedef struct {
    int type;
    int    param1, param2;
    double param3, param4;
} SmoothContext;

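/**
 * Parse the smooth sub-filter arguments, given as
 * "type|param1|param2|param3|param4", and check that the kernel sizes
 * are valid for the selected smoothing type.
 */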
static av_cold int smooth_init(AVFilterContext *ctx, const char *args)
{
    OCVContext *ocv = ctx->priv;
    SmoothContext *smooth = ocv->priv;
    char type_str[128] = "gaussian";

    smooth->param1 = 3;
    smooth->param2 = 0;
    smooth->param3 = 0.0;
    smooth->param4 = 0.0;

    if (args)
        sscanf(args, "%127[^|]|%d|%d|%lf|%lf", type_str, &smooth->param1, &smooth->param2, &smooth->param3, &smooth->param4);

    if      (!strcmp(type_str, "blur"         )) smooth->type = CV_BLUR;
    else if (!strcmp(type_str, "blur_no_scale")) smooth->type = CV_BLUR_NO_SCALE;
    else if (!strcmp(type_str, "median"       )) smooth->type = CV_MEDIAN;
    else if (!strcmp(type_str, "gaussian"     )) smooth->type = CV_GAUSSIAN;
    else if (!strcmp(type_str, "bilateral"    )) smooth->type = CV_BILATERAL;
    else {
        av_log(ctx, AV_LOG_ERROR, "Smoothing type '%s' unknown.\n", type_str);
        return AVERROR(EINVAL);
    }

    if (smooth->param1 < 0 || !(smooth->param1%2)) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid value '%d' for param1, it has to be a positive odd number\n",
               smooth->param1);
        return AVERROR(EINVAL);
    }
    if ((smooth->type == CV_BLUR || smooth->type == CV_BLUR_NO_SCALE || smooth->type == CV_GAUSSIAN) &&
        (smooth->param2 < 0 || (smooth->param2 && !(smooth->param2%2)))) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid value '%d' for param2, it has to be zero or a positive odd number\n",
               smooth->param2);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE, "type:%s param1:%d param2:%d param3:%f param4:%f\n",
           type_str, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
    return 0;
}

static void smooth_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *ocv = ctx->priv;
    SmoothContext *smooth = ocv->priv;
    cvSmooth(inimg, outimg, smooth->type, smooth->param1, smooth->param2, smooth->param3, smooth->param4);
}

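/**
 * Read a custom structuring element shape from a text file: each line
 * is a row of the kernel and every printable (graph) character marks a
 * non-zero element. On success *cols, *rows and *values describe the
 * shape; *values must be freed by the caller.
 */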
static int read_shape_from_file(int *cols, int *rows, int **values, const char *filename,
                                void *log_ctx)
{
    uint8_t *buf, *p, *pend;
    size_t size;
    int ret, i, j, w;

    if ((ret = av_file_map(filename, &buf, &size, 0, log_ctx)) < 0)
        return ret;

    /* prescan file to get the number of lines and the maximum width */
    w = 0;
    for (i = 0; i < size; i++) {
        if (buf[i] == '\n') {
            if (*rows == INT_MAX) {
                av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of rows in the file\n");
                return AVERROR_INVALIDDATA;
            }
            ++(*rows);
            *cols = FFMAX(*cols, w);
            w = 0;
        } else if (w == INT_MAX) {
            av_log(log_ctx, AV_LOG_ERROR, "Overflow on the number of columns in the file\n");
            return AVERROR_INVALIDDATA;
        }
        w++;
    }
    if (*rows > (SIZE_MAX / sizeof(int) / *cols)) {
        av_log(log_ctx, AV_LOG_ERROR, "File with size %dx%d is too big\n",
               *rows, *cols);
        return AVERROR_INVALIDDATA;
    }
    if (!(*values = av_mallocz(sizeof(int) * *rows * *cols)))
        return AVERROR(ENOMEM);

    /* fill *values */
    p    = buf;
    pend = buf + size-1;
    for (i = 0; i < *rows; i++) {
        for (j = 0;; j++) {
            if (p > pend || *p == '\n') {
                p++;
                break;
            } else
                (*values)[*cols*i + j] = !!av_isgraph(*(p++));
        }
    }
    av_file_unmap(buf, size);

#ifdef DEBUG
    {
        char *line;
        if (!(line = av_malloc(*cols + 1)))
            return AVERROR(ENOMEM);
        for (i = 0; i < *rows; i++) {
            for (j = 0; j < *cols; j++)
                line[j] = (*values)[i * *cols + j] ? '@' : ' ';
            line[j] = 0;
            av_log(log_ctx, AV_LOG_DEBUG, "%3d: %s\n", i, line);
        }
        av_free(line);
    }
#endif

    return 0;
}

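/**
 * Parse a structuring element description of the form
 * "colsxrows+anchor_xxanchor_y/shape", where shape is one of "rect",
 * "cross", "ellipse" or "custom=filename", and create the
 * corresponding IplConvKernel.
 */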
static int parse_iplconvkernel(IplConvKernel **kernel, char *buf, void *log_ctx)
{
    char shape_filename[128] = "", shape_str[32] = "rect";
    int cols = 0, rows = 0, anchor_x = 0, anchor_y = 0, shape = CV_SHAPE_RECT;
    int *values = NULL, ret;

    sscanf(buf, "%dx%d+%dx%d/%32[^=]=%127s", &cols, &rows, &anchor_x, &anchor_y, shape_str, shape_filename);

    if      (!strcmp(shape_str, "rect"   )) shape = CV_SHAPE_RECT;
    else if (!strcmp(shape_str, "cross"  )) shape = CV_SHAPE_CROSS;
    else if (!strcmp(shape_str, "ellipse")) shape = CV_SHAPE_ELLIPSE;
    else if (!strcmp(shape_str, "custom" )) {
        shape = CV_SHAPE_CUSTOM;
        if ((ret = read_shape_from_file(&cols, &rows, &values, shape_filename, log_ctx)) < 0)
            return ret;
    } else {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape unspecified or type '%s' unknown.\n", shape_str);
        return AVERROR(EINVAL);
    }

    if (rows <= 0 || cols <= 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Invalid non-positive values for shape size %dx%d\n", cols, rows);
        return AVERROR(EINVAL);
    }

    if (anchor_x < 0 || anchor_y < 0 || anchor_x >= cols || anchor_y >= rows) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Shape anchor %dx%d is not inside the rectangle with size %dx%d.\n",
               anchor_x, anchor_y, cols, rows);
        return AVERROR(EINVAL);
    }

    *kernel = cvCreateStructuringElementEx(cols, rows, anchor_x, anchor_y, shape, values);
    av_freep(&values);
    if (!*kernel)
        return AVERROR(ENOMEM);

    av_log(log_ctx, AV_LOG_VERBOSE, "Structuring element: w:%d h:%d x:%d y:%d shape:%s\n",
           rows, cols, anchor_x, anchor_y, shape_str);
    return 0;
}

typedef struct {
    int nb_iterations;
    IplConvKernel *kernel;
} DilateContext;

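/**
 * Parse the dilate/erode arguments, given as
 * "kernel_description[|nb_iterations]"; the kernel defaults to
 * "3x3+0x0/rect" and nb_iterations to 1.
 */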
static av_cold int dilate_init(AVFilterContext *ctx, const char *args)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;
    char default_kernel_str[] = "3x3+0x0/rect";
    char *kernel_str;
    const char *buf = args;
    int ret;

    if (args)
        kernel_str = av_get_token(&buf, "|");
    else
        kernel_str = av_strdup(default_kernel_str);
    if (!kernel_str)
        return AVERROR(ENOMEM);
    if ((ret = parse_iplconvkernel(&dilate->kernel, kernel_str, ctx)) < 0)
        return ret;
    av_free(kernel_str);

    if (!buf || sscanf(buf, "|%d", &dilate->nb_iterations) != 1)
        dilate->nb_iterations = 1;
    av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations);
    if (dilate->nb_iterations <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n",
               dilate->nb_iterations);
        return AVERROR(EINVAL);
    }
    return 0;
}

static av_cold void dilate_uninit(AVFilterContext *ctx)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;

    cvReleaseStructuringElement(&dilate->kernel);
}

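/* Run cvDilate()/cvErode() using the kernel and iteration count parsed in dilate_init(). */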
static void dilate_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;
    cvDilate(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}

static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;
    cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}

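/**
 * Description of a wrapped libopencv filter: its name, the size of its
 * private context and its callbacks. init() looks up the requested
 * filter in the ocv_filter_entries table below.
 */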
typedef struct {
    const char *name;
    size_t priv_size;
    int  (*init)(AVFilterContext *ctx, const char *args);
    void (*uninit)(AVFilterContext *ctx);
    void (*end_frame_filter)(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg);
} OCVFilterEntry;

static OCVFilterEntry ocv_filter_entries[] = {
    { "dilate", sizeof(DilateContext), dilate_init, dilate_uninit, dilate_end_frame_filter },
    { "erode",  sizeof(DilateContext), dilate_init, dilate_uninit, erode_end_frame_filter  },
    { "smooth", sizeof(SmoothContext), smooth_init, NULL, smooth_end_frame_filter },
};

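/**
 * Look up the sub-filter requested with the "filter_name" option,
 * allocate its private context and invoke its init callback with the
 * "filter_params" string.
 */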
static av_cold int init(AVFilterContext *ctx)
{
    OCVContext *ocv = ctx->priv;
    int i;

    if (!ocv->name) {
        av_log(ctx, AV_LOG_ERROR, "No libopencv filter name specified\n");
        return AVERROR(EINVAL);
    }
    for (i = 0; i < FF_ARRAY_ELEMS(ocv_filter_entries); i++) {
        OCVFilterEntry *entry = &ocv_filter_entries[i];
        if (!strcmp(ocv->name, entry->name)) {
            ocv->init             = entry->init;
            ocv->uninit           = entry->uninit;
            ocv->end_frame_filter = entry->end_frame_filter;

            if (!(ocv->priv = av_mallocz(entry->priv_size)))
                return AVERROR(ENOMEM);
            return ocv->init(ctx, ocv->params);
        }
    }

    av_log(ctx, AV_LOG_ERROR, "No libopencv filter named '%s'\n", ocv->name);
    return AVERROR(EINVAL);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    OCVContext *ocv = ctx->priv;

    if (ocv->uninit)
        ocv->uninit(ctx);
    av_free(ocv->priv);
}

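/**
 * Wrap the input and output frames in IplImage headers, let the
 * selected sub-filter process them, then forward the filtered frame
 * to the next filter.
 */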
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *ocv = ctx->priv;
    AVFilterLink *outlink= inlink->dst->outputs[0];
    AVFrame *out;
    IplImage inimg, outimg;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    fill_iplimage_from_frame(&inimg , in , inlink->format);
    fill_iplimage_from_frame(&outimg, out, inlink->format);
    ocv->end_frame_filter(ctx, &inimg, &outimg);
    fill_frame_from_iplimage(out, &outimg, inlink->format);

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

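/* User-visible options: the libopencv sub-filter to apply and its parameter string. */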
#define OFFSET(x) offsetof(OCVContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "filter_name",   NULL, OFFSET(name),   AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { NULL },
};

static const AVClass ocv_class = {
    .class_name = "ocv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_vf_ocv_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_ocv_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_ocv = {
    .name        = "ocv",
    .description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),

    .priv_size   = sizeof(OCVContext),
    .priv_class  = &ocv_class,

    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,

    .inputs        = avfilter_vf_ocv_inputs,

    .outputs       = avfilter_vf_ocv_outputs,
};
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame This method is called when a frame is wanted on an output For an it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values