vf_scale.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2007 Bobby Bingham
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * scale video filter
24  */
25 
26 #include <stdio.h>
27 #include <string.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "video.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/internal.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/opt.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/avassert.h"
42 #include "libswscale/swscale.h"
43 
44 static const char *const var_names[] = {
45  "in_w", "iw",
46  "in_h", "ih",
47  "out_w", "ow",
48  "out_h", "oh",
49  "a",
50  "sar",
51  "dar",
52  "hsub",
53  "vsub",
54  NULL
55 };
56 
/* Indices into var_values[] used by config_props(); each enumerator
 * corresponds positionally to an entry of var_names[] above.
 * (The enumerator list was dropped by the doc extraction; restored
 * from the VAR_* usages in config_props().) */
enum var_name {
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A,
    VAR_SAR,
    VAR_DAR,
    VAR_HSUB,
    VAR_VSUB,
    VARS_NB
};
69 
70 typedef struct {
71  const AVClass *class;
72  struct SwsContext *sws; ///< software scaler context
73  struct SwsContext *isws[2]; ///< software scaler context for interlaced material
74 
75  /**
76  * New dimensions. Special values are:
77  * 0 = original width/height
78  * -1 = keep original aspect
79  */
80  int w, h;
81  char *size_str;
82  unsigned int flags; ///sws flags
83 
84  int hsub, vsub; ///< chroma subsampling
85  int slice_y; ///< top of current output slice
86  int input_is_pal; ///< set to 1 if the input format is paletted
87  int output_is_pal; ///< set to 1 if the output format is paletted
89 
90  char *w_expr; ///< width expression string
91  char *h_expr; ///< height expression string
92  char *flags_str;
93 } ScaleContext;
94 
95 static av_cold int init(AVFilterContext *ctx)
96 {
97  ScaleContext *scale = ctx->priv;
98  int ret;
99 
100  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
101  av_log(ctx, AV_LOG_ERROR,
102  "Size and width/height expressions cannot be set at the same time.\n");
103  return AVERROR(EINVAL);
104  }
105 
106  if (scale->w_expr && !scale->h_expr)
107  FFSWAP(char *, scale->w_expr, scale->size_str);
108 
109  if (scale->size_str) {
110  char buf[32];
111  if ((ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str)) < 0) {
112  av_log(ctx, AV_LOG_ERROR,
113  "Invalid size '%s'\n", scale->size_str);
114  return ret;
115  }
116  snprintf(buf, sizeof(buf)-1, "%d", scale->w);
117  av_opt_set(scale, "w", buf, 0);
118  snprintf(buf, sizeof(buf)-1, "%d", scale->h);
119  av_opt_set(scale, "h", buf, 0);
120  }
121  if (!scale->w_expr)
122  av_opt_set(scale, "w", "iw", 0);
123  if (!scale->h_expr)
124  av_opt_set(scale, "h", "ih", 0);
125 
126  av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n",
127  scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced);
128 
129  scale->flags = SWS_BILINEAR;
130 
131  if (scale->flags_str) {
132  const AVClass *class = sws_get_class();
133  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
135  int ret = av_opt_eval_flags(&class, o, scale->flags_str, &scale->flags);
136  if (ret < 0)
137  return ret;
138  }
139 
140  return 0;
141 }
142 
143 static av_cold void uninit(AVFilterContext *ctx)
144 {
145  ScaleContext *scale = ctx->priv;
146  sws_freeContext(scale->sws);
147  sws_freeContext(scale->isws[0]);
148  sws_freeContext(scale->isws[1]);
149  scale->sws = NULL;
150  av_opt_free(scale);
151 }
152 
154 {
156  enum AVPixelFormat pix_fmt;
157  int ret;
158 
159  if (ctx->inputs[0]) {
160  formats = NULL;
161  for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
162  if ( sws_isSupportedInput(pix_fmt)
163  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
164  ff_formats_unref(&formats);
165  return ret;
166  }
167  ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
168  }
169  if (ctx->outputs[0]) {
170  formats = NULL;
171  for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++)
172  if ( (sws_isSupportedOutput(pix_fmt) || pix_fmt == AV_PIX_FMT_PAL8)
173  && (ret = ff_add_format(&formats, pix_fmt)) < 0) {
174  ff_formats_unref(&formats);
175  return ret;
176  }
177  ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
178  }
179 
180  return 0;
181 }
182 
183 static int config_props(AVFilterLink *outlink)
184 {
185  AVFilterContext *ctx = outlink->src;
186  AVFilterLink *inlink = outlink->src->inputs[0];
187  enum AVPixelFormat outfmt = outlink->format;
188  ScaleContext *scale = ctx->priv;
189  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
190  int64_t w, h;
191  double var_values[VARS_NB], res;
192  char *expr;
193  int ret;
194 
195  var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w;
196  var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h;
197  var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
198  var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
199  var_values[VAR_A] = (double) inlink->w / inlink->h;
200  var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
201  (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
202  var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR];
203  var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
204  var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
205 
206  /* evaluate width and height */
207  av_expr_parse_and_eval(&res, (expr = scale->w_expr),
208  var_names, var_values,
209  NULL, NULL, NULL, NULL, NULL, 0, ctx);
210  scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
211  if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
212  var_names, var_values,
213  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
214  goto fail;
215  scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
216  /* evaluate again the width, as it may depend on the output height */
217  if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
218  var_names, var_values,
219  NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
220  goto fail;
221  scale->w = res;
222 
223  w = scale->w;
224  h = scale->h;
225 
226  /* sanity check params */
227  if (w < -1 || h < -1) {
228  av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
229  return AVERROR(EINVAL);
230  }
231  if (w == -1 && h == -1)
232  scale->w = scale->h = 0;
233 
234  if (!(w = scale->w))
235  w = inlink->w;
236  if (!(h = scale->h))
237  h = inlink->h;
238  if (w == -1)
239  w = av_rescale(h, inlink->w, inlink->h);
240  if (h == -1)
241  h = av_rescale(w, inlink->h, inlink->w);
242 
243  if (w > INT_MAX || h > INT_MAX ||
244  (h * inlink->w) > INT_MAX ||
245  (w * inlink->h) > INT_MAX)
246  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
247 
248  outlink->w = w;
249  outlink->h = h;
250 
251  /* TODO: make algorithm configurable */
252 
253  scale->input_is_pal = desc->flags & PIX_FMT_PAL ||
254  desc->flags & PIX_FMT_PSEUDOPAL;
255  if (outfmt == AV_PIX_FMT_PAL8) outfmt = AV_PIX_FMT_BGR8;
256  scale->output_is_pal = av_pix_fmt_desc_get(outfmt)->flags & PIX_FMT_PAL ||
258 
259  if (scale->sws)
260  sws_freeContext(scale->sws);
261  if (inlink->w == outlink->w && inlink->h == outlink->h &&
262  inlink->format == outlink->format)
263  scale->sws = NULL;
264  else {
265  scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
266  outlink->w, outlink->h, outfmt,
267  scale->flags, NULL, NULL, NULL);
268  if (scale->isws[0])
269  sws_freeContext(scale->isws[0]);
270  scale->isws[0] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
271  outlink->w, outlink->h/2, outfmt,
272  scale->flags, NULL, NULL, NULL);
273  if (scale->isws[1])
274  sws_freeContext(scale->isws[1]);
275  scale->isws[1] = sws_getContext(inlink ->w, inlink ->h/2, inlink ->format,
276  outlink->w, outlink->h/2, outfmt,
277  scale->flags, NULL, NULL, NULL);
278  if (!scale->sws || !scale->isws[0] || !scale->isws[1])
279  return AVERROR(EINVAL);
280  }
281 
282  if (inlink->sample_aspect_ratio.num){
283  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
284  } else
285  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
286 
287  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d fmt:%s sar:%d/%d flags:0x%0x\n",
288  inlink ->w, inlink ->h, av_get_pix_fmt_name( inlink->format),
290  outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
291  outlink->sample_aspect_ratio.num, outlink->sample_aspect_ratio.den,
292  scale->flags);
293  return 0;
294 
295 fail:
297  "Error when evaluating the expression '%s'.\n"
298  "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
299  expr, scale->w_expr, scale->h_expr);
300  return ret;
301 }
302 
303 static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
304 {
305  ScaleContext *scale = link->dst->priv;
306  const uint8_t *in[4];
307  uint8_t *out[4];
308  int in_stride[4],out_stride[4];
309  int i;
310 
311  for(i=0; i<4; i++){
312  int vsub= ((i+1)&2) ? scale->vsub : 0;
313  in_stride[i] = cur_pic->linesize[i] * mul;
314  out_stride[i] = out_buf->linesize[i] * mul;
315  in[i] = cur_pic->data[i] + ((y>>vsub)+field) * cur_pic->linesize[i];
316  out[i] = out_buf->data[i] + field * out_buf->linesize[i];
317  }
318  if(scale->input_is_pal)
319  in[1] = cur_pic->data[1];
320  if(scale->output_is_pal)
321  out[1] = out_buf->data[1];
322 
323  return sws_scale(sws, in, in_stride, y/mul, h,
324  out,out_stride);
325 }
326 
328 {
329  ScaleContext *scale = link->dst->priv;
330  AVFilterLink *outlink = link->dst->outputs[0];
331  AVFrame *out;
332  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
333  char buf[32];
334 
335  if( in->width != link->w
336  || in->height != link->h
337  || in->format != link->format) {
338  int ret;
339  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
340  av_opt_set(scale, "w", buf, 0);
341  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
342  av_opt_set(scale, "h", buf, 0);
343 
344  link->dst->inputs[0]->format = in->format;
345  link->dst->inputs[0]->w = in->width;
346  link->dst->inputs[0]->h = in->height;
347 
348  if ((ret = config_props(outlink)) < 0)
349  return ret;
350  }
351 
352  if (!scale->sws)
353  return ff_filter_frame(outlink, in);
354 
355  scale->hsub = desc->log2_chroma_w;
356  scale->vsub = desc->log2_chroma_h;
357 
358  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
359  if (!out) {
360  av_frame_free(&in);
361  return AVERROR(ENOMEM);
362  }
363 
364  av_frame_copy_props(out, in);
365  out->width = outlink->w;
366  out->height = outlink->h;
367 
368  if(scale->output_is_pal)
369  avpriv_set_systematic_pal2((uint32_t*)out->data[1], outlink->format == AV_PIX_FMT_PAL8 ? AV_PIX_FMT_BGR8 : outlink->format);
370 
372  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
373  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
374  INT_MAX);
375 
376  if(scale->interlaced>0 || (scale->interlaced<0 && in->interlaced_frame)){
377  scale_slice(link, out, in, scale->isws[0], 0, (link->h+1)/2, 2, 0);
378  scale_slice(link, out, in, scale->isws[1], 0, link->h /2, 2, 1);
379  }else{
380  scale_slice(link, out, in, scale->sws, 0, link->h, 1, 0);
381  }
382 
383  av_frame_free(&in);
384  return ff_filter_frame(outlink, out);
385 }
386 
387 #define OFFSET(x) offsetof(ScaleContext, x)
388 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
389 
390 static const AVOption scale_options[] = {
391  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
392  { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
393  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
394  { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
395  { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS },
396  { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 1, FLAGS },
397  { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
398  { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS },
399  { NULL },
400 };
401 
403 
405  {
406  .name = "default",
407  .type = AVMEDIA_TYPE_VIDEO,
408  .filter_frame = filter_frame,
409  },
410  { NULL }
411 };
412 
414  {
415  .name = "default",
416  .type = AVMEDIA_TYPE_VIDEO,
417  .config_props = config_props,
418  },
419  { NULL }
420 };
421 
423  .name = "scale",
424  .description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
425 
426  .init = init,
427  .uninit = uninit,
428 
429  .query_formats = query_formats,
430 
431  .priv_size = sizeof(ScaleContext),
432  .priv_class = &scale_class,
433 
434  .inputs = avfilter_vf_scale_inputs,
435  .outputs = avfilter_vf_scale_outputs,
436 };
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:424
#define FLAGS
Definition: vf_scale.c:388
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1778
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
AVOption.
Definition: opt.h:251
int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str)
Parse str and put in width_ptr and height_ptr the detected values.
Definition: parseutils.c:131
AVFILTER_DEFINE_CLASS(scale)
misc image utilities
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:117
external API header
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
Definition: vf_scale.c:303
int num
numerator
Definition: rational.h:44
About Git write you should know how to use GIT properly Luckily Git comes with excellent documentation git help man git shows you the available git< command > help man git< command > shows information about the subcommand< command > The most comprehensive manual is the website Git Reference visit they are quite exhaustive You do not need a special username or password All you need is to provide a ssh public key to the Git server admin What follows now is a basic introduction to Git and some FFmpeg specific guidelines Read it at least if you are granted commit privileges to the FFmpeg project you are expected to be familiar with these rules I if not You can get git from etc no matter how small Every one of them has been saved from looking like a fool by this many times It s very easy for stray debug output or cosmetic modifications to slip in
Definition: git-howto.txt:5
int hsub
sws flags
Definition: vf_scale.c:84
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:143
int vsub
chroma subsampling
Definition: vf_scale.c:84
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
int output_is_pal
set to 1 if the output format is paletted
Definition: vf_scale.c:87
output residual component w
const char * name
Pad name.
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:532
char * w_expr
width expression string
Definition: vf_scale.c:90
static int query_formats(AVFilterContext *ctx)
Definition: vf_scale.c:153
uint8_t
it can be given away to ff_start_frame *A reference passed to ff_filter_frame(or the deprecated ff_start_frame) is given away and must no longer be used.*A reference created with avfilter_ref_buffer belongs to the code that created it.*A reference obtained with ff_get_video_buffer or ff_get_audio_buffer belongs to the code that requested it.*A reference given as return value by the get_video_buffer or get_audio_buffer method is given away and must no longer be used.Link reference fields---------------------The AVFilterLink structure has a few AVFilterBufferRef fields.The cur_buf and out_buf were used with the deprecated start_frame/draw_slice/end_frame API and should no longer be used.src_buf
#define av_cold
Definition: attributes.h:78
char * h_expr
height expression string
Definition: vf_scale.c:91
8 bit with PIX_FMT_RGB32 palette
Definition: pixfmt.h:79
AVOptions.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don&#39;t need to export the SwsContext.
Definition: swscale.c:798
#define NAN
Definition: math.h:7
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
enum AVPixelFormat pix_fmt
Definition: v4l.c:63
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:270
external API header
A filter pad used for either input or output.
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:250
int av_expr_parse_and_eval(double *d, const char *s, const char *const *const_names, const double *const_values, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), void *opaque, int log_offset, void *log_ctx)
Parse and evaluate an expression.
Definition: eval.c:701
int width
width and height of the video frame
Definition: frame.h:122
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
static const AVOption scale_options[]
Definition: vf_scale.c:390
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
void * priv
private data for use by the filter
Definition: avfilter.h:545
simple assert() macros that are a bit more flexible than ISO C assert().
#define PIX_FMT_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:90
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:150
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:344
AVFilter avfilter_vf_scale
Definition: vf_scale.c:422
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
int w
New dimensions.
Definition: vf_scale.c:80
static int config_props(AVFilterLink *outlink)
Definition: vf_scale.c:183
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
int sws_isSupportedInput(enum AVPixelFormat pix_fmt)
Return a positive value if pix_fmt is a supported input format, 0 otherwise.
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1223
common internal API header
#define AV_LOG_VERBOSE
Definition: log.h:157
static av_cold void uninit(AVFilterContext *ctx)
Definition: vf_scale.c:143
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:118
static const AVFilterPad avfilter_vf_scale_outputs[]
Definition: vf_scale.c:413
var_name
ret
Definition: avfilter.c:821
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate.The lists are not just lists
int interlaced
Definition: vf_scale.c:88
char * flags_str
Definition: vf_scale.c:92
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:134
NULL
Definition: eval.c:55
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:87
#define OFFSET(x)
Definition: vf_scale.c:387
void ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:432
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
int sws_isSupportedOutput(enum AVPixelFormat pix_fmt)
Return a positive value if pix_fmt is a supported output format, 0 otherwise.
struct SwsContext * sws
software scaler context
Definition: vf_scale.c:72
uint8_t flags
Definition: pixdesc.h:76
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
static const AVFilterPad avfilter_vf_scale_inputs[]
Definition: vf_scale.c:404
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:154
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
void * buf
Definition: avisynth_c.h:594
Describe the class of an AVClass context structure.
Definition: log.h:50
Filter definition.
Definition: avfilter.h:436
synthesis window for stochastic i
rational number numerator/denominator
Definition: rational.h:43
static av_cold int init(AVFilterContext *ctx)
Definition: vf_scale.c:95
void ff_formats_unref(AVFilterFormats **ref)
If *ref is non-NULL, remove *ref as a reference to the format list it currently points to...
Definition: formats.c:468
const char * name
filter name
Definition: avfilter.h:437
#define snprintf
Definition: snprintf.h:34
int input_is_pal
set to 1 if the input format is paletted
Definition: vf_scale.c:86
static const char *const var_names[]
Definition: vf_scale.c:44
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
unsigned int flags
Definition: vf_scale.c:82
misc parsing utilities
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:539
struct SwsContext * isws[2]
software scaler context for interlaced material
Definition: vf_scale.c:73
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_scale.c:327
void av_opt_free(void *obj)
Free all string and binary options in obj.
Definition: opt.c:1194
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common formats
Definition: swscale.txt:33
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:108
#define SWS_BILINEAR
Definition: swscale.h:59
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required poi...
Definition: opt.h:547
char * size_str
Definition: vf_scale.c:81
Same thing on a dB scale
int den
denominator
Definition: rational.h:45
function y
Definition: D.m:1
int slice_y
top of current output slice
Definition: vf_scale.c:85
Definition: vf_scale.c:62
#define PIX_FMT_PSEUDOPAL
The pixel format is "pseudo-paletted".
Definition: pixdesc.h:100
A list of supported formats for one end of a filter link.
Definition: formats.h:64
An instance of a filter.
Definition: avfilter.h:524
number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of...
Definition: pixfmt.h:237
int height
Definition: frame.h:122
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=av_sample_fmt_is_planar(in_fmt);out_planar=av_sample_fmt_is_planar(out_fmt);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> out
#define FFSWAP(type, a, b)
Definition: common.h:61
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:1700
internal API functions
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:252
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
simple arithmetic expression evaluator