/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Compute a look-up table that maps each input pixel value to an output
 * value, and apply it to the input video.
 */
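
/*
 * Illustrative usage, roughly following the examples in the FFmpeg filter
 * documentation (the exact option strings are assumptions, not taken from
 * this file):
 *
 *   lutrgb="g=0:b=0"          keep only the red channel
 *   lutyuv="u=128:v=128"      neutralize the chroma, leaving grayscale
 *   lutyuv="y=gammaval(0.5)"  gamma-correct the luma plane
 *   negate=negate_alpha=1     negate all components, including alpha
 */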

#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "val",      ///< input value for the pixel
    "maxval",   ///< max value for the pixel
    "minval",   ///< min value for the pixel
    "negval",   ///< negated value
    "clipval",  ///< input value clipped to the minval-maxval range
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_VAL,
    VAR_MAXVAL,
    VAR_MINVAL,
    VAR_NEGVAL,
    VAR_CLIPVAL,
    VAR_VARS_NB
};

typedef struct {
    const AVClass *class;
    uint8_t lut[4][256];            ///< lookup table for each component
    char   *comp_expr_str[4];       ///< expression for each component, as a string
    AVExpr *comp_expr[4];           ///< parsed expression for each component
    int hsub, vsub;                 ///< chroma subsampling shifts of the input format
    double var_values[VAR_VARS_NB]; ///< current values of the expression variables
    int is_rgb, is_yuv;             ///< which family of pixel formats is accepted
    int step;                       ///< bytes per packed pixel (RGB formats only)
    int negate_alpha;               /* only used by negate */
} LutContext;

#define Y 0
#define U 1
#define V 2
#define R 0
#define G 1
#define B 2
#define A 3

#define OFFSET(x) offsetof(LutContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

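/* Note: the "y"/"u"/"v", "r"/"g"/"b" and "a" options below are aliases for
 * "c0".."c3"; the Y/U/V/R/G/B/A macros above map them onto the same
 * comp_expr_str slots, so e.g. setting "r" is equivalent to setting "c0". */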
static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "y",  "set Y expression",            OFFSET(comp_expr_str[Y]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "u",  "set U expression",            OFFSET(comp_expr_str[U]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "v",  "set V expression",            OFFSET(comp_expr_str[V]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "r",  "set R expression",            OFFSET(comp_expr_str[R]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "g",  "set G expression",            OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "b",  "set B expression",            OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { "a",  "set A expression",            OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
    { NULL },
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;
    int i;

    for (i = 0; i < 4; i++) {
        av_expr_free(lut->comp_expr[i]);
        lut->comp_expr[i] = NULL;
        av_freep(&lut->comp_expr_str[i]);
    }
}

#define YUV_FORMATS                                                 \
    AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV420P,   \
    AV_PIX_FMT_YUV411P,  AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,   \
    AV_PIX_FMT_YUVA420P,                                            \
    AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,  \
    AV_PIX_FMT_YUVJ440P

#define RGB_FORMATS                             \
    AV_PIX_FMT_ARGB,  AV_PIX_FMT_RGBA,          \
    AV_PIX_FMT_ABGR,  AV_PIX_FMT_BGRA,          \
    AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24

static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
static const enum AVPixelFormat all_pix_fmts[] = { RGB_FORMATS, YUV_FORMATS, AV_PIX_FMT_NONE };

static int query_formats(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;

    const enum AVPixelFormat *pix_fmts = lut->is_rgb ? rgb_pix_fmts :
                                         lut->is_yuv ? yuv_pix_fmts : all_pix_fmts;

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

/**
 * Clip value val in the minval - maxval range.
 */
static double clip(void *opaque, double val)
{
    LutContext *lut = opaque;
    double minval = lut->var_values[VAR_MINVAL];
    double maxval = lut->var_values[VAR_MAXVAL];

    return av_clip(val, minval, maxval);
}

/**
 * Compute gamma correction for value val, assuming the minval-maxval
 * range; val is clipped to a value contained in the same interval.
 */
static double compute_gammaval(void *opaque, double gamma)
{
    LutContext *lut = opaque;
    double val    = lut->var_values[VAR_CLIPVAL];
    double minval = lut->var_values[VAR_MINVAL];
    double maxval = lut->var_values[VAR_MAXVAL];

    return pow((val-minval)/(maxval-minval), gamma) * (maxval-minval) + minval;
}
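
/*
 * Worked example (illustrative numbers, not from the original source):
 * with the default luma range minval=16, maxval=235, an input value
 * clipped to 128 and the expression gammaval(0.5) gives
 *     pow((128-16)/219, 0.5) * 219 + 16  ~  0.715 * 219 + 16  ~  172.6
 * which is then truncated and clipped when stored into the 8-bit LUT.
 */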

static double (* const funcs1[])(void *, double) = {
    (void *)clip,
    (void *)compute_gammaval,
    NULL
};

static const char * const funcs1_names[] = {
    "clip",
    "gammaval",
    NULL
};

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LutContext *lut = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint8_t rgba_map[4]; /* component index -> RGBA color index map */
    int min[4], max[4];
    int val, color, ret;

    lut->hsub = desc->log2_chroma_w;
    lut->vsub = desc->log2_chroma_h;

    lut->var_values[VAR_W] = inlink->w;
    lut->var_values[VAR_H] = inlink->h;

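    /* For the planar YUV formats below, assume studio ("limited") range:
     * Y in [16,235] and U/V in [16,240]; JPEG-range YUV and the RGB
     * formats fall through to the full [0,255] range. */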
    switch (inlink->format) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
        min[Y] = min[U] = min[V] = 16;
        max[Y] = 235;
        max[U] = max[V] = 240;
        min[A] = 0; max[A] = 255;
        break;
    default:
        min[0] = min[1] = min[2] = min[3] = 0;
        max[0] = max[1] = max[2] = max[3] = 255;
    }

    lut->is_yuv = lut->is_rgb = 0;
    if      (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1;
    else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1;

    if (lut->is_rgb) {
        ff_fill_rgba_map(rgba_map, inlink->format);
        lut->step = av_get_bits_per_pixel(desc) >> 3;
    }

    for (color = 0; color < desc->nb_components; color++) {
        double res;
        int comp = lut->is_rgb ? rgba_map[color] : color;

        /* create the parsed expression */
        ret = av_expr_parse(&lut->comp_expr[color], lut->comp_expr_str[color],
                            var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d and color %d.\n",
                   lut->comp_expr_str[color], comp, color);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        lut->var_values[VAR_MAXVAL] = max[color];
        lut->var_values[VAR_MINVAL] = min[color];

        for (val = 0; val < 256; val++) {
            lut->var_values[VAR_VAL] = val;
            lut->var_values[VAR_CLIPVAL] = av_clip(val, min[color], max[color]);
            lut->var_values[VAR_NEGVAL] =
                av_clip(min[color] + max[color] - lut->var_values[VAR_VAL],
                        min[color], max[color]);

            res = av_expr_eval(lut->comp_expr[color], lut->var_values, lut);
            if (isnan(res)) {
                av_log(ctx, AV_LOG_ERROR,
                       "Error when evaluating the expression '%s' for the value %d for the component %d.\n",
                       lut->comp_expr_str[color], val, comp);
                return AVERROR(EINVAL);
            }
            lut->lut[comp][val] = av_clip((int)res, min[color], max[color]);
            av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]);
        }
    }

    return 0;
}

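/*
 * Apply the per-component LUTs to a frame. Packed RGB formats walk each
 * pixel once and index the tables by byte position within the pixel;
 * planar formats process each plane separately, with the chroma planes
 * scaled down by the subsampling shifts. If the input frame is writable
 * it is modified in place.
 */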
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LutContext *lut = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    uint8_t *inrow, *outrow, *inrow0, *outrow0;
    int i, j, plane, direct = 0;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (lut->is_rgb) {
        /* packed */
        inrow0  = in ->data[0];
        outrow0 = out->data[0];

        for (i = 0; i < in->height; i++) {
            int w = inlink->w;
            const uint8_t (*tab)[256] = (const uint8_t (*)[256])lut->lut;
            inrow  = inrow0;
            outrow = outrow0;
            for (j = 0; j < w; j++) {
                switch (lut->step) {
                case 4:  outrow[3] = tab[3][inrow[3]]; // Fall-through
                case 3:  outrow[2] = tab[2][inrow[2]]; // Fall-through
                case 2:  outrow[1] = tab[1][inrow[1]]; // Fall-through
                default: outrow[0] = tab[0][inrow[0]];
                }
                outrow += lut->step;
                inrow  += lut->step;
            }
            inrow0  += in ->linesize[0];
            outrow0 += out->linesize[0];
        }
    } else {
        /* planar */
        for (plane = 0; plane < 4 && in->data[plane]; plane++) {
            int vsub = plane == 1 || plane == 2 ? lut->vsub : 0;
            int hsub = plane == 1 || plane == 2 ? lut->hsub : 0;

            inrow  = in ->data[plane];
            outrow = out->data[plane];

            for (i = 0; i < (in->height + (1<<vsub) - 1)>>vsub; i++) {
                const uint8_t *tab = lut->lut[plane];
                int w = (inlink->w + (1<<hsub) - 1)>>hsub;
                for (j = 0; j < w; j++)
                    outrow[j] = tab[inrow[j]];
                inrow  += in ->linesize[plane];
                outrow += out->linesize[plane];
            }
        }
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { .name = NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { .name = NULL }
};

#define DEFINE_LUT_FILTER(name_, description_)                         \
    AVFilter avfilter_vf_##name_ = {                                   \
        .name          = #name_,                                       \
        .description   = NULL_IF_CONFIG_SMALL(description_),           \
        .priv_size     = sizeof(LutContext),                           \
        .priv_class    = &name_ ## _class,                             \
                                                                       \
        .init          = name_##_init,                                 \
        .uninit        = uninit,                                       \
        .query_formats = query_formats,                                \
                                                                       \
        .inputs        = inputs,                                       \
        .outputs       = outputs,                                      \
    }

#if CONFIG_LUT_FILTER

#define lut_options options
AVFILTER_DEFINE_CLASS(lut);

static int lut_init(AVFilterContext *ctx)
{
    return 0;
}

DEFINE_LUT_FILTER(lut, "Compute and apply a lookup table to the RGB/YUV input video.");
#endif

#if CONFIG_LUTYUV_FILTER

#define lutyuv_options options
AVFILTER_DEFINE_CLASS(lutyuv);

static int lutyuv_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;

    lut->is_yuv = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutyuv, "Compute and apply a lookup table to the YUV input video.");
#endif

#if CONFIG_LUTRGB_FILTER

#define lutrgb_options options
AVFILTER_DEFINE_CLASS(lutrgb);

static int lutrgb_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;

    lut->is_rgb = 1;

    return 0;
}

DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input video.");
#endif

#if CONFIG_NEGATE_FILTER

static const AVOption negate_options[] = {
    { "negate_alpha", "negate the alpha component as well", OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(negate);

static int negate_init(AVFilterContext *ctx)
{
    LutContext *lut = ctx->priv;
    int i;

    av_log(ctx, AV_LOG_DEBUG, "negate_alpha:%d\n", lut->negate_alpha);

    for (i = 0; i < 4; i++) {
        lut->comp_expr_str[i] = av_strdup((i == 3 && !lut->negate_alpha) ?
                                          "val" : "negval");
        if (!lut->comp_expr_str[i]) {
            uninit(ctx);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

DEFINE_LUT_FILTER(negate, "Negate input video.");

#endif