ffmpeg/libavfilter/vf_edgedetect.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
/*
 * Copyright (c) 2012 Clément Bœsch
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Edge detection filter
 *
 * @see https://en.wikipedia.org/wiki/Canny_edge_detector
 */
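/*
 * For example, with the option names declared below, the filter can be
 * invoked from the ffmpeg command line as:
 *
 *     ffmpeg -i input.mp4 -vf edgedetect=low=0.1:high=0.4 output.mp4
 */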

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct {
    const AVClass *class;
    uint8_t  *tmpbuf;
    uint16_t *gradients;
    char     *directions;
    double   low, high;
    uint8_t  low_u8, high_u8;
} EdgeDetectContext;

#define OFFSET(x) offsetof(EdgeDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption edgedetect_options[] = {
    { "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
    { "low",  "set low threshold",  OFFSET(low),  AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
    { NULL },
};

AVFILTER_DEFINE_CLASS(edgedetect);

static av_cold int init(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->low_u8  = edgedetect->low  * 255. + .5;
    edgedetect->high_u8 = edgedetect->high * 255. + .5;
    return 0;
}
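/*
 * The user-visible thresholds are doubles in [0,1]; init() rescales them
 * once to the 8-bit range used by double_threshold(). With the defaults,
 * low = 20/255 gives low_u8 = 20 and high = 50/255 gives high_u8 = 50
 * (the + .5 rounds to the nearest integer).
 */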

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE};
    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}
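/*
 * Only AV_PIX_FMT_GRAY8 is advertised, so the filter graph is expected to
 * convert the input to 8-bit grayscale before this filter runs, and the
 * output frame is the grayscale edge map.
 */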

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;

    edgedetect->tmpbuf     = av_malloc(inlink->w * inlink->h);
    edgedetect->gradients  = av_calloc(inlink->w * inlink->h, sizeof(*edgedetect->gradients));
    edgedetect->directions = av_malloc(inlink->w * inlink->h);
    if (!edgedetect->tmpbuf || !edgedetect->gradients || !edgedetect->directions)
        return AVERROR(ENOMEM);
    return 0;
}

static void gaussian_blur(AVFilterContext *ctx, int w, int h,
                          uint8_t *dst, int dst_linesize,
                          const uint8_t *src, int src_linesize)
{
    int i, j;

    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    for (j = 2; j < h - 2; j++) {
        dst[0] = src[0];
        dst[1] = src[1];
        for (i = 2; i < w - 2; i++) {
            /* Gaussian mask of size 5x5 with sigma = 1.4 */
            dst[i] = ((src[-2*src_linesize + i-2] + src[2*src_linesize + i-2]) * 2
                    + (src[-2*src_linesize + i-1] + src[2*src_linesize + i-1]) * 4
                    + (src[-2*src_linesize + i  ] + src[2*src_linesize + i  ]) * 5
                    + (src[-2*src_linesize + i+1] + src[2*src_linesize + i+1]) * 4
                    + (src[-2*src_linesize + i+2] + src[2*src_linesize + i+2]) * 2

                    + (src[  -src_linesize + i-2] + src[  src_linesize + i-2]) *  4
                    + (src[  -src_linesize + i-1] + src[  src_linesize + i-1]) *  9
                    + (src[  -src_linesize + i  ] + src[  src_linesize + i  ]) * 12
                    + (src[  -src_linesize + i+1] + src[  src_linesize + i+1]) *  9
                    + (src[  -src_linesize + i+2] + src[  src_linesize + i+2]) *  4

                    + src[i-2] *  5
                    + src[i-1] * 12
                    + src[i  ] * 15
                    + src[i+1] * 12
                    + src[i+2] *  5) / 159;
        }
        dst[i    ] = src[i    ];
        dst[i + 1] = src[i + 1];

        dst += dst_linesize;
        src += src_linesize;
    }
    memcpy(dst, src, w); dst += dst_linesize; src += src_linesize;
    memcpy(dst, src, w);
}
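/*
 * The weights above implement the following integer-scaled 5x5 Gaussian
 * kernel (sigma = 1.4), whose coefficients sum to the 159 normalization:
 *
 *     2  4  5  4  2
 *     4  9 12  9  4
 *     5 12 15 12  5
 *     4  9 12  9  4
 *     2  4  5  4  2
 *
 * The two outermost rows and columns, where the kernel does not fit, are
 * copied through unfiltered.
 */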

enum {
    DIRECTION_45UP,
    DIRECTION_45DOWN,
    DIRECTION_HORIZONTAL,
    DIRECTION_VERTICAL,
};

static int get_rounded_direction(int gx, int gy)
{
    /* reference angles:
     *   tan( pi/8) = sqrt(2)-1
     *   tan(3pi/8) = sqrt(2)+1
     * Gy/Gx is the tangent of the angle (theta), so Gy/Gx is compared against
     * <ref-angle>, or more simply Gy against <ref-angle>*Gx
     *
     * Gx and Gy bounds = [-1020;1020], using 16-bit arithmetic:
     *   round((sqrt(2)-1) * (1<<16)) =  27146
     *   round((sqrt(2)+1) * (1<<16)) = 158218
     */
    if (gx) {
        int tanpi8gx, tan3pi8gx;

        if (gx < 0)
            gx = -gx, gy = -gy;
        gy <<= 16;
        tanpi8gx  =  27146 * gx;
        tan3pi8gx = 158218 * gx;
        if (gy > -tan3pi8gx && gy < -tanpi8gx)  return DIRECTION_45UP;
        if (gy > -tanpi8gx  && gy <  tanpi8gx)  return DIRECTION_HORIZONTAL;
        if (gy >  tanpi8gx  && gy <  tan3pi8gx) return DIRECTION_45DOWN;
    }
    return DIRECTION_VERTICAL;
}
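/*
 * Example: gx = 10, gy = 3 gives gy << 16 = 196608 and tanpi8gx = 271460,
 * so -tanpi8gx < gy < tanpi8gx and the gradient is classified as
 * DIRECTION_HORIZONTAL (the angle, about 16.7 degrees, is below pi/8).
 */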

static void sobel(AVFilterContext *ctx, int w, int h,
                  uint16_t *dst, int dst_linesize,
                  const uint8_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            const int gx =
                -1*src[-src_linesize + i-1] + 1*src[-src_linesize + i+1]
                -2*src[                i-1] + 2*src[                i+1]
                -1*src[ src_linesize + i-1] + 1*src[ src_linesize + i+1];
            const int gy =
                -1*src[-src_linesize + i-1] + 1*src[ src_linesize + i-1]
                -2*src[-src_linesize + i  ] + 2*src[ src_linesize + i  ]
                -1*src[-src_linesize + i+1] + 1*src[ src_linesize + i+1];

            dst[i] = FFABS(gx) + FFABS(gy);
            edgedetect->directions[j*w + i] = get_rounded_direction(gx, gy);
        }
    }
}
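/*
 * gx and gy above are the standard 3x3 Sobel convolutions:
 *
 *          -1  0  1              -1 -2 -1
 *     Gx = -2  0  2         Gy =  0  0  0
 *          -1  0  1               1  2  1
 *
 * The gradient magnitude is approximated as |gx| + |gy|; each component is
 * at most 4*255 = 1020, so the sum easily fits in the 16-bit gradient plane.
 */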

static void non_maximum_suppression(AVFilterContext *ctx, int w, int h,
                                    uint8_t *dst, int dst_linesize,
                                    const uint16_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;

#define COPY_MAXIMA(ay, ax, by, bx) do {                \
    if (src[i] > src[(ay)*src_linesize + i+(ax)] &&     \
        src[i] > src[(by)*src_linesize + i+(bx)])       \
        dst[i] = av_clip_uint8(src[i]);                 \
} while (0)

    for (j = 1; j < h - 1; j++) {
        dst += dst_linesize;
        src += src_linesize;
        for (i = 1; i < w - 1; i++) {
            switch (edgedetect->directions[j*w + i]) {
            case DIRECTION_45UP:       COPY_MAXIMA( 1, -1, -1,  1); break;
            case DIRECTION_45DOWN:     COPY_MAXIMA(-1, -1,  1,  1); break;
            case DIRECTION_HORIZONTAL: COPY_MAXIMA( 0, -1,  0,  1); break;
            case DIRECTION_VERTICAL:   COPY_MAXIMA(-1,  0,  1,  0); break;
            }
        }
    }
}
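/*
 * COPY_MAXIMA keeps a pixel only if its gradient magnitude is strictly
 * greater than both neighbours along the rounded gradient direction (e.g.
 * the left and right neighbours for a horizontal gradient); everything else
 * stays zero in the pre-cleared destination buffer, which thins each edge
 * to a one-pixel-wide line.
 */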

static void double_threshold(AVFilterContext *ctx, int w, int h,
                             uint8_t *dst, int dst_linesize,
                             const uint8_t *src, int src_linesize)
{
    int i, j;
    EdgeDetectContext *edgedetect = ctx->priv;
    const int low  = edgedetect->low_u8;
    const int high = edgedetect->high_u8;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (src[i] > high) {
                dst[i] = src[i];
                continue;
            }

            /* look at the 8 neighbours only for non-border pixels */
            if (!(!i || i == w - 1 || !j || j == h - 1) &&
                src[i] > low &&
                (src[-src_linesize + i-1] > high ||
                 src[-src_linesize + i  ] > high ||
                 src[-src_linesize + i+1] > high ||
                 src[                i-1] > high ||
                 src[                i+1] > high ||
                 src[ src_linesize + i-1] > high ||
                 src[ src_linesize + i  ] > high ||
                 src[ src_linesize + i+1] > high))
                dst[i] = src[i];
            else
                dst[i] = 0;
        }
        dst += dst_linesize;
        src += src_linesize;
    }
}
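/*
 * This is the hysteresis step of the Canny detector: pixels above the high
 * threshold are always kept, pixels at or below the low threshold are
 * dropped, and pixels in between survive only when one of their eight
 * neighbours is itself above the high threshold.
 */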

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    EdgeDetectContext *edgedetect = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    uint8_t  *tmpbuf    = edgedetect->tmpbuf;
    uint16_t *gradients = edgedetect->gradients;
    int direct = 0;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* gaussian filter to reduce noise */
    gaussian_blur(ctx, inlink->w, inlink->h,
                  tmpbuf,      inlink->w,
                  in->data[0], in->linesize[0]);

    /* compute the 16-bit gradients and directions for the next step */
    sobel(ctx, inlink->w, inlink->h,
          gradients, inlink->w,
          tmpbuf,    inlink->w);

    /* non_maximum_suppression() will actually keep & clip what's necessary and
     * ignore the rest, so we need a clean output buffer */
    memset(tmpbuf, 0, inlink->w * inlink->h);
    non_maximum_suppression(ctx, inlink->w, inlink->h,
                            tmpbuf,    inlink->w,
                            gradients, inlink->w);

    /* keep high values, or low values surrounded by high values */
    double_threshold(ctx, inlink->w, inlink->h,
                     out->data[0], out->linesize[0],
                     tmpbuf,       inlink->w);

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    EdgeDetectContext *edgedetect = ctx->priv;
    av_freep(&edgedetect->tmpbuf);
    av_freep(&edgedetect->gradients);
    av_freep(&edgedetect->directions);
}

static const AVFilterPad edgedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad edgedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_edgedetect = {
    .name          = "edgedetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect and draw edge."),
    .priv_size     = sizeof(EdgeDetectContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = edgedetect_inputs,
    .outputs       = edgedetect_outputs,
    .priv_class    = &edgedetect_class,
};