/*
 * Copyright (c) 2003 Daniel Moreno
 * Copyright (c) 2010 Baptiste Coudurier
 * Copyright (c) 2012 Loren Merritt
 *
 * This file is part of FFmpeg, ported from MPlayer.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * high quality 3d video denoiser, ported from MPlayer
 * libmpcodecs/vf_hqdn3d.c.
 */

#include <float.h>

#include "config.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "vf_hqdn3d.h"

#define LUT_BITS (depth==16 ? 8 : 4)
#define LOAD(x) (((depth == 8 ? src[x] : AV_RN16A(src + (x) * 2)) << (16 - depth))\
                 + (((1 << (16 - depth)) - 1) >> 1))
#define STORE(x,val) (depth == 8 ? dst[x] = (val) >> (16 - depth) : \
                      AV_WN16A(dst + (x) * 2, (val) >> (16 - depth)))

/* One low-pass step: pull cur toward prev by a LUT-driven amount that
 * depends on how similar the two samples are. */
av_always_inline
static uint32_t lowpass(int prev, int cur, int16_t *coef, int depth)
{
    int d = (prev - cur) >> (8 - LUT_BITS);
    return cur + coef[d];
}

/* Temporal-only denoising: blend each pixel with the corresponding pixel
 * of the previously filtered frame. */
av_always_inline
static void denoise_temporal(uint8_t *src, uint8_t *dst,
                             uint16_t *frame_ant,
                             int w, int h, int sstride, int dstride,
                             int16_t *temporal, int depth)
{
    long x, y;
    uint32_t tmp;

    temporal += 256 << LUT_BITS;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            frame_ant[x] = tmp = lowpass(frame_ant[x], LOAD(x), temporal, depth);
            STORE(x, tmp);
        }
        src += sstride;
        dst += dstride;
        frame_ant += w;
    }
}

/* Spatial + temporal denoising: low-pass against the left and top
 * neighbours, then against the previously filtered frame. */
av_always_inline
static void denoise_spatial(HQDN3DContext *hqdn3d,
                            uint8_t *src, uint8_t *dst,
                            uint16_t *line_ant, uint16_t *frame_ant,
                            int w, int h, int sstride, int dstride,
                            int16_t *spatial, int16_t *temporal, int depth)
{
    long x, y;
    uint32_t pixel_ant;
    uint32_t tmp;

    spatial  += 256 << LUT_BITS;
    temporal += 256 << LUT_BITS;

    /* First line has no top neighbor. Only left one for each tmp and
     * last frame */
    pixel_ant = LOAD(0);
    for (x = 0; x < w; x++) {
        line_ant[x] = tmp = pixel_ant = lowpass(pixel_ant, LOAD(x), spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }

    for (y = 1; y < h; y++) {
        src += sstride;
        dst += dstride;
        frame_ant += w;
        if (hqdn3d->denoise_row[depth]) {
            hqdn3d->denoise_row[depth](src, dst, line_ant, frame_ant, w, spatial, temporal);
            continue;
        }
        pixel_ant = LOAD(0);
        for (x = 0; x < w-1; x++) {
            line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
            pixel_ant = lowpass(pixel_ant, LOAD(x+1), spatial, depth);
            frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
            STORE(x, tmp);
        }
        line_ant[x] = tmp = lowpass(line_ant[x], pixel_ant, spatial, depth);
        frame_ant[x] = tmp = lowpass(frame_ant[x], tmp, temporal, depth);
        STORE(x, tmp);
    }
}

av_always_inline
static void denoise_depth(HQDN3DContext *hqdn3d,
                          uint8_t *src, uint8_t *dst,
                          uint16_t *line_ant, uint16_t **frame_ant_ptr,
                          int w, int h, int sstride, int dstride,
                          int16_t *spatial, int16_t *temporal, int depth)
{
    // FIXME: For 16bit depth, frame_ant could be a pointer to the previous
    // filtered frame rather than a separate buffer.
    long x, y;
    uint16_t *frame_ant = *frame_ant_ptr;
    if (!frame_ant) {
        uint8_t *frame_src = src;
        *frame_ant_ptr = frame_ant = av_malloc(w*h*sizeof(uint16_t));
        for (y = 0; y < h; y++, src += sstride, frame_ant += w)
            for (x = 0; x < w; x++)
                frame_ant[x] = LOAD(x);
        src = frame_src;
        frame_ant = *frame_ant_ptr;
    }

    if (spatial[0])
        denoise_spatial(hqdn3d, src, dst, line_ant, frame_ant,
                        w, h, sstride, dstride, spatial, temporal, depth);
    else
        denoise_temporal(src, dst, frame_ant,
                         w, h, sstride, dstride, temporal, depth);
}

#define denoise(...)                                                          \
    switch (hqdn3d->depth) {\
        case  8: denoise_depth(__VA_ARGS__,  8); break;\
        case  9: denoise_depth(__VA_ARGS__,  9); break;\
        case 10: denoise_depth(__VA_ARGS__, 10); break;\
        case 16: denoise_depth(__VA_ARGS__, 16); break;\
    }

/* Precompute the low-pass response for every possible (quantized) sample
 * difference, for a given strength (dist25). */
static int16_t *precalc_coefs(double dist25, int depth)
{
    int i;
    double gamma, simil, C;
    int16_t *ct = av_malloc((512<<LUT_BITS)*sizeof(int16_t));
    if (!ct)
        return NULL;

    gamma = log(0.25) / log(1.0 - FFMIN(dist25,252.0)/255.0 - 0.00001);

    for (i = -256<<LUT_BITS; i < 256<<LUT_BITS; i++) {
        double f = ((i<<(9-LUT_BITS)) + (1<<(8-LUT_BITS)) - 1) / 512.0; // midpoint of the bin
        simil = FFMAX(0, 1.0 - fabs(f) / 255.0);
        C = pow(simil, gamma) * 256.0 * f;
        ct[(256<<LUT_BITS)+i] = lrint(C);
    }

    ct[0] = !!dist25;
    return ct;
}

#define PARAM1_DEFAULT 4.0
#define PARAM2_DEFAULT 3.0
#define PARAM3_DEFAULT 6.0

static int init(AVFilterContext *ctx)
{
    HQDN3DContext *hqdn3d = ctx->priv;

    if (!hqdn3d->strength[LUMA_SPATIAL])
        hqdn3d->strength[LUMA_SPATIAL] = PARAM1_DEFAULT;
    if (!hqdn3d->strength[CHROMA_SPATIAL])
        hqdn3d->strength[CHROMA_SPATIAL] = PARAM2_DEFAULT * hqdn3d->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
    if (!hqdn3d->strength[LUMA_TMP])
        hqdn3d->strength[LUMA_TMP] = PARAM3_DEFAULT * hqdn3d->strength[LUMA_SPATIAL] / PARAM1_DEFAULT;
    if (!hqdn3d->strength[CHROMA_TMP])
        hqdn3d->strength[CHROMA_TMP] = hqdn3d->strength[LUMA_TMP] * hqdn3d->strength[CHROMA_SPATIAL] / hqdn3d->strength[LUMA_SPATIAL];

    av_log(ctx, AV_LOG_VERBOSE, "ls:%f cs:%f lt:%f ct:%f\n",
           hqdn3d->strength[LUMA_SPATIAL], hqdn3d->strength[CHROMA_SPATIAL],
           hqdn3d->strength[LUMA_TMP], hqdn3d->strength[CHROMA_TMP]);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    HQDN3DContext *hqdn3d = ctx->priv;

    av_freep(&hqdn3d->coefs[0]);
    av_freep(&hqdn3d->coefs[1]);
    av_freep(&hqdn3d->coefs[2]);
    av_freep(&hqdn3d->coefs[3]);
    av_freep(&hqdn3d->line);
    av_freep(&hqdn3d->frame_prev[0]);
    av_freep(&hqdn3d->frame_prev[1]);
    av_freep(&hqdn3d->frame_prev[2]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVJ440P,
        AV_NE( AV_PIX_FMT_YUV420P9BE, AV_PIX_FMT_YUV420P9LE ),
        AV_NE( AV_PIX_FMT_YUV422P9BE, AV_PIX_FMT_YUV422P9LE ),
        AV_NE( AV_PIX_FMT_YUV444P9BE, AV_PIX_FMT_YUV444P9LE ),
        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i;

    hqdn3d->hsub  = desc->log2_chroma_w;
    hqdn3d->vsub  = desc->log2_chroma_h;
    hqdn3d->depth = desc->comp[0].depth_minus1+1;

    hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
    if (!hqdn3d->line)
        return AVERROR(ENOMEM);

    for (i = 0; i < 4; i++) {
        hqdn3d->coefs[i] = precalc_coefs(hqdn3d->strength[i], hqdn3d->depth);
        if (!hqdn3d->coefs[i])
            return AVERROR(ENOMEM);
    }

    if (ARCH_X86)
        ff_hqdn3d_init_x86(hqdn3d);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    AVFrame *out;
    int direct, c;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    for (c = 0; c < 3; c++) {
        denoise(hqdn3d, in->data[c], out->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                in->width  >> (!!c * hqdn3d->hsub),
                in->height >> (!!c * hqdn3d->vsub),
                in->linesize[c], out->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

#define OFFSET(x) offsetof(HQDN3DContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "luma_spatial",   "spatial luma strength",    OFFSET(strength[LUMA_SPATIAL]),   AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_spatial", "spatial chroma strength",  OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "luma_tmp",       "temporal luma strength",   OFFSET(strength[LUMA_TMP]),       AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { "chroma_tmp",     "temporal chroma strength", OFFSET(strength[CHROMA_TMP]),     AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
    { NULL },
};

static const AVClass hqdn3d_class = {
    .class_name = "hqdn3d",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_vf_hqdn3d_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter avfilter_vf_hqdn3d = {
    .name          = "hqdn3d",
    .description   = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),

    .priv_size     = sizeof(HQDN3DContext),
    .priv_class    = &hqdn3d_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = avfilter_vf_hqdn3d_inputs,
    .outputs       = avfilter_vf_hqdn3d_outputs,
};
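
/* Usage sketch (illustrative, not part of the filter itself): the four
 * AVOptions above default to 0, and init() then derives the effective
 * strengths from PARAM1..PARAM3 (4.0, 3.0, 6.0, and 6.0*3.0/4.0 = 4.5).
 * An invocation that spells those defaults out explicitly would look like,
 * assuming a typical ffmpeg command line:
 *
 *   ffmpeg -i input.mkv \
 *          -vf hqdn3d=luma_spatial=4.0:chroma_spatial=3.0:luma_tmp=6.0:chroma_tmp=4.5 \
 *          output.mkv
 */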