/*
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP8 compatible video decoder
 */

#include "dsputil.h"
#include "vp8dsp.h"
#include "libavutil/common.h"

// TODO: Maybe add dequant
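// Inverse 4x4 Walsh-Hadamard transform of the 16 luma DC coefficients; the
// results are scattered into the DC position of each of the 16 luma 4x4
// subblocks before the per-block IDCT.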
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0*4+i] + dc[3*4+i];
        t1 = dc[1*4+i] + dc[2*4+i];
        t2 = dc[1*4+i] - dc[2*4+i];
        t3 = dc[0*4+i] - dc[3*4+i];

        dc[0*4+i] = t0 + t1;
        dc[1*4+i] = t3 + t2;
        dc[2*4+i] = t0 - t1;
        dc[3*4+i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i*4+0] + dc[i*4+3] + 3; // rounding
        t1 = dc[i*4+1] + dc[i*4+2];
        t2 = dc[i*4+1] - dc[i*4+2];
        t3 = dc[i*4+0] - dc[i*4+3] + 3; // rounding
        dc[i*4+0] = 0;
        dc[i*4+1] = 0;
        dc[i*4+2] = 0;
        dc[i*4+3] = 0;

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}

static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

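// Fixed-point (Q16) multipliers used by the VP8 inverse DCT:
// 20091/65536 ~= sqrt(2)*cos(pi/8) - 1 and 35468/65536 ~= sqrt(2)*sin(pi/8).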
#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
#define MUL_35468(a) (((a)*35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0*4+i] + block[2*4+i];
        t1 = block[0*4+i] - block[2*4+i];
        t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]);
        t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]);
        block[0*4+i] = 0;
        block[1*4+i] = 0;
        block[2*4+i] = 0;
        block[3*4+i] = 0;

        tmp[i*4+0] = t0 + t3;
        tmp[i*4+1] = t1 + t2;
        tmp[i*4+2] = t1 - t2;
        tmp[i*4+3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0*4+i] + tmp[2*4+i];
        t1 = tmp[0*4+i] - tmp[2*4+i];
        t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]);
        t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst += stride;
    }
}

static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (block[0] + 4) >> 3;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst += stride;
    }
}

static void vp8_idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
{
    vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride);
    vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride);
    vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride);
    vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride);
}

static void vp8_idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
{
    vp8_idct_dc_add_c(dst+ 0, block[0], stride);
    vp8_idct_dc_add_c(dst+ 4, block[1], stride);
    vp8_idct_dc_add_c(dst+ 8, block[2], stride);
    vp8_idct_dc_add_c(dst+12, block[3], stride);
}

// because I like only having two parameters to pass functions...
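// LOAD_PIXELS names the eight pixels straddling the edge being filtered:
// p3..p0 lie before the edge and q0..q3 after it, with p pointing at q0.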
#define LOAD_PIXELS\
    int av_unused p3 = p[-4*stride];\
    int av_unused p2 = p[-3*stride];\
    int av_unused p1 = p[-2*stride];\
    int av_unused p0 = p[-1*stride];\
    int av_unused q0 = p[ 0*stride];\
    int av_unused q1 = p[ 1*stride];\
    int av_unused q2 = p[ 2*stride];\
    int av_unused q3 = p[ 3*stride];

#define clip_int8(n) (cm[n+0x80]-0x80)

static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap)
{
    LOAD_PIXELS
    int a, f1, f2;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3*(q0 - p0);

    if (is4tap)
        a += clip_int8(p1 - q1);

    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a+4, 127) >> 3;
    f2 = FFMIN(a+3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1*stride] = cm[p0 + f2];
    p[ 0*stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1+1)>>1;
        p[-2*stride] = cm[p1 + a];
        p[ 1*stride] = cm[q1 - a];
    }
}

static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
}

/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 */
static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I)
{
    LOAD_PIXELS
    return simple_limit(p, stride, E)
        && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I
        && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;
}

// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
}

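// Strong macroblock-edge filter: applied when the edge passes normal_limit()
// but shows no high edge variance; it adjusts three pixels on each side of
// the edge instead of the one or two touched by filter_common().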
static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1-q1);
    w = clip_int8(w + 3*(q0-p0));

    a0 = (27*w + 63) >> 7;
    a1 = (18*w + 63) >> 7;
    a2 = ( 9*w + 63) >> 7;

    p[-3*stride] = cm[p2 + a2];
    p[-2*stride] = cm[p1 + a1];
    p[-1*stride] = cm[p0 + a0];
    p[ 0*stride] = cm[q0 - a0];
    p[ 1*stride] = cm[q1 - a1];
    p[ 2*stride] = cm[q2 - a2];
}

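// LOOP_FILTER generates the macroblock-edge and _inner filters for one
// direction: stridea steps from one filtered row/column to the next along the
// edge, strideb steps across the edge (the direction LOAD_PIXELS reads in).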
#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride,\
                                     int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            if (hev(dst+i*stridea, strideb, hev_thresh))\
                filter_common(dst+i*stridea, strideb, 1);\
            else\
                filter_mbedge(dst+i*stridea, strideb);\
        }\
}\
\
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride,\
                                      int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            int hv = hev(dst+i*stridea, strideb, hev_thresh);\
            if (hv) \
                filter_common(dst+i*stridea, strideb, 1);\
            else \
                filter_common(dst+i*stridea, strideb, 0);\
        }\
}

LOOP_FILTER(v, 16, 1, stride,)
LOOP_FILTER(h, 16, stride, 1,)

#define UV_LOOP_FILTER(dir, stridea, strideb) \
LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
                                      int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
}\
static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
                                      int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh);\
}

UV_LOOP_FILTER(v, 1, stride)
UV_LOOP_FILTER(h, stride, 1)

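// Simple loop filter, selected by the frame header's filter-type bit: a single
// threshold (simple_limit) and only the edge pixels p0/q0 are adjusted.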
static void vp8_v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i, stride, flim))
            filter_common(dst+i, stride, 1);
}

static void vp8_h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i*stride, 1, flim))
            filter_common(dst+i*stride, 1, 1);
}

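// Six-tap subpel filter coefficients for the seven fractional positions
// (1/8 .. 7/8); rows whose outer taps are zero are the ones usable by the
// 4-tap variants below.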
static const uint8_t subpel_filters[7][6] = {
    { 0,   6, 123,  12,   1,   0 },
    { 2,  11, 108,  36,   8,   1 },
    { 0,   9,  93,  50,   6,   0 },
    { 3,  16,  77,  77,  16,   3 },
    { 0,   6,  50,  93,   9,   0 },
    { 1,   8,  36, 108,  11,   2 },
    { 0,   1,  12, 123,   6,   0 },
};

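// Full-pel copy, used when both mx and my are zero.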
#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) { \
    int i; \
    for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
        memcpy(dst, src, WIDTH); \
    } \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)

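// The signed filter taps sum to 128, so adding 64 and shifting right by 7
// normalizes the result; cm[] clips it back to the 0..255 pixel range.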
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]

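// VP8_EPEL_H/V generate the one-dimensional 4- and 6-tap subpel filters;
// mx/my (1..7) select the coefficient row from subpel_filters[].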
#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}
#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}
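// VP8_EPEL_HV is the combined case: a first pass filters horizontally into
// tmp_array (including the extra rows the vertical filter needs above and
// below), then a second pass filters tmp_array vertically into dst.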
#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+VTAPS-1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= (2-(VTAPS==4))*srcstride; \
\
    for (y = 0; y < h+VTAPS-1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    tmp = tmp_array + (2-(VTAPS==4))*SIZE; \
    filter = subpel_filters[my-1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

VP8_EPEL_H(16, 4)
VP8_EPEL_H(8, 4)
VP8_EPEL_H(4, 4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8, 6)
VP8_EPEL_H(4, 6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8, 4)
VP8_EPEL_V(4, 4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8, 6)
VP8_EPEL_V(4, 6)
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8, 4, 4)
VP8_EPEL_HV(4, 4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8, 4, 6)
VP8_EPEL_HV(4, 4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8, 6, 4)
VP8_EPEL_HV(4, 6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8, 6, 6)
VP8_EPEL_HV(4, 6, 6)

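// Bilinear (two-tap) interpolation variants: mx/my are 1/8-pel fractional
// offsets, weighted with (8-mx)/mx and (8-my)/my, rounded with +4 and
// normalized by >>3; the hv case again goes through a temporary buffer.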
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
    int c = 8-my, d = my; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*src[x] + d*src[x+stride] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int c = 8-my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    for (y = 0; y < h+1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        tmp += SIZE; \
        src += stride; \
    } \
\
    tmp = tmp_array; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*tmp[x] + d*tmp[x+SIZE] + 4) >> 3; \
        dst += stride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)

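// Fill one size row of the MC function-pointer tables: the second index
// selects the vertical filter (0 = none, 1 = 4-tap, 2 = 6-tap) and the third
// index the horizontal filter.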
#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c

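// Install the C implementations first, then let the per-architecture init
// functions override individual pointers with optimized versions.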
av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
{
    dsp->vp8_luma_dc_wht    = vp8_luma_dc_wht_c;
    dsp->vp8_luma_dc_wht_dc = vp8_luma_dc_wht_dc_c;
    dsp->vp8_idct_add       = vp8_idct_add_c;
    dsp->vp8_idct_dc_add    = vp8_idct_dc_add_c;
    dsp->vp8_idct_dc_add4y  = vp8_idct_dc_add4y_c;
    dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c;

    dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
    dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
    dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
    dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;

    dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
    dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
    dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
    dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;

    dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
    dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;

    VP8_MC_FUNC(0, 16);
    VP8_MC_FUNC(1, 8);
    VP8_MC_FUNC(2, 4);

    VP8_BILINEAR_MC_FUNC(0, 16);
    VP8_BILINEAR_MC_FUNC(1, 8);
    VP8_BILINEAR_MC_FUNC(2, 4);

    if (ARCH_X86)
        ff_vp8dsp_init_x86(dsp);
    if (HAVE_ALTIVEC)
        ff_vp8dsp_init_altivec(dsp);
    if (ARCH_ARM)
        ff_vp8dsp_init_arm(dsp);
}