/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_mmx.h"
#include "libavcodec/vc1dsp.h"
#include "vc1dsp.h"

#if HAVE_INLINE_ASM

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"

/** Add the rounder in mm7 to mm3 and mm4, then shift the results right */
#define NORMALIZE_MMX(SHIFT) \
     "paddw %%mm7, %%mm3 \n\t" /* +bias-r */ \
     "paddw %%mm7, %%mm4 \n\t" /* +bias-r */ \
     "psraw "SHIFT", %%mm3 \n\t" \
     "psraw "SHIFT", %%mm4 \n\t"

#define TRANSFER_DO_PACK(OP) \
     "packuswb %%mm4, %%mm3 \n\t" \
     OP((%2), %%mm3) \
     "movq %%mm3, (%2) \n\t"

#define TRANSFER_DONT_PACK(OP) \
     OP(0(%2), %%mm3) \
     OP(8(%2), %%mm4) \
     "movq %%mm3, 0(%2) \n\t" \
     "movq %%mm4, 8(%2) \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)   "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND) \
     "movd "ROUND", %%mm7 \n\t" \
     "punpcklwd %%mm7, %%mm7 \n\t" \
     "punpckldq %%mm7, %%mm7 \n\t"
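
/*
 * Illustrative scalar model of LOAD_ROUNDER_MMX (reference only, not used by
 * the MMX code in this file; the helper name is hypothetical): the
 * movd + punpcklwd + punpckldq sequence simply broadcasts the low 16 bits of
 * the rounder into all four word lanes of mm7.
 */
static av_unused void load_rounder_ref(int16_t round, int16_t mm7[4])
{
    int i;
    for (i = 0; i < 4; i++)
        mm7[i] = round;
}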

#define SHIFT2_LINE(OFF, R0,R1,R2,R3) \
     "paddw %%mm"#R2", %%mm"#R1" \n\t" \
     "movd (%0,%3), %%mm"#R0" \n\t" \
     "pmullw %%mm6, %%mm"#R1" \n\t" \
     "punpcklbw %%mm0, %%mm"#R0" \n\t" \
     "movd (%0,%2), %%mm"#R3" \n\t" \
     "psubw %%mm"#R0", %%mm"#R1" \n\t" \
     "punpcklbw %%mm0, %%mm"#R3" \n\t" \
     "paddw %%mm7, %%mm"#R1" \n\t" \
     "psubw %%mm"#R3", %%mm"#R1" \n\t" \
     "psraw %4, %%mm"#R1" \n\t" \
     "movq %%mm"#R1", "#OFF"(%1) \n\t" \
     "add %2, %0 \n\t"

/** Sacrificing mm6 allows pipelining of loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov $3, %%"REG_c" \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1: \n\t"
        "movd (%0), %%mm2 \n\t"
        "add %2, %0 \n\t"
        "movd (%0), %%mm3 \n\t"
        "punpcklbw %%mm0, %%mm2 \n\t"
        "punpcklbw %%mm0, %%mm3 \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub %6, %0 \n\t"
        "add $8, %1 \n\t"
        "dec %%"REG_c" \n\t"
        "jnz 1b \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}
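
/*
 * Illustrative scalar reference for the vertical pass above (reference only,
 * not used by the MMX code; the helper name is hypothetical). As read from
 * the asm, each output sample applies the (-1, 9, 9, -1) half-pel taps down a
 * column, adds the caller-supplied rounder and shifts, storing unclamped
 * 16-bit intermediates in an 8-row band that is 12 samples wide (row stride
 * of 12 samples, i.e. 24 bytes).
 */
static av_unused void vc1_put_ver_16b_shift2_ref(int16_t *dst, const uint8_t *src,
                                                 int stride, int rnd, int shift)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 12; x++)
            dst[y * 12 + x] = (-     src[x + (y - 1) * stride]
                               + 9 * src[x +  y      * stride]
                               + 9 * src[x + (y + 1) * stride]
                               -     src[x + (y + 2) * stride] + rnd) >> shift;
}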

/**
 * Data is already unpacked, so some operations can be done directly from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1: \n\t"\
        "movq 2*0+0(%1), %%mm1 \n\t"\
        "movq 2*0+8(%1), %%mm2 \n\t"\
        "movq 2*1+0(%1), %%mm3 \n\t"\
        "movq 2*1+8(%1), %%mm4 \n\t"\
        "paddw 2*3+0(%1), %%mm1 \n\t"\
        "paddw 2*3+8(%1), %%mm2 \n\t"\
        "paddw 2*2+0(%1), %%mm3 \n\t"\
        "paddw 2*2+8(%1), %%mm4 \n\t"\
        "pmullw %%mm5, %%mm3 \n\t"\
        "pmullw %%mm5, %%mm4 \n\t"\
        "psubw %%mm1, %%mm3 \n\t"\
        "psubw %%mm2, %%mm4 \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw %%mm6, %%mm3 \n\t"\
        "paddw %%mm6, %%mm4 \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add $24, %1 \n\t"\
        "add %3, %2 \n\t"\
        "decl %0 \n\t"\
        "jnz 1b \n\t"\
        : "+r"(h), "+r" (src), "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
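
/*
 * Illustrative scalar reference for the horizontal pass built above
 * (reference only, not used by the MMX code; the helper name is
 * hypothetical). It consumes the 16-bit intermediates of a vertical pass,
 * applies the same (-1, 9, 9, -1) taps horizontally and shifts by 7. The MMX
 * version additionally folds a bias into rnd ("Add -1024 bias") and re-adds
 * ff_pw_128 after the shift; the two cancel arithmetically
 * (-16*1024 >> 7 == -128) and presumably only keep the packed 16-bit
 * intermediates in a safe range.
 */
static av_unused void put_vc1_hor_16b_shift2_ref(uint8_t *dst, int stride,
                                                 const int16_t *src, int rnd)
{
    int x, y;
    src -= 1;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = (-src[x] + 9 * src[x + 1] + 9 * src[x + 2] - src[x + 3] + rnd) >> 7;
            dst[x] = av_clip_uint8(v);
        }
        src += 12;   /* the 16-bit temporary buffer has a row stride of 12 samples */
        dst += stride;
    }
}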

/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov $8, %%"REG_c" \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1: \n\t"\
        "movd 0(%0 ), %%mm3 \n\t"\
        "movd 4(%0 ), %%mm4 \n\t"\
        "movd 0(%0,%2), %%mm1 \n\t"\
        "movd 4(%0,%2), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm0, %%mm3 \n\t"\
        "punpcklbw %%mm0, %%mm4 \n\t"\
        "punpcklbw %%mm0, %%mm1 \n\t"\
        "punpcklbw %%mm0, %%mm2 \n\t"\
        "paddw %%mm1, %%mm3 \n\t"\
        "paddw %%mm2, %%mm4 \n\t"\
        "movd 0(%0,%3), %%mm1 \n\t"\
        "movd 4(%0,%3), %%mm2 \n\t"\
        "pmullw %%mm6, %%mm3 \n\t" /* 0,9,9,0*/\
        "pmullw %%mm6, %%mm4 \n\t" /* 0,9,9,0*/\
        "punpcklbw %%mm0, %%mm1 \n\t"\
        "punpcklbw %%mm0, %%mm2 \n\t"\
        "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,0*/\
        "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,0*/\
        "movd 0(%0,%2), %%mm1 \n\t"\
        "movd 4(%0,%2), %%mm2 \n\t"\
        "punpcklbw %%mm0, %%mm1 \n\t"\
        "punpcklbw %%mm0, %%mm2 \n\t"\
        "psubw %%mm1, %%mm3 \n\t" /*-1,9,9,-1*/\
        "psubw %%mm2, %%mm4 \n\t" /*-1,9,9,-1*/\
        NORMALIZE_MMX("$4")\
        "packuswb %%mm4, %%mm3 \n\t"\
        OP((%1), %%mm3)\
        "movq %%mm3, (%1) \n\t"\
        "add %6, %0 \n\t"\
        "add %4, %1 \n\t"\
        "dec %%"REG_c" \n\t"\
        "jnz 1b \n\t"\
        : "+r"(src), "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "g"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
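
/*
 * Illustrative scalar reference for the half-pel filter built above
 * (reference only, not used by the MMX code; the helper name is
 * hypothetical). The offset argument selects the filter direction: 1 for
 * horizontal, stride for vertical. Taps are (-1, 9, 9, -1) / 16.
 */
static av_unused void put_vc1_shift2_ref(uint8_t *dst, const uint8_t *src,
                                         int stride, int rnd, int offset)
{
    int x, y;
    rnd = 8 - rnd;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = (-src[x - offset] + 9 * src[x] + 9 * src[x + offset]
                     - src[x + 2 * offset] + rnd) >> 4;
            dst[x] = av_clip_uint8(v);
        }
        src += stride;
        dst += stride;
    }
}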

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ   "movd 1" for packed 8-bit data, or "movq 2" if the data read
 *               is already unpacked to 16 bits.
 * @param A1     Address of 1st tap (beware of unpacked/packed).
 * @param A2     Address of 2nd tap
 * @param A3     Address of 3rd tap
 * @param A4     Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4) \
     MOVQ "*0+"A1", %%mm1 \n\t" \
     MOVQ "*4+"A1", %%mm2 \n\t" \
     UNPACK("%%mm1") \
     UNPACK("%%mm2") \
     "pmullw "MANGLE(ff_pw_3)", %%mm1\n\t" \
     "pmullw "MANGLE(ff_pw_3)", %%mm2\n\t" \
     MOVQ "*0+"A2", %%mm3 \n\t" \
     MOVQ "*4+"A2", %%mm4 \n\t" \
     UNPACK("%%mm3") \
     UNPACK("%%mm4") \
     "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
     "pmullw %%mm6, %%mm4 \n\t" /* *18 */ \
     "psubw %%mm1, %%mm3 \n\t" /* 18,-3 */ \
     "psubw %%mm2, %%mm4 \n\t" /* 18,-3 */ \
     MOVQ "*0+"A4", %%mm1 \n\t" \
     MOVQ "*4+"A4", %%mm2 \n\t" \
     UNPACK("%%mm1") \
     UNPACK("%%mm2") \
     "psllw $2, %%mm1 \n\t" /* 4* */ \
     "psllw $2, %%mm2 \n\t" /* 4* */ \
     "psubw %%mm1, %%mm3 \n\t" /* -4,18,-3 */ \
     "psubw %%mm2, %%mm4 \n\t" /* -4,18,-3 */ \
     MOVQ "*0+"A3", %%mm1 \n\t" \
     MOVQ "*4+"A3", %%mm2 \n\t" \
     UNPACK("%%mm1") \
     UNPACK("%%mm2") \
     "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
     "pmullw %%mm5, %%mm2 \n\t" /* *53 */ \
     "paddw %%mm1, %%mm3 \n\t" /* -4,53,18,-3 */ \
     "paddw %%mm2, %%mm4 \n\t" /* -4,53,18,-3 */

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed as A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4) \
static void \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
                                 x86_reg src_stride, \
                                 int rnd, int64_t shift) \
{ \
    int h = 8; \
    src -= src_stride; \
    __asm__ volatile( \
        LOAD_ROUNDER_MMX("%5") \
        "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \
        "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \
        ".p2align 3 \n\t" \
        "1: \n\t" \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
        NORMALIZE_MMX("%6") \
        TRANSFER_DONT_PACK(OP_PUT) \
        /* Last 3 (in fact 4) bytes on the line */ \
        "movd 8+"A1", %%mm1 \n\t" \
        DO_UNPACK("%%mm1") \
        "movq %%mm1, %%mm3 \n\t" \
        "paddw %%mm1, %%mm1 \n\t" \
        "paddw %%mm3, %%mm1 \n\t" /* 3* */ \
        "movd 8+"A2", %%mm3 \n\t" \
        DO_UNPACK("%%mm3") \
        "pmullw %%mm6, %%mm3 \n\t" /* *18 */ \
        "psubw %%mm1, %%mm3 \n\t" /* 18,-3 */ \
        "movd 8+"A3", %%mm1 \n\t" \
        DO_UNPACK("%%mm1") \
        "pmullw %%mm5, %%mm1 \n\t" /* *53 */ \
        "paddw %%mm1, %%mm3 \n\t" /* 53,18,-3 */ \
        "movd 8+"A4", %%mm1 \n\t" \
        DO_UNPACK("%%mm1") \
        "psllw $2, %%mm1 \n\t" /* 4* */ \
        "psubw %%mm1, %%mm3 \n\t" \
        "paddw %%mm7, %%mm3 \n\t" \
        "psraw %6, %%mm3 \n\t" \
        "movq %%mm3, 16(%2) \n\t" \
        "add %3, %1 \n\t" \
        "add $24, %2 \n\t" \
        "decl %0 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(h), "+r" (src), "+r" (dst) \
        : "r"(src_stride), "r"(3*src_stride), \
          "m"(rnd), "m"(shift) \
        : "memory" \
    ); \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the offsets are in 16-bit units, so the parameters passed as A1 to A4
 * should be plain offsets from %1.
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME) \
static void \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
                                       const int16_t *src, int rnd) \
{ \
    int h = 8; \
    src -= 1; \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */ \
    __asm__ volatile( \
        LOAD_ROUNDER_MMX("%4") \
        "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
        "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
        ".p2align 3 \n\t" \
        "1: \n\t" \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4) \
        NORMALIZE_MMX("$7") \
        /* Remove bias */ \
        "paddw "MANGLE(ff_pw_128)", %%mm3 \n\t" \
        "paddw "MANGLE(ff_pw_128)", %%mm4 \n\t" \
        TRANSFER_DO_PACK(OP) \
        "add $24, %1 \n\t" \
        "add %3, %2 \n\t" \
        "decl %0 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(h), "+r" (src), "+r" (dst) \
        : "r"(stride), "m"(rnd) \
        : "memory" \
    ); \
}

/**
 * Macro to build the 8-bit, any-direction version of vc1_put_shift[13].
 * Here the filter step is the offset argument (1 for horizontal, stride for
 * vertical). Parameters passed as A1 to A4 must use %3 (offset) and
 * %4 (3*offset).
 *
 * @param NAME Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME) \
static void \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                              x86_reg stride, int rnd, x86_reg offset) \
{ \
    int h = 8; \
    src -= offset; \
    rnd = 32-rnd; \
    __asm__ volatile ( \
        LOAD_ROUNDER_MMX("%6") \
        "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
        "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
        ".p2align 3 \n\t" \
        "1: \n\t" \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd 1", A1, A2, A3, A4) \
        NORMALIZE_MMX("$6") \
        TRANSFER_DO_PACK(OP) \
        "add %5, %1 \n\t" \
        "add %5, %2 \n\t" \
        "decl %0 \n\t" \
        "jnz 1b \n\t" \
        : "+r"(h), "+r" (src), "+r" (dst) \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd) \
        : "memory" \
    ); \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4 )", "0(%1,%3,2)", "0(%1,%3 )", "0(%1 )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1 )", "0(%1,%3 )", "0(%1,%3,2)", "0(%1,%4 )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
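
/*
 * Illustrative scalar reference for the 8-bit 1/4-shift bicubic filter built
 * above (reference only, not used by the MMX code; the helper name is
 * hypothetical). The taps are (-4, 53, 18, -3) / 64; the 3/4-shift variant
 * simply mirrors them.
 */
static av_unused void put_vc1_shift1_ref(uint8_t *dst, const uint8_t *src,
                                         int stride, int rnd, int offset)
{
    int x, y;
    rnd = 32 - rnd;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = (-4 * src[x - offset] + 53 * src[x]
                     + 18 * src[x + offset] - 3 * src[x + 2 * offset] + rnd) >> 6;
            dst[x] = av_clip_uint8(v);
        }
        src += stride;
        dst += stride;
    }
}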

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param  dst    Destination buffer for interpolated pels.
 * @param  src    Source buffer.
 * @param  stride Stride for both src and dst buffers.
 * @param  hmode  Horizontal filter (expressed in quarter-pel shifts).
 * @param  vmode  Vertical filter.
 * @param  rnd    Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
        { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
        { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
        { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0 \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)

/** Macro to ease declaration of the bicubic interpolation functions */
#define DECLARE_FUNCTION(a, b) \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, \
                                               const uint8_t *src, \
                                               ptrdiff_t stride, \
                                               int rnd) \
{ \
    put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmxext(uint8_t *dst, \
                                                  const uint8_t *src, \
                                                  ptrdiff_t stride, \
                                                  int rnd) \
{ \
    avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

static void vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc + 4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}
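
/*
 * Illustrative scalar reference for the DC-only 4x4 inverse transform above
 * (reference only, not used by the MMX code; the helper name is
 * hypothetical): the DC coefficient is scaled exactly as in the C prologue,
 * then added with clamping to every pixel of the 4x4 block, which is what the
 * paddusb/psubusb pair achieves for a signed dc.
 */
static av_unused void vc1_inv_trans_4x4_dc_ref(uint8_t *dest, int linesize,
                                               int16_t *block)
{
    int x, y;
    int dc = block[0];
    dc = (17 * dc + 4) >> 3;
    dc = (17 * dc + 64) >> 7;
    for (y = 0; y < 4; y++) {
        for (x = 0; x < 4; x++)
            dest[x] = av_clip_uint8(dest[x] + dc);
        dest += linesize;
    }
}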

static void vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (17 * dc + 4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = ( 3 * dc + 1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, int linesize,
                                        int16_t *block)
{
    int dc = block[0];
    dc = (3 * dc + 1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq %0, %%mm2 \n\t"
        "movq %1, %%mm3 \n\t"
        "movq %2, %%mm4 \n\t"
        "movq %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movq %%mm2, %0 \n\t"
        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

av_cold void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
{
    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}

av_cold void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
{
    dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmxext;

    dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmxext;
    dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmxext;

    dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmxext;
    dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmxext;
    dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmxext;
    dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmxext;
}
#endif /* HAVE_INLINE_ASM */