/* libavcodec/x86/dsputilenc_mmx.c */

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dct.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/mathops.h"
#include "dsputil_mmx.h"

void ff_get_pixels_mmx(int16_t *block, const uint8_t *pixels, int line_size);
void ff_get_pixels_sse2(int16_t *block, const uint8_t *pixels, int line_size);
void ff_diff_pixels_mmx(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride);
int ff_pix_sum16_mmx(uint8_t *pix, int line_size);
int ff_pix_norm1_mmx(uint8_t *pix, int line_size);

#if HAVE_INLINE_ASM

static int sse8_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "shr $1,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
        "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
        "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
        "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}
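
/* For reference: a plain-C sketch (not part of the original file, compiled
 * out via #if 0) of the sum of squared errors that sse8_mmx computes over an
 * 8-pixel-wide block. The name sse8_c_ref is hypothetical. */
#if 0
static int sse8_c_ref(const uint8_t *pix1, const uint8_t *pix2,
                      int line_size, int h)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++) {
            const int d = pix1[x] - pix2[x];
            sum += d * d;          /* accumulate squared difference */
        }
        pix1 += line_size;         /* advance both rows by the stride */
        pix2 += line_size;
    }
    return sum;
}
#endif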

static int sse16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /* OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "add %3,%0\n"
        "add %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}

static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h) {
    int tmp;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq %%mm0, %%mm1\n"
        "psllq $8, %%mm0\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm0\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq %%mm4, %%mm1\n"
        "psllq $8, %%mm4\n"
        "psrlq $8, %%mm1\n"
        "psrlq $8, %%mm4\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h-2)
        : "%ecx");
    return tmp;
}
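
/* For reference: a plain-C sketch (hypothetical, compiled out) of the
 * high-frequency noise measure the asm above approximates: the sum of
 * absolute differences between the horizontal gradients of vertically
 * adjacent rows. Lane handling at the block edge differs slightly in the
 * MMX version, so treat this only as an illustration. */
#if 0
static int hf_noise8_c_ref(const uint8_t *pix, int line_size, int h)
{
    int score = 0;
    for (int y = 0; y < h - 1; y++) {
        for (int x = 0; x < 7; x++) {
            const int d0 = pix[x + 1] - pix[x];                         /* gradient, row y   */
            const int d1 = pix[x + 1 + line_size] - pix[x + line_size]; /* gradient, row y+1 */
            score += FFABS(d1 - d0);
        }
        pix += line_size;
    }
    return score;
}
#endif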

static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h) {
    int tmp;
    uint8_t *pix = pix1;
    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm7,%%mm7\n"
        "pxor %%mm6,%%mm6\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "1:\n"

        "movq (%0),%%mm0\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm0, %%mm2\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm0\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm2\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm0\n"
        "psubw %%mm3, %%mm2\n"
        "psubw %%mm0, %%mm4\n"
        "psubw %%mm2, %%mm5\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm4, %%mm3\n\t"
        "pcmpgtw %%mm5, %%mm1\n\t"
        "pxor %%mm3, %%mm4\n"
        "pxor %%mm1, %%mm5\n"
        "psubw %%mm3, %%mm4\n"
        "psubw %%mm1, %%mm5\n"
        "paddw %%mm4, %%mm5\n"
        "paddw %%mm5, %%mm6\n"

        "add %2,%0\n"

        "movq (%0),%%mm4\n"
        "movq 1(%0),%%mm1\n"
        "movq %%mm4, %%mm5\n"
        "movq %%mm1, %%mm3\n"
        "punpcklbw %%mm7,%%mm4\n"
        "punpcklbw %%mm7,%%mm1\n"
        "punpckhbw %%mm7,%%mm5\n"
        "punpckhbw %%mm7,%%mm3\n"
        "psubw %%mm1, %%mm4\n"
        "psubw %%mm3, %%mm5\n"
        "psubw %%mm4, %%mm0\n"
        "psubw %%mm5, %%mm2\n"
        "pxor %%mm3, %%mm3\n"
        "pxor %%mm1, %%mm1\n"
        "pcmpgtw %%mm0, %%mm3\n\t"
        "pcmpgtw %%mm2, %%mm1\n\t"
        "pxor %%mm3, %%mm0\n"
        "pxor %%mm1, %%mm2\n"
        "psubw %%mm3, %%mm0\n"
        "psubw %%mm1, %%mm2\n"
        "paddw %%mm0, %%mm2\n"
        "paddw %%mm2, %%mm6\n"

        "add %2,%0\n"
        "subl $2, %%ecx\n"
        " jnz 1b\n"

        "movq %%mm6, %%mm0\n"
        "punpcklwd %%mm7,%%mm0\n"
        "punpckhwd %%mm7,%%mm6\n"
        "paddd %%mm0, %%mm6\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddd %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix1), "=r" (tmp)
        : "r" ((x86_reg)line_size), "g" (h-2)
        : "%ecx");
    return tmp + hf_noise8_mmx(pix+8, line_size, h);
}

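/* nsse ("noise preserving sum of squared differences") biases plain SSE by
 * the difference in high-frequency noise between the two blocks, weighted by
 * avctx->nsse_weight (8 when no context is available), so that candidates
 * which smooth away the source's texture score worse. */
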
static int nsse16_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1, score2;

    if (c)
        score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
    else
        score1 = sse16_mmx(c, pix1, pix2, line_size, h);
    score2 = hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(void *p, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
    int score2 = hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int vsad_intra16_mmx(void *v, uint8_t *pix, uint8_t *dummy, int line_size, int h) {
    int tmp;

    av_assert2((((int)pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), %%mm2\n"\
    "movq 8(%0), %%mm3\n"\
    "add %2,%0\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
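
/* For reference: a plain-C sketch (hypothetical, compiled out) of the
 * vertical intra SAD computed above: the summed absolute difference between
 * each row and the row below it within a single picture. */
#if 0
static int vsad_intra16_c_ref(const uint8_t *pix, int line_size, int h)
{
    int score = 0;
    for (int y = 0; y < h - 1; y++) {
        for (int x = 0; x < 16; x++)
            score += FFABS(pix[x] - pix[x + line_size]); /* |row y - row y+1| */
        pix += line_size;
    }
    return score;
}
#endif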

static int vsad_intra16_mmxext(void *v, uint8_t *pix, uint8_t *dummy,
                               int line_size, int h)
{
    int tmp;

    av_assert2((((int)pix) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0), " #out0 "\n"\
    "movq 8(%0), " #out1 "\n"\
    "add %2,%0\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %3,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pxor %%mm7,%%mm7\n"
        "movq (%0),%%mm0\n"
        "movq 8(%0),%%mm1\n"
        "add %2,%0\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%1\n"
        : "+r" (pix), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) {
    int tmp;

    av_assert2((((int)pix1) & 7) == 0);
    av_assert2((((int)pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0),%%mm2\n"\
    "movq (%1)," #out0 "\n"\
    "movq 8(%0),%%mm3\n"\
    "movq 8(%1)," #out1 "\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb " #out0 ", %%mm2\n"\
    "psubb " #out1 ", %%mm3\n"\
    "pxor %%mm7, %%mm2\n"\
    "pxor %%mm7, %%mm3\n"\
    "movq %%mm2, " #out0 "\n"\
    "movq %%mm3, " #out1 "\n"\
    "psubusb " #in0 ", %%mm2\n"\
    "psubusb " #in1 ", %%mm3\n"\
    "psubusb " #out0 ", " #in0 "\n"\
    "psubusb " #out1 ", " #in1 "\n"\
    "por %%mm2, " #in0 "\n"\
    "por %%mm3, " #in1 "\n"\
    "movq " #in0 ", %%mm2\n"\
    "movq " #in1 ", %%mm3\n"\
    "punpcklbw %%mm7, " #in0 "\n"\
    "punpcklbw %%mm7, " #in1 "\n"\
    "punpckhbw %%mm7, %%mm2\n"\
    "punpckhbw %%mm7, %%mm3\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw %%mm3, %%mm2\n"\
    "paddw %%mm2, " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movq %%mm6,%%mm0\n"
        "psrlq $32, %%mm6\n"
        "paddw %%mm6,%%mm0\n"
        "movq %%mm0,%%mm6\n"
        "psrlq $16, %%mm0\n"
        "paddw %%mm6,%%mm0\n"
        "movd %%mm0,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmxext(void *v, uint8_t *pix1, uint8_t *pix2,
                         int line_size, int h)
{
    int tmp;

    av_assert2((((int)pix1) & 7) == 0);
    av_assert2((((int)pix2) & 7) == 0);
    av_assert2((line_size & 7) == 0);

#define SUM(in0, in1, out0, out1) \
    "movq (%0)," #out0 "\n"\
    "movq (%1),%%mm2\n"\
    "movq 8(%0)," #out1 "\n"\
    "movq 8(%1),%%mm3\n"\
    "add %3,%0\n"\
    "add %3,%1\n"\
    "psubb %%mm2, " #out0 "\n"\
    "psubb %%mm3, " #out1 "\n"\
    "pxor %%mm7, " #out0 "\n"\
    "pxor %%mm7, " #out1 "\n"\
    "psadbw " #out0 ", " #in0 "\n"\
    "psadbw " #out1 ", " #in1 "\n"\
    "paddw " #in1 ", " #in0 "\n"\
    "paddw " #in0 ", %%mm6\n"

    __asm__ volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm6,%%mm6\n"
        "pcmpeqw %%mm7,%%mm7\n"
        "psllw $15, %%mm7\n"
        "packsswb %%mm7, %%mm7\n"
        "movq (%0),%%mm0\n"
        "movq (%1),%%mm2\n"
        "movq 8(%0),%%mm1\n"
        "movq 8(%1),%%mm3\n"
        "add %3,%0\n"
        "add %3,%1\n"
        "psubb %%mm2, %%mm0\n"
        "psubb %%mm3, %%mm1\n"
        "pxor %%mm7, %%mm0\n"
        "pxor %%mm7, %%mm1\n"
        "jmp 2f\n"
        "1:\n"

        SUM(%%mm4, %%mm5, %%mm0, %%mm1)
        "2:\n"
        SUM(%%mm0, %%mm1, %%mm4, %%mm5)

        "subl $2, %%ecx\n"
        "jnz 1b\n"

        "movd %%mm6,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r" (tmp)
        : "r" ((x86_reg)line_size), "m" (h)
        : "%ecx");
    return tmp;
}
#undef SUM
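
/* For reference: a plain-C sketch (hypothetical, compiled out) of the inter
 * vsad computed above: the vertical SAD of the residual pix1 - pix2, i.e.
 * how much the prediction error changes from one row to the next. The pxor
 * with 0x80 bytes in the asm merely rebases the signed residual into the
 * unsigned domain so psubusb/psadbw can be used; wraparound on extreme
 * residuals makes this sketch only approximately equivalent. */
#if 0
static int vsad16_c_ref(const uint8_t *pix1, const uint8_t *pix2,
                        int line_size, int h)
{
    int score = 0;
    for (int y = 0; y < h - 1; y++) {
        for (int x = 0; x < 16; x++) {
            const int d0 = pix1[x] - pix2[x];                         /* residual, row y   */
            const int d1 = pix1[x + line_size] - pix2[x + line_size]; /* residual, row y+1 */
            score += FFABS(d0 - d1);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return score;
}
#endif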

static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w) {
    x86_reg i = 0;
    if (w >= 16)
        __asm__ volatile(
            "1: \n\t"
            "movq (%2, %0), %%mm0 \n\t"
            "movq (%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, (%3, %0) \n\t"
            "movq 8(%2, %0), %%mm0 \n\t"
            "movq 8(%1, %0), %%mm1 \n\t"
            "psubb %%mm0, %%mm1 \n\t"
            "movq %%mm1, 8(%3, %0) \n\t"
            "add $16, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (i)
            : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w-15)
        );
    for (; i < w; i++)
        dst[i+0] = src1[i+0] - src2[i+0];
}

static void sub_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *src1,
                                              const uint8_t *src2, int w,
                                              int *left, int *left_top)
{
    x86_reg i = 0;
    uint8_t l, lt;

    __asm__ volatile(
        "movq (%1, %0), %%mm0 \n\t" // LT
        "psllq $8, %%mm0 \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm1 \n\t" // T
        "movq -1(%2, %0), %%mm2 \n\t" // L
        "movq (%2, %0), %%mm3 \n\t" // X
        "movq %%mm2, %%mm4 \n\t" // L
        "psubb %%mm0, %%mm2 \n\t"
        "paddb %%mm1, %%mm2 \n\t" // L + T - LT
        "movq %%mm4, %%mm5 \n\t" // L
        "pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
        "pminub %%mm5, %%mm1 \n\t" // min(T, L)
        "pminub %%mm2, %%mm4 \n\t"
        "pmaxub %%mm1, %%mm4 \n\t"
        "psubb %%mm4, %%mm3 \n\t" // dst - pred
        "movq %%mm3, (%3, %0) \n\t"
        "add $8, %0 \n\t"
        "movq -1(%1, %0), %%mm0 \n\t" // LT
        "cmp %4, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg)w)
    );

    l  = *left;
    lt = *left_top;

    dst[0] = src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt) & 0xFF);

    *left_top = src1[w-1];
    *left     = src2[w-1];
}
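
/* For reference: a plain-C sketch (hypothetical, compiled out) of the
 * HuffYUV median prediction performed above: each output byte is the source
 * sample minus the median of left, top, and (left + top - topleft). */
#if 0
static void sub_hfyu_median_prediction_c_ref(uint8_t *dst, const uint8_t *src1,
                                             const uint8_t *src2, int w,
                                             int *left, int *left_top)
{
    int l = *left, lt = *left_top;
    for (int i = 0; i < w; i++) {
        const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
        lt = src1[i];        /* current top becomes the next top-left */
        l  = src2[i];        /* current sample becomes the next left  */
        dst[i] = l - pred;   /* store the prediction residual         */
    }
    *left     = l;
    *left_top = lt;
}
#endif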

#define MMABS_MMX(a,z)\
    "pxor " #z ", " #z " \n\t"\
    "pcmpgtw " #a ", " #z " \n\t"\
    "pxor " #z ", " #a " \n\t"\
    "psubw " #z ", " #a " \n\t"

#define MMABS_MMXEXT(a, z) \
    "pxor " #z ", " #z " \n\t"\
    "psubw " #a ", " #z " \n\t"\
    "pmaxsw " #z ", " #a " \n\t"

#define MMABS_SSSE3(a,z)\
    "pabsw " #a ", " #a " \n\t"

#define MMABS_SUM(a,z, sum)\
    MMABS(a,z)\
    "paddusw " #a ", " #sum " \n\t"

/* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get up to
 * about 100k on extreme inputs. But that's very unlikely to occur in natural video,
 * and it's even more unlikely to not have any alternative mvs/modes with lower cost. */
#define HSUM_MMX(a, t, dst)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movq "#a", "#t" \n\t"\
    "psrlq $16, "#a" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_MMXEXT(a, t, dst) \
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshufw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define HSUM_SSE2(a, t, dst)\
    "movhlps "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x0E, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "pshuflw $0x01, "#a", "#t" \n\t"\
    "paddusw "#t", "#a" \n\t"\
    "movd "#a", "#dst" \n\t"\

#define DCT_SAD4(m,mm,o)\
    "mov"#m" "#o"+ 0(%1), "#mm"2 \n\t"\
    "mov"#m" "#o"+16(%1), "#mm"3 \n\t"\
    "mov"#m" "#o"+32(%1), "#mm"4 \n\t"\
    "mov"#m" "#o"+48(%1), "#mm"5 \n\t"\
    MMABS_SUM(mm##2, mm##6, mm##0)\
    MMABS_SUM(mm##3, mm##7, mm##1)\
    MMABS_SUM(mm##4, mm##6, mm##0)\
    MMABS_SUM(mm##5, mm##7, mm##1)\

#define DCT_SAD_MMX\
    "pxor %%mm0, %%mm0 \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    DCT_SAD4(q, %%mm, 0)\
    DCT_SAD4(q, %%mm, 8)\
    DCT_SAD4(q, %%mm, 64)\
    DCT_SAD4(q, %%mm, 72)\
    "paddusw %%mm1, %%mm0 \n\t"\
    HSUM(%%mm0, %%mm1, %0)

#define DCT_SAD_SSE2\
    "pxor %%xmm0, %%xmm0 \n\t"\
    "pxor %%xmm1, %%xmm1 \n\t"\
    DCT_SAD4(dqa, %%xmm, 0)\
    DCT_SAD4(dqa, %%xmm, 64)\
    "paddusw %%xmm1, %%xmm0 \n\t"\
    HSUM(%%xmm0, %%xmm1, %0)

#define DCT_SAD_FUNC(cpu) \
static int sum_abs_dctelem_##cpu(int16_t *block){\
    int sum;\
    __asm__ volatile(\
        DCT_SAD\
        :"=r"(sum)\
        :"r"(block)\
    );\
    return sum&0xFFFF;\
}

#define DCT_SAD      DCT_SAD_MMX
#define HSUM(a,t,dst) HSUM_MMX(a,t,dst)
#define MMABS(a,z)    MMABS_MMX(a,z)
DCT_SAD_FUNC(mmx)
#undef MMABS
#undef HSUM

#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
#define MMABS(a,z)    MMABS_MMXEXT(a,z)
DCT_SAD_FUNC(mmxext)
#undef HSUM
#undef DCT_SAD

#define DCT_SAD      DCT_SAD_SSE2
#define HSUM(a,t,dst) HSUM_SSE2(a,t,dst)
DCT_SAD_FUNC(sse2)
#undef MMABS

#if HAVE_SSSE3_INLINE
#define MMABS(a,z)    MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#endif
#undef HSUM
#undef DCT_SAD
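
/* For reference: the DCT_SAD_FUNC instances above compute the sum of
 * absolute values of the 64 coefficients of a transformed 8x8 block, used as
 * a cheap coefficient-cost estimate (e.g. by the dct_sad comparison). A
 * hypothetical plain-C equivalent, compiled out: */
#if 0
static int sum_abs_dctelem_c_ref(const int16_t *block)
{
    int sum = 0;
    for (int i = 0; i < 64; i++)
        sum += FFABS(block[i]);  /* saturating in the asm, exact here */
    return sum;
}
#endif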

static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int size) {
    int sum;
    x86_reg i = size;
    __asm__ volatile(
        "pxor %%mm4, %%mm4 \n"
        "1: \n"
        "sub $8, %0 \n"
        "movq (%2,%0), %%mm2 \n"
        "movq (%3,%0,2), %%mm0 \n"
        "movq 8(%3,%0,2), %%mm1 \n"
        "punpckhbw %%mm2, %%mm3 \n"
        "punpcklbw %%mm2, %%mm2 \n"
        "psraw $8, %%mm3 \n"
        "psraw $8, %%mm2 \n"
        "psubw %%mm3, %%mm1 \n"
        "psubw %%mm2, %%mm0 \n"
        "pmaddwd %%mm1, %%mm1 \n"
        "pmaddwd %%mm0, %%mm0 \n"
        "paddd %%mm1, %%mm4 \n"
        "paddd %%mm0, %%mm4 \n"
        "jg 1b \n"
        "movq %%mm4, %%mm3 \n"
        "psrlq $32, %%mm3 \n"
        "paddd %%mm3, %%mm4 \n"
        "movd %%mm4, %1 \n"
        : "+r" (i), "=r" (sum)
        : "r" (pix1), "r" (pix2)
    );
    return sum;
}
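
/* For reference: a plain-C sketch (hypothetical, compiled out) of the
 * squared distance between an int8 vector and an int16 vector computed
 * above; the asm assumes size is a multiple of 8. */
#if 0
static int ssd_int8_vs_int16_c_ref(const int8_t *pix1, const int16_t *pix2,
                                   int size)
{
    int sum = 0;
    for (int i = 0; i < size; i++) {
        const int d = pix1[i] - pix2[i];
        sum += d * d;
    }
    return sum;
}
#endif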

#define PHADDD(a, t)\
    "movq "#a", "#t" \n\t"\
    "psrlq $32, "#a" \n\t"\
    "paddd "#t", "#a" \n\t"

/*
 * pmulhw:   dst[0-15] = (src[0-15] * dst[0-15])[16-31]
 * pmulhrw:  dst[0-15] = (src[0-15] * dst[0-15] + 0x8000)[16-31]
 * pmulhrsw: dst[0-15] = (src[0-15] * dst[0-15] + 0x4000)[15-30]
 */
#define PMULHRW(x, y, s, o)\
    "pmulhw " #s ", "#x " \n\t"\
    "pmulhw " #s ", "#y " \n\t"\
    "paddw " #o ", "#x " \n\t"\
    "paddw " #o ", "#y " \n\t"\
    "psraw $1, "#x " \n\t"\
    "psraw $1, "#y " \n\t"
#define DEF(x) x ## _mmx
#define SET_RND MOVQ_WONE
#define SCALE_OFFSET 1

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#define DEF(x) x ## _3dnow
#define SET_RND(x)
#define SCALE_OFFSET 0
#define PMULHRW(x, y, s, o)\
    "pmulhrw " #s ", "#x " \n\t"\
    "pmulhrw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW

#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#define SCALE_OFFSET -1
#define PHADDD(a, t)\
    "pshufw $0x0E, "#a", "#t" \n\t"\
    "paddd "#t", "#a" \n\t" /* faster than phaddd on core2 */
#define PMULHRW(x, y, s, o)\
    "pmulhrsw " #s ", "#x " \n\t"\
    "pmulhrsw " #s ", "#y " \n\t"

#include "dsputil_qns_template.c"

#undef DEF
#undef SET_RND
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
#endif /* HAVE_SSSE3_INLINE */

#endif /* HAVE_INLINE_ASM */

int ff_sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);

#define hadamard_func(cpu) \
int ff_hadamard8_diff_##cpu  (void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h); \
int ff_hadamard8_diff16_##cpu(void *s, uint8_t *src1, uint8_t *src2, \
                              int stride, int h);

hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)

av_cold void ff_dsputilenc_init_mmx(DSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();
    int bit_depth = avctx->bits_per_raw_sample;

#if HAVE_YASM
    if (EXTERNAL_MMX(mm_flags)) {
        if (bit_depth <= 8)
            c->get_pixels = ff_get_pixels_mmx;
        c->diff_pixels = ff_diff_pixels_mmx;
        c->pix_sum     = ff_pix_sum16_mmx;

        c->pix_norm1   = ff_pix_norm1_mmx;
    }
    if (EXTERNAL_SSE2(mm_flags))
        if (bit_depth <= 8)
            c->get_pixels = ff_get_pixels_sse2;
#endif /* HAVE_YASM */

#if HAVE_INLINE_ASM
    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int dct_algo = avctx->dct_algo;
        if (avctx->bits_per_raw_sample <= 8 &&
            (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) {
            if (mm_flags & AV_CPU_FLAG_SSE2) {
                c->fdct = ff_fdct_sse2;
            } else if (mm_flags & AV_CPU_FLAG_MMXEXT) {
                c->fdct = ff_fdct_mmxext;
            } else {
                c->fdct = ff_fdct_mmx;
            }
        }

        c->diff_bytes      = diff_bytes_mmx;
        c->sum_abs_dctelem = sum_abs_dctelem_mmx;

        c->sse[0]  = sse16_mmx;
        c->sse[1]  = sse8_mmx;
        c->vsad[4] = vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }

        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->try_8x8basis = try_8x8basis_mmx;
        }
        c->add_8x8basis = add_8x8basis_mmx;

        c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;

        if (mm_flags & AV_CPU_FLAG_MMXEXT) {
            c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
            c->vsad[4]         = vsad_intra16_mmxext;

            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->vsad[0] = vsad16_mmxext;
            }

            c->sub_hfyu_median_prediction = sub_hfyu_median_prediction_mmxext;
        }

        if (mm_flags & AV_CPU_FLAG_SSE2) {
            c->sum_abs_dctelem = sum_abs_dctelem_sse2;
        }

#if HAVE_SSSE3_INLINE
        if (mm_flags & AV_CPU_FLAG_SSSE3) {
            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->try_8x8basis = try_8x8basis_ssse3;
            }
            c->add_8x8basis    = add_8x8basis_ssse3;
            c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
        }
#endif

        if (mm_flags & AV_CPU_FLAG_3DNOW) {
            if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
                c->try_8x8basis = try_8x8basis_3dnow;
            }
            c->add_8x8basis = add_8x8basis_3dnow;
        }
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(mm_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;

        if (EXTERNAL_MMXEXT(mm_flags)) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
            c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        }

        if (EXTERNAL_SSE2(mm_flags)) {
            c->sse[0] = ff_sse16_sse2;

#if HAVE_ALIGNED_STACK
            c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
            c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
        }

        if (EXTERNAL_SSSE3(mm_flags) && HAVE_ALIGNED_STACK) {
            c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
            c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
        }
    }

    ff_dsputil_init_pix_mmx(c, avctx);
}