/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "dsputil_mmx.h"

#if HAVE_INLINE_ASM

/* rounding constants for the half-pel averaging variants below */
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
    0x0000000000000000ULL,
    0x0001000100010001ULL,
    0x0002000200020002ULL,
};

/* all-bytes-one constant, used to correct pavgb's round-up bias */
DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;

/* SAD of one 8xh block; accumulates word sums into %mm6, %mm7 must be zero */
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

/* same as sad8_1_mmx, but psadbw does the byte SAD in a single step */
static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* 16xh SAD with unaligned SSE2 loads; returns the sum directly */
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}

/* SAD against the horizontal (x) half-pel average of blk1 */
static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* SAD against the vertical (y) half-pel average of blk1 */
static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
                                   int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* SAD against the 2x2 (xy) half-pel average; subtracting bone compensates
 * the double round-up of the chained pavgb instructions */
static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* SAD of blk2 against the rounded average of blk1a and blk1b;
 * %mm5 must hold round_tab[1] and %mm7 must be zero */
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                              int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
          "r" ((x86_reg)stride)
    );
}

/* SAD against the 4-tap (xy half-pel) average, rounded with round_tab[2] */
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len = -(x86_reg)stride * h;
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
          "r" ((x86_reg)stride)
    );
}

/* horizontal sum of the four word accumulators in %mm6 */
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret & 0xFFFF;
}

/* psadbw already left a single scalar sum in the low word of %mm6 */
static inline int sum_mmxext(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}

static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}

/* Instantiate the public 8x8 and 16x16 SAD entry points for one CPU flavor;
 * the x2/y2/xy2 variants compare against half-pel interpolated references. */
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1])\
                     );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1])\
                     );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    av_assert2(h == 8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1    , blk2    , stride, h);\
    sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1])\
                     );\
\
    sad8_x2a_ ## suf(blk1    , blk2    , stride, h);\
    sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m" (round_tab[1])\
                     );\
\
    sad8_y2a_ ## suf(blk1    , blk2    , stride, h);\
    sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1    , blk2    , stride, h);\
    sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmxext)

#endif /* HAVE_INLINE_ASM */

av_cold void ff_dsputil_init_pix_mmx(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    int mm_flags = av_get_cpu_flags();

    /* pix_abs[0][] handles 16-pixel-wide blocks, pix_abs[1][] 8-pixel-wide;
     * indices 0-3 select full-, x-half-, y-half- and xy-half-pel SAD */
    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmxext;
        c->pix_abs[1][0] = sad8_mmxext;

        c->sad[0] = sad16_mmxext;
        c->sad[1] = sad8_mmxext;

        /* the pavgb-based half-pel variants round differently, so they are
         * only used when bit-exact output is not required */
        if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][1] = sad16_x2_mmxext;
            c->pix_abs[0][2] = sad16_y2_mmxext;
            c->pix_abs[0][3] = sad16_xy2_mmxext;
            c->pix_abs[1][1] = sad8_x2_mmxext;
            c->pix_abs[1][2] = sad8_y2_mmxext;
            c->pix_abs[1][3] = sad8_xy2_mmxext;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) &&
        avctx->codec_id != AV_CODEC_ID_SNOW) {
        c->sad[0] = sad16_sse2;
    }
#endif /* HAVE_INLINE_ASM */
}
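
/*
 * For reference, every kernel above computes a sum of absolute differences
 * (SAD) between a source block and a reference block, optionally against a
 * half-pel interpolated source. A minimal scalar sketch of the plain 8-wide
 * case follows; sad8_1_ref is an illustrative name only, kept compiled out,
 * and is not part of any public interface.
 */
#if 0
static int sad8_1_ref(const uint8_t *blk1, const uint8_t *blk2,
                      int stride, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            sum += FFABS(blk1[x] - blk2[x]); /* per-pixel absolute error */
        blk1 += stride;                      /* advance both blocks one row */
        blk2 += stride;
    }
    return sum;
}
#endif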