;******************************************************************************
;* MMX/SSSE3-optimized functions for H264 chroma MC
;* Copyright (c) 2005 Zoltan Hidvegi,
;*               2005-2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

rnd_rv40_2d_tbl: times 4 dw  0
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw  0
                 times 4 dw 32
                 times 4 dw 16
                 times 4 dw 32
                 times 4 dw 32
                 times 4 dw 28
                 times 4 dw 32
                 times 4 dw 28
rnd_rv40_1d_tbl: times 4 dw  0
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  0
                 times 4 dw  4
                 times 4 dw  2
                 times 4 dw  4
                 times 4 dw  4
                 times 4 dw  3
                 times 4 dw  4
                 times 4 dw  3

cextern pw_3
cextern pw_4
cextern pw_8
pw_28: times 8 dw 28
cextern pw_32
cextern pw_64

SECTION .text

%macro mv0_pixels_mc8 0
    lea           r4, [r2*2]
.next4rows:
    movq         mm0, [r1   ]
    movq         mm1, [r1+r2]
    add           r1, r4
    CHROMAMC_AVG mm0, [r0   ]
    CHROMAMC_AVG mm1, [r0+r2]
    movq     [r0   ], mm0
    movq     [r0+r2], mm1
    add           r0, r4
    movq         mm0, [r1   ]
    movq         mm1, [r1+r2]
    add           r1, r4
    CHROMAMC_AVG mm0, [r0   ]
    CHROMAMC_AVG mm1, [r0+r2]
    movq     [r0   ], mm0
    movq     [r0+r2], mm1
    add           r0, r4
    sub          r3d, 4
    jne .next4rows
%endmacro

%macro chroma_mc8_mmx_func 2-3
%ifidn %2, rv40
%ifdef PIC
%define rnd_1d_rv40 r8
%define rnd_2d_rv40 r8
%define extra_regs 2
%else ; no-PIC
%define rnd_1d_rv40 rnd_rv40_1d_tbl
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%define extra_regs 1
%endif ; PIC
%else
%define extra_regs 0
%endif ; rv40
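; The mc8 functions implement the usual 2-D bilinear chroma interpolation
;     dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1]
;              + rnd) >> 6
; with A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y, and fall back to
; a cheaper 1-D filter (>> 3) when mx or my is zero. h264 and vc1 use fixed
; rounding constants (rnd_1d_*/rnd_2d_*); rv40 picks them from the tables
; above via rnd_bias, derived from mx and my.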
; put/avg_h264_chroma_mc8_*(uint8_t *dst /*align 8*/, uint8_t *src /*align 1*/,
;                           int stride, int h, int mx, int my)
cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
%if ARCH_X86_64
    movsxd        r2, r2d
%endif
    mov          r6d, r5d
    or           r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    mv0_pixels_mc8
    REP_RET

.at_least_one_non_zero:
%ifidn %2, rv40
%if ARCH_X86_64
    mov           r7, r5
    and           r7, 6         ; &~1 for mx/my=[0,7]
    lea           r7, [r7*4+r4]
    sar          r7d, 1
%define rnd_bias r7
%define dest_reg r0
%else ; x86-32
    mov           r0, r5
    and           r0, 6         ; &~1 for mx/my=[0,7]
    lea           r0, [r0*4+r4]
    sar          r0d, 1
%define rnd_bias r0
%define dest_reg r5
%endif
%else ; vc1, h264
%define rnd_bias 0
%define dest_reg r0
%endif

    test         r5d, r5d
    mov           r6, 1
    je .my_is_zero
    test         r4d, r4d
    mov           r6, r2        ; dxy = x ? 1 : stride
    jne .both_non_zero
.my_is_zero:
    ; mx == 0 XOR my == 0 - 1 dimensional filter only
    or           r4d, r5d       ; x + y

%ifidn %2, rv40
%ifdef PIC
    lea           r8, [rnd_rv40_1d_tbl]
%endif
%if ARCH_X86_64 == 0
    mov           r5, r0m
%endif
%endif

    movd          m5, r4d
    movq          m4, [pw_8]
    movq          m6, [rnd_1d_%2+rnd_bias*8] ; mm6 = rnd >> 3
    punpcklwd     m5, m5
    punpckldq     m5, m5        ; mm5 = B = x
    pxor          m7, m7
    psubw         m4, m5        ; mm4 = A = 8-x

.next1drow:
    movq          m0, [r1   ]   ; mm0 = src[0..7]
    movq          m2, [r1+r6]   ; mm2 = src[1..8]

    movq          m1, m0
    movq          m3, m2
    punpcklbw     m0, m7
    punpckhbw     m1, m7
    punpcklbw     m2, m7
    punpckhbw     m3, m7
    pmullw        m0, m4        ; [mm0,mm1] = A * src[0..7]
    pmullw        m1, m4
    pmullw        m2, m5        ; [mm2,mm3] = B * src[1..8]
    pmullw        m3, m5

    paddw         m0, m6
    paddw         m1, m6
    paddw         m0, m2
    paddw         m1, m3
    psrlw         m0, 3
    psrlw         m1, 3
    packuswb      m0, m1
    CHROMAMC_AVG  m0, [dest_reg]
    movq  [dest_reg], m0        ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3

    add     dest_reg, r2
    add           r1, r2
    dec          r3d
    jne .next1drow
    REP_RET

.both_non_zero: ; general case, bilinear
    movd          m4, r4d       ; x
    movd          m6, r5d       ; y
%ifidn %2, rv40
%ifdef PIC
    lea           r8, [rnd_rv40_2d_tbl]
%endif
%if ARCH_X86_64 == 0
    mov           r5, r0m
%endif
%endif
    mov           r6, rsp       ; backup stack pointer
    and          rsp, ~(mmsize-1) ; align stack
    sub          rsp, 16        ; AA and DD

    punpcklwd     m4, m4
    punpcklwd     m6, m6
    punpckldq     m4, m4        ; mm4 = x words
    punpckldq     m6, m6        ; mm6 = y words
    movq          m5, m4
    pmullw        m4, m6        ; mm4 = x * y
    psllw         m5, 3
    psllw         m6, 3
    movq          m7, m5
    paddw         m7, m6
    movq     [rsp+8], m4        ; DD = x * y
    psubw         m5, m4        ; mm5 = B = 8x - xy
    psubw         m6, m4        ; mm6 = C = 8y - xy
    paddw         m4, [pw_64]
    psubw         m4, m7        ; mm4 = A = xy - (8x+8y) + 64
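    ; A = (8-x)*(8-y) expands to 64 - 8x - 8y + x*y, which is exactly the
    ; (x*y + 64) - (8x + 8y) sequence computed above; B, C and D are the
    ; remaining bilinear weights 8x - xy, 8y - xy and xy.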
    pxor          m7, m7
    movq     [rsp   ], m4

    movq          m0, [r1  ]    ; mm0 = src[0..7]
    movq          m1, [r1+1]    ; mm1 = src[1..8]
.next2drow:
    add           r1, r2

    movq          m2, m0
    movq          m3, m1
    punpckhbw     m0, m7
    punpcklbw     m1, m7
    punpcklbw     m2, m7
    punpckhbw     m3, m7
    pmullw        m0, [rsp]
    pmullw        m2, [rsp]
    pmullw        m1, m5
    pmullw        m3, m5
    paddw         m2, m1        ; mm2 = A * src[0..3] + B * src[1..4]
    paddw         m3, m0        ; mm3 = A * src[4..7] + B * src[5..8]

    movq          m0, [r1]
    movq          m1, m0
    punpcklbw     m0, m7
    punpckhbw     m1, m7
    pmullw        m0, m6
    pmullw        m1, m6
    paddw         m2, m0
    paddw         m3, m1        ; [mm2,mm3] += C * src[0..7]

    movq          m1, [r1+1]
    movq          m0, m1
    movq          m4, m1
    punpcklbw     m0, m7
    punpckhbw     m4, m7
    pmullw        m0, [rsp+8]
    pmullw        m4, [rsp+8]
    paddw         m2, m0
    paddw         m3, m4        ; [mm2,mm3] += D * src[1..8]
    movq          m0, [r1]

    paddw         m2, [rnd_2d_%2+rnd_bias*8]
    paddw         m3, [rnd_2d_%2+rnd_bias*8]
    psrlw         m2, 6
    psrlw         m3, 6
    packuswb      m2, m3
    CHROMAMC_AVG  m2, [dest_reg]
    movq  [dest_reg], m2        ; dst[0..7] = ([mm2,mm3] + rnd) >> 6

    add     dest_reg, r2
    dec          r3d
    jne .next2drow
    mov          rsp, r6        ; restore stack pointer
    RET
%endmacro

%macro chroma_mc4_mmx_func 2
%define extra_regs 0
%ifidn %2, rv40
%ifdef PIC
%define extra_regs 1
%endif ; PIC
%endif ; rv40
cglobal %1_%2_chroma_mc4, 6, 6 + extra_regs, 0
%if ARCH_X86_64
    movsxd        r2, r2d
%endif
    pxor          m7, m7
    movd          m2, r4d       ; x
    movd          m3, r5d       ; y
    movq          m4, [pw_8]
    movq          m5, [pw_8]
    punpcklwd     m2, m2
    punpcklwd     m3, m3
    punpcklwd     m2, m2
    punpcklwd     m3, m3
    psubw         m4, m2
    psubw         m5, m3

%ifidn %2, rv40
%ifdef PIC
    lea           r6, [rnd_rv40_2d_tbl]
%define rnd_2d_rv40 r6
%else
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%endif
    and           r5, 6         ; &~1 for mx/my=[0,7]
    lea           r5, [r5*4+r4]
    sar          r5d, 1
%define rnd_bias r5
%else ; vc1, h264
%define rnd_bias 0
%endif

    movd          m0, [r1  ]
    movd          m6, [r1+1]
    add           r1, r2
    punpcklbw     m0, m7
    punpcklbw     m6, m7
    pmullw        m0, m4
    pmullw        m6, m2
    paddw         m6, m0

.next2rows:
    movd          m0, [r1  ]
    movd          m1, [r1+1]
    add           r1, r2
    punpcklbw     m0, m7
    punpcklbw     m1, m7
    pmullw        m0, m4
    pmullw        m1, m2
    paddw         m1, m0
    movq          m0, m1

    pmullw        m6, m5
    pmullw        m1, m3
    paddw         m6, [rnd_2d_%2+rnd_bias*8]
    paddw         m1, m6
    psrlw         m1, 6
    packuswb      m1, m1
    CHROMAMC_AVG4 m1, m6, [r0]
    movd        [r0], m1
    add           r0, r2

    movd          m6, [r1  ]
    movd          m1, [r1+1]
    add           r1, r2
    punpcklbw     m6, m7
    punpcklbw     m1, m7
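    ; same horizontal pass for the next source row (m4 = 8-x, m2 = x), then
    ; blend it vertically with the previous row kept in m0 (m5 = 8-y, m3 = y)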
    pmullw        m6, m4
    pmullw        m1, m2
    paddw         m1, m6
    movq          m6, m1
    pmullw        m0, m5
    pmullw        m1, m3
    paddw         m0, [rnd_2d_%2+rnd_bias*8]
    paddw         m1, m0
    psrlw         m1, 6
    packuswb      m1, m1
    CHROMAMC_AVG4 m1, m0, [r0]
    movd        [r0], m1
    add           r0, r2
    sub          r3d, 2
    jnz .next2rows
    REP_RET
%endmacro

%macro chroma_mc2_mmx_func 2
cglobal %1_%2_chroma_mc2, 6, 7, 0
%if ARCH_X86_64
    movsxd        r2, r2d
%endif

    mov          r6d, r4d
    shl          r4d, 16
    sub          r4d, r6d
    add          r4d, 8
    imul         r5d, r4d       ; x*y<<16 | y*(8-x)
    shl          r4d, 3
    sub          r4d, r5d       ; x*(8-y)<<16 | (8-x)*(8-y)

    movd          m5, r4d
    movd          m6, r5d
    punpckldq     m5, m5        ; mm5 = {A,B,A,B}
    punpckldq     m6, m6        ; mm6 = {C,D,C,D}
    pxor          m7, m7
    movd          m2, [r1]
    punpcklbw     m2, m7
    pshufw        m2, m2, 0x94  ; mm2 = src[0,1,1,2]

.nextrow:
    add           r1, r2
    movq          m1, m2
    pmaddwd       m1, m5        ; mm1 = A * src[0,1] + B * src[1,2]
    movd          m0, [r1]
    punpcklbw     m0, m7
    pshufw        m0, m0, 0x94  ; mm0 = src[0,1,1,2]
    movq          m2, m0
    pmaddwd       m0, m6
    paddw         m1, [rnd_2d_%2]
    paddw         m1, m0        ; mm1 += C * src[0,1] + D * src[1,2]
    psrlw         m1, 6
    packssdw      m1, m7
    packuswb      m1, m7
    CHROMAMC_AVG4 m1, m3, [r0]
    movd         r5d, m1
    mov         [r0], r5w
    add           r0, r2
    sub          r3d, 1
    jnz .nextrow
    REP_RET
%endmacro

%define rnd_1d_h264 pw_4
%define rnd_2d_h264 pw_32
%define rnd_1d_vc1  pw_3
%define rnd_2d_vc1  pw_28

%macro NOTHING 2-3
%endmacro
%macro DIRECT_AVG 2
    PAVGB         %1, %2
%endmacro
%macro COPY_AVG 3
    movd          %2, %3
    PAVGB         %1, %2
%endmacro

INIT_MMX mmx
%define CHROMAMC_AVG  NOTHING
%define CHROMAMC_AVG4 NOTHING
chroma_mc8_mmx_func put, h264, _rnd
chroma_mc8_mmx_func put, vc1,  _nornd
chroma_mc8_mmx_func put, rv40
chroma_mc4_mmx_func put, h264
chroma_mc4_mmx_func put, rv40

INIT_MMX mmxext
chroma_mc2_mmx_func put, h264

%define CHROMAMC_AVG  DIRECT_AVG
%define CHROMAMC_AVG4 COPY_AVG
chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1,  _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40
chroma_mc2_mmx_func avg, h264

INIT_MMX 3dnow
chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1,  _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40

%macro chroma_mc8_ssse3_func 2-3
cglobal %1_%2_chroma_mc8%3, 6, 7, 8
%if ARCH_X86_64
    movsxd        r2, r2d
%endif
    mov          r6d, r5d
    or           r6d, r4d
    jne .at_least_one_non_zero
    ; mx == 0 AND my == 0 - no filter needed
    mv0_pixels_mc8
    REP_RET
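    ; SSSE3 note: the two weights of each horizontal source pair are packed
    ; into one word (low byte 8-x, high byte x, each pre-scaled by 8-y or y),
    ; so a single pmaddubsw over interleaved src[i]/src[i+1] bytes does the
    ; multiply and the horizontal add of the bilinear filter in one step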
.at_least_one_non_zero:
    test         r5d, r5d
    je .my_is_zero
    test         r4d, r4d
    je .mx_is_zero

    ; general case, bilinear
    mov          r6d, r4d
    shl          r4d, 8
    sub           r4, r6
    mov           r6, 8
    add           r4, 8          ; x*255+8 = x<<8 | (8-x)
    sub          r6d, r5d
    imul          r6, r4         ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul         r4d, r5d        ; y *(x*255+8) = y *x<<8 | y *(8-x)

    movd          m7, r6d
    movd          m6, r4d
    movdqa        m5, [rnd_2d_%2]
    movq          m0, [r1  ]
    movq          m1, [r1+1]
    pshuflw       m7, m7, 0
    pshuflw       m6, m6, 0
    punpcklbw     m0, m1
    movlhps       m7, m7
    movlhps       m6, m6

.next2rows:
    movq          m1, [r1+r2*1  ]
    movq          m2, [r1+r2*1+1]
    movq          m3, [r1+r2*2  ]
    movq          m4, [r1+r2*2+1]
    lea           r1, [r1+r2*2]
    punpcklbw     m1, m2
    movdqa        m2, m1
    punpcklbw     m3, m4
    movdqa        m4, m3
    pmaddubsw     m0, m7
    pmaddubsw     m1, m6
    pmaddubsw     m2, m7
    pmaddubsw     m3, m6
    paddw         m0, m5
    paddw         m2, m5
    paddw         m1, m0
    paddw         m3, m2
    psrlw         m1, 6
    movdqa        m0, m4
    psrlw         m3, 6
%ifidn %1, avg
    movq          m2, [r0   ]
    movhps        m2, [r0+r2]
%endif
    packuswb      m1, m3
    CHROMAMC_AVG  m1, m2
    movq     [r0   ], m1
    movhps   [r0+r2], m1
    sub          r3d, 2
    lea           r0, [r0+r2*2]
    jg .next2rows
    REP_RET

.my_is_zero:
    mov          r5d, r4d
    shl          r4d, 8
    add           r4, 8
    sub           r4, r5         ; 255*x+8 = x<<8 | (8-x)
    movd          m7, r4d
    movdqa        m6, [rnd_1d_%2]
    pshuflw       m7, m7, 0
    movlhps       m7, m7

.next2xrows:
    movq          m0, [r1     ]
    movq          m1, [r1   +1]
    movq          m2, [r1+r2  ]
    movq          m3, [r1+r2+1]
    punpcklbw     m0, m1
    punpcklbw     m2, m3
    pmaddubsw     m0, m7
    pmaddubsw     m2, m7
%ifidn %1, avg
    movq          m4, [r0   ]
    movhps        m4, [r0+r2]
%endif
    paddw         m0, m6
    paddw         m2, m6
    psrlw         m0, 3
    psrlw         m2, 3
    packuswb      m0, m2
    CHROMAMC_AVG  m0, m4
    movq     [r0   ], m0
    movhps   [r0+r2], m0
    sub          r3d, 2
    lea           r0, [r0+r2*2]
    lea           r1, [r1+r2*2]
    jg .next2xrows
    REP_RET
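    ; mx == 0: vertical-only 1-D filter; each row is interleaved with the
    ; row below so pmaddubsw applies the (8-y, y) weight pair per output byte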
.mx_is_zero:
    mov          r4d, r5d
    shl          r5d, 8
    add           r5, 8
    sub           r5, r4         ; 255*y+8 = y<<8 | (8-y)
    movd          m7, r5d
    movdqa        m6, [rnd_1d_%2]
    pshuflw       m7, m7, 0
    movlhps       m7, m7

.next2yrows:
    movq          m0, [r1     ]
    movq          m1, [r1+r2  ]
    movdqa        m2, m1
    movq          m3, [r1+r2*2]
    lea           r1, [r1+r2*2]
    punpcklbw     m0, m1
    punpcklbw     m2, m3
    pmaddubsw     m0, m7
    pmaddubsw     m2, m7
%ifidn %1, avg
    movq          m4, [r0   ]
    movhps        m4, [r0+r2]
%endif
    paddw         m0, m6
    paddw         m2, m6
    psrlw         m0, 3
    psrlw         m2, 3
    packuswb      m0, m2
    CHROMAMC_AVG  m0, m4
    movq     [r0   ], m0
    movhps   [r0+r2], m0
    sub          r3d, 2
    lea           r0, [r0+r2*2]
    jg .next2yrows
    REP_RET
%endmacro

%macro chroma_mc4_ssse3_func 2
cglobal %1_%2_chroma_mc4, 6, 7, 0
%if ARCH_X86_64
    movsxd        r2, r2d
%endif
    mov           r6, r4
    shl          r4d, 8
    sub          r4d, r6d
    mov           r6, 8
    add          r4d, 8          ; x*255+8 = x<<8 | (8-x)
    sub          r6d, r5d
    imul         r6d, r4d        ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
    imul         r4d, r5d        ; y *(x*255+8) = y *x<<8 | y *(8-x)

    movd          m7, r6d
    movd          m6, r4d
    movq          m5, [pw_32]
    movd          m0, [r1  ]
    pshufw        m7, m7, 0
    punpcklbw     m0, [r1+1]
    pshufw        m6, m6, 0

.next2rows:
    movd          m1, [r1+r2*1  ]
    movd          m3, [r1+r2*2  ]
    punpcklbw     m1, [r1+r2*1+1]
    punpcklbw     m3, [r1+r2*2+1]
    lea           r1, [r1+r2*2]
    movq          m2, m1
    movq          m4, m3
    pmaddubsw     m0, m7
    pmaddubsw     m1, m6
    pmaddubsw     m2, m7
    pmaddubsw     m3, m6
    paddw         m0, m5
    paddw         m2, m5
    paddw         m1, m0
    paddw         m3, m2
    psrlw         m1, 6
    movq          m0, m4
    psrlw         m3, 6
    packuswb      m1, m1
    packuswb      m3, m3
    CHROMAMC_AVG  m1, [r0   ]
    CHROMAMC_AVG  m3, [r0+r2]
    movd     [r0   ], m1
    movd     [r0+r2], m3
    sub          r3d, 2
    lea           r0, [r0+r2*2]
    jg .next2rows
    REP_RET
%endmacro

%define CHROMAMC_AVG NOTHING
INIT_XMM ssse3
chroma_mc8_ssse3_func put, h264, _rnd
chroma_mc8_ssse3_func put, vc1,  _nornd
INIT_MMX ssse3
chroma_mc4_ssse3_func put, h264

%define CHROMAMC_AVG DIRECT_AVG
INIT_XMM ssse3
chroma_mc8_ssse3_func avg, h264, _rnd
chroma_mc8_ssse3_func avg, vc1,  _nornd
INIT_MMX ssse3
chroma_mc4_ssse3_func avg, h264