;******************************************************************************
;* Copyright (c) 2012 Michael Niedermayer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

; Audio channel rematrixing kernels (libswresample): scale one or two input
; channels by per-channel coefficients and mix into one output channel, in
; float and int16 variants.  Each macro is instantiated twice per ISA: "%1 = a"
; (aligned loads/stores) and "%1 = u" (unaligned).  The aligned entry point
; tests its pointers at runtime and falls through to the unaligned body when
; any of them is misaligned.
; NOTE(review): callers presumably guarantee len is a positive multiple of
; 2*mmsize elements per iteration granularity — confirm against the C side.

%include "libavutil/x86/x86util.asm"


SECTION_RODATA
align 32
dw1: times  8 dd 1                      ; per-dword 1s, rounding base for int16 2:1 mix
w1 : times 16 dw 1                      ; per-word 1s, used to widen words via punpck*

SECTION .text

;------------------------------------------------------------------------------
; void mix_2_1_%1_float(float *out, float *in1, float *in2, float *coeffp,
;                       int index1, int index2, int len)
; out[i] = coeffp[index1]*in1[i] + coeffp[index2]*in2[i]
; %1 = a (aligned) or u (unaligned); processes 2*mmsize bytes per iteration.
;------------------------------------------------------------------------------
%macro MIX2_FLT 1
cglobal mix_2_1_%1_float, 7, 7, 6, out, in1, in2, coeffp, index1, index2, len
%ifidn %1, a
    ; Runtime alignment check: any misaligned pointer -> unaligned version.
    test in1q, mmsize-1
        jne mix_2_1_float_u_int %+ SUFFIX
    test in2q, mmsize-1
        jne mix_2_1_float_u_int %+ SUFFIX
    test outq, mmsize-1
        jne mix_2_1_float_u_int %+ SUFFIX
%else
; Entry point the aligned variant jumps to on misalignment (colon added to
; silence NASM's orphan-label warning; no code change).
mix_2_1_float_u_int %+ SUFFIX:
%endif
    VBROADCASTSS m4, [coeffpq + 4*index1q]  ; m4 = coeff1 in every lane
    VBROADCASTSS m5, [coeffpq + 4*index2q]  ; m5 = coeff2 in every lane
    shl      lend, 2                    ; element count -> byte count (floats)
    ; Point past the end and run lenq from -bytes up to 0: the loop needs
    ; only one add + one conditional jump per iteration.
    add      in1q, lenq
    add      in2q, lenq
    add      outq, lenq
    neg      lenq
.next:
%ifidn %1, a
    mulps    m0, m4, [in1q + lenq         ]
    mulps    m1, m5, [in2q + lenq         ]
    mulps    m2, m4, [in1q + lenq + mmsize]
    mulps    m3, m5, [in2q + lenq + mmsize]
%else
    movu     m0, [in1q + lenq         ]
    movu     m1, [in2q + lenq         ]
    movu     m2, [in1q + lenq + mmsize]
    movu     m3, [in2q + lenq + mmsize]
    mulps    m0, m0, m4
    mulps    m1, m1, m5
    mulps    m2, m2, m4
    mulps    m3, m3, m5
%endif
    addps    m0, m0, m1
    addps    m2, m2, m3
    mov%1    [outq + lenq         ], m0
    mov%1    [outq + lenq + mmsize], m2
    add      lenq, mmsize*2
    jl .next
    REP_RET
%endmacro

;------------------------------------------------------------------------------
; void mix_1_1_%1_float(float *out, float *in, float *coeffp, int index,
;                       int len)
; out[i] = coeffp[index] * in[i]
;------------------------------------------------------------------------------
%macro MIX1_FLT 1
cglobal mix_1_1_%1_float, 5, 5, 3, out, in, coeffp, index, len
%ifidn %1, a
    test inq, mmsize-1
        jne mix_1_1_float_u_int %+ SUFFIX
    test outq, mmsize-1
        jne mix_1_1_float_u_int %+ SUFFIX
%else
mix_1_1_float_u_int %+ SUFFIX:
%endif
    VBROADCASTSS m2, [coeffpq + 4*indexq]   ; m2 = coeff in every lane
    shl      lenq, 2                    ; element count -> byte count
    add      inq , lenq
    add      outq, lenq
    neg      lenq                       ; negative-offset loop (see MIX2_FLT)
.next:
%ifidn %1, a
    mulps    m0, m2, [inq + lenq         ]
    mulps    m1, m2, [inq + lenq + mmsize]
%else
    movu     m0, [inq + lenq         ]
    movu     m1, [inq + lenq + mmsize]
    mulps    m0, m0, m2
    mulps    m1, m1, m2
%endif
    mov%1    [outq + lenq         ], m0
    mov%1    [outq + lenq + mmsize], m1
    add      lenq, mmsize*2
    jl .next
    REP_RET
%endmacro

;------------------------------------------------------------------------------
; void mix_1_1_%1_int16(int16_t *out, int16_t *in, int *coeffp, int index,
;                       int len)
; Fixed-point 1:1 mix.  The coefficient dword at coeffp[index] packs the
; 16-bit coefficient in its low word and the right-shift amount in its high
; word (bits 16..31); result = (in[i]*coeff + rounding) >> shift, saturated
; back to int16.  NOTE(review): exact packing inferred from the psllq/psrlq
; extraction below — confirm against the C rematrix setup code.
;------------------------------------------------------------------------------
%macro MIX1_INT16 1
cglobal mix_1_1_%1_int16, 5, 5, 6, out, in, coeffp, index, len
%ifidn %1, a
    test inq, mmsize-1
        jne mix_1_1_int16_u_int %+ SUFFIX
    test outq, mmsize-1
        jne mix_1_1_int16_u_int %+ SUFFIX
%else
mix_1_1_int16_u_int %+ SUFFIX:
%endif
    movd     m4, [coeffpq + 4*indexq]   ; low dword: coeff | (shift << 16)
    SPLATW   m5, m4                     ; m5 = coeff broadcast to every word
    psllq    m4, 32
    psrlq    m4, 48                     ; m4.q0 = shift (isolated high word)
    mova     m0, [w1]
    psllw    m0, m4
    psrlw    m0, 1                      ; m0 = 1 << (shift-1): rounding term
    punpcklwd m5, m0                    ; word pairs (coeff, round) for pmaddwd
    add      lenq, lenq                 ; element count -> byte count (int16)
    add      inq , lenq
    add      outq, lenq
    neg      lenq
.next:
    mov%1    m0, [inq + lenq         ]
    mov%1    m2, [inq + lenq + mmsize]
    mova     m1, m0
    mova     m3, m2
    ; Interleave samples with constant 1 so that pmaddwd against the
    ; (coeff, round) pairs yields sample*coeff + round per dword.
    punpcklwd m0, [w1]
    punpckhwd m1, [w1]
    punpcklwd m2, [w1]
    punpckhwd m3, [w1]
    pmaddwd  m0, m5
    pmaddwd  m1, m5
    pmaddwd  m2, m5
    pmaddwd  m3, m5
    psrad    m0, m4                     ; arithmetic shift back to int16 scale
    psrad    m1, m4
    psrad    m2, m4
    psrad    m3, m4
    packssdw m0, m1                     ; saturating repack dword -> word
    packssdw m2, m3
    mov%1    [outq + lenq         ], m0
    mov%1    [outq + lenq + mmsize], m2
    add      lenq, mmsize*2
    jl .next
%if mmsize == 8
    emms                                ; MMX variant must clear FPU tag word
    RET
%else
    REP_RET
%endif
%endmacro

;------------------------------------------------------------------------------
; void mix_2_1_%1_int16(int16_t *out, int16_t *in1, int16_t *in2, int *coeffp,
;                       int index1, int index2, int len)
; Fixed-point 2:1 mix:
;   out[i] = (in1[i]*coeff1 + in2[i]*coeff2 + rounding) >> shift, saturated.
; Shift amount comes from the high word of coeffp[index1] (same packing as
; MIX1_INT16); both coefficients are assumed to share it.
;------------------------------------------------------------------------------
%macro MIX2_INT16 1
cglobal mix_2_1_%1_int16, 7, 7, 8, out, in1, in2, coeffp, index1, index2, len
%ifidn %1, a
    test in1q, mmsize-1
        jne mix_2_1_int16_u_int %+ SUFFIX
    test in2q, mmsize-1
        jne mix_2_1_int16_u_int %+ SUFFIX
    test outq, mmsize-1
        jne mix_2_1_int16_u_int %+ SUFFIX
%else
mix_2_1_int16_u_int %+ SUFFIX:
%endif
    movd     m4, [coeffpq + 4*index1q]
    movd     m6, [coeffpq + 4*index2q]
    SPLATW   m5, m4                     ; m5 = coeff1 in every word
    SPLATW   m6, m6                     ; m6 = coeff2 in every word
    psllq    m4, 32
    psrlq    m4, 48                     ; m4.q0 = shift
    mova     m7, [dw1]
    pslld    m7, m4
    psrld    m7, 1                      ; m7 = 1 << (shift-1) per dword: rounding
    punpcklwd m5, m6                    ; word pairs (coeff1, coeff2) for pmaddwd
    add      lend, lend                 ; element count -> byte count (int16)
    add      in1q, lenq
    add      in2q, lenq
    add      outq, lenq
    neg      lenq
.next:
    ; Interleave the two channels so pmaddwd computes
    ; in1*coeff1 + in2*coeff2 per dword lane.
    mov%1    m0, [in1q + lenq         ]
    mov%1    m2, [in2q + lenq         ]
    mova     m1, m0
    punpcklwd m0, m2
    punpckhwd m1, m2

    mov%1    m2, [in1q + lenq + mmsize]
    mov%1    m6, [in2q + lenq + mmsize]
    mova     m3, m2
    punpcklwd m2, m6
    punpckhwd m3, m6

    pmaddwd  m0, m5
    pmaddwd  m1, m5
    pmaddwd  m2, m5
    pmaddwd  m3, m5
    paddd    m0, m7                     ; add rounding before the shift
    paddd    m1, m7
    paddd    m2, m7
    paddd    m3, m7
    psrad    m0, m4
    psrad    m1, m4
    psrad    m2, m4
    psrad    m3, m4
    packssdw m0, m1                     ; saturating repack dword -> word
    packssdw m2, m3
    mov%1    [outq + lenq         ], m0
    mov%1    [outq + lenq + mmsize], m2
    add      lenq, mmsize*2
    jl .next
%if mmsize == 8
    emms                                ; MMX variant must clear FPU tag word
    RET
%else
    REP_RET
%endif
%endmacro


; Instantiations.  Order matters within each ISA: the unaligned ("u") variant
; must be emitted first so the aligned ("a") variant can jump to its
; mix_*_u_int label on misaligned input.
INIT_MMX mmx
MIX1_INT16 u
MIX1_INT16 a
MIX2_INT16 u
MIX2_INT16 a

INIT_XMM sse
MIX2_FLT u
MIX2_FLT a
MIX1_FLT u
MIX1_FLT a

INIT_XMM sse2
MIX1_INT16 u
MIX1_INT16 a
MIX2_INT16 u
MIX2_INT16 a

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX2_FLT u
MIX2_FLT a
MIX1_FLT u
MIX1_FLT a
%endif