;******************************************************************************
;* x86 optimized channel mixing
;* Copyright (c) 2012 Justin Ruggles
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
%include "util.asm"

SECTION_TEXT

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_fltp_flt(float **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------

%macro MIX_2_TO_1_FLTP_FLT 0
cglobal mix_2_to_1_fltp_flt, 3,4,6, src, matrix, len, src1
    mov       src1q, [srcq+gprsize]
    mov        srcq, [srcq        ]
    sub       src1q, srcq
    mov     matrixq, [matrixq  ]
    VBROADCASTSS m4, [matrixq  ]
    VBROADCASTSS m5, [matrixq+4]
    ALIGN 16
.loop:
    mulps        m0, m4, [srcq             ]
    mulps        m1, m5, [srcq+src1q       ]
    mulps        m2, m4, [srcq+      mmsize]
    mulps        m3, m5, [srcq+src1q+mmsize]
    addps        m0, m0, m1
    addps        m2, m2, m3
    mova  [srcq       ], m0
    mova  [srcq+mmsize], m2
    add        srcq, mmsize*2
    sub        lend, mmsize*2/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse
MIX_2_TO_1_FLTP_FLT
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_2_TO_1_FLTP_FLT
%endif

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_flt(int16_t **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------

%macro MIX_2_TO_1_S16P_FLT 0
cglobal mix_2_to_1_s16p_flt, 3,4,6, src, matrix, len, src1
    mov       src1q, [srcq+gprsize]
    mov        srcq, [srcq]
    sub       src1q, srcq
    mov     matrixq, [matrixq  ]
    VBROADCASTSS m4, [matrixq  ]
    VBROADCASTSS m5, [matrixq+4]
    ALIGN 16
.loop:
    mova         m0, [srcq      ]
    mova         m2, [srcq+src1q]
    S16_TO_S32_SX 0, 1
    S16_TO_S32_SX 2, 3
    cvtdq2ps     m0, m0
    cvtdq2ps     m1, m1
    cvtdq2ps     m2, m2
    cvtdq2ps     m3, m3
    mulps        m0, m4
    mulps        m1, m4
    mulps        m2, m5
    mulps        m3, m5
    addps        m0, m2
    addps        m1, m3
    cvtps2dq     m0, m0
    cvtps2dq     m1, m1
    packssdw     m0, m1
    mova     [srcq], m0
    add        srcq, mmsize
    sub        lend, mmsize/2
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
MIX_2_TO_1_S16P_FLT
INIT_XMM sse4
MIX_2_TO_1_S16P_FLT

;-----------------------------------------------------------------------------
; void ff_mix_2_to_1_s16p_q8(int16_t **src, int16_t **matrix, int len,
;                            int out_ch, int in_ch);
;-----------------------------------------------------------------------------

INIT_XMM sse2
cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
    mov       src1q, [srcq+gprsize]
    mov        srcq, [srcq]
    sub       src1q, srcq
    mov     matrixq, [matrixq]
    movd         m4, [matrixq]
    movd         m5, [matrixq]
    SPLATW       m4, m4, 0
    SPLATW       m5, m5, 1
    pxor         m0, m0
    punpcklwd    m4, m0
    punpcklwd    m5, m0
    ALIGN 16
.loop:
    mova         m0, [srcq      ]
    mova         m2, [srcq+src1q]
    punpckhwd    m1, m0, m0
    punpcklwd    m0, m0
    punpckhwd    m3, m2, m2
    punpcklwd    m2, m2
    pmaddwd      m0, m4
    pmaddwd      m1, m4
    pmaddwd      m2, m5
    pmaddwd      m3, m5
    paddd        m0, m2
    paddd        m1, m3
    psrad        m0, 8
    psrad        m1, 8
    packssdw     m0, m1
    mova     [srcq], m0
    add        srcq, mmsize
    sub        lend, mmsize/2
    jg .loop
    REP_RET
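
; As a scalar reference, each of the 2-to-1 mixers above computes, per sample
; (a sketch; the mix is in place, and the s16p variants saturate the result
; back to int16_t via packssdw):
;     src[0][i] = matrix[0][0] * src[0][i] + matrix[0][1] * src[1][i];
; or, for the q8 fixed-point version with Q8 int16_t coefficients:
;     src[0][i] = av_clip_int16((matrix[0][0] * src[0][i] +
;                                matrix[0][1] * src[1][i]) >> 8);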

;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------

%macro MIX_1_TO_2_FLTP_FLT 0
cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
    mov       src1q, [src0q+gprsize]
    mov       src0q, [src0q]
    sub       src1q, src0q
    mov    matrix1q, [matrix0q+gprsize]
    mov    matrix0q, [matrix0q]
    VBROADCASTSS m2, [matrix0q]
    VBROADCASTSS m3, [matrix1q]
    ALIGN 16
.loop:
    mova         m0, [src0q]
    mulps        m1, m0, m3
    mulps        m0, m0, m2
    mova  [src0q      ], m0
    mova  [src0q+src1q], m1
    add       src0q, mmsize
    sub        lend, mmsize/4
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse
MIX_1_TO_2_FLTP_FLT
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_1_TO_2_FLTP_FLT
%endif

;-----------------------------------------------------------------------------
; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
;                             int out_ch, int in_ch);
;-----------------------------------------------------------------------------

%macro MIX_1_TO_2_S16P_FLT 0
cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
    mov       src1q, [src0q+gprsize]
    mov       src0q, [src0q]
    sub       src1q, src0q
    mov    matrix1q, [matrix0q+gprsize]
    mov    matrix0q, [matrix0q]
    VBROADCASTSS m4, [matrix0q]
    VBROADCASTSS m5, [matrix1q]
    ALIGN 16
.loop:
    mova         m0, [src0q]
    S16_TO_S32_SX 0, 2
    cvtdq2ps     m0, m0
    cvtdq2ps     m2, m2
    mulps        m1, m0, m5
    mulps        m0, m0, m4
    mulps        m3, m2, m5
    mulps        m2, m2, m4
    cvtps2dq     m0, m0
    cvtps2dq     m1, m1
    cvtps2dq     m2, m2
    cvtps2dq     m3, m3
    packssdw     m0, m2
    packssdw     m1, m3
    mova  [src0q      ], m0
    mova  [src0q+src1q], m1
    add       src0q, mmsize
    sub        lend, mmsize/2
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
MIX_1_TO_2_S16P_FLT
INIT_XMM sse4
MIX_1_TO_2_S16P_FLT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
MIX_1_TO_2_S16P_FLT
%endif
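
; Scalar reference for the 1-to-2 mixers above (a sketch): the second output
; channel is computed first, since src[0] is overwritten in place:
;     src[1][i] = matrix[1][0] * src[0][i];
;     src[0][i] = matrix[0][0] * src[0][i];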

;-----------------------------------------------------------------------------
; void ff_mix_3_8_to_1_2_fltp/s16p_flt(float/int16_t **src, float **matrix,
;                                      int len, int out_ch, int in_ch);
;-----------------------------------------------------------------------------

%macro MIX_3_8_TO_1_2_FLT 3 ; %1 = in channels, %2 = out channels, %3 = s16p or fltp
; define some names to make the code clearer
%assign  in_channels %1
%assign out_channels %2
%assign stereo out_channels - 1
%ifidn %3, s16p
    %assign is_s16 1
%else
    %assign is_s16 0
%endif

; determine how many matrix elements must go on the stack vs. mmregs
%assign matrix_elements in_channels * out_channels
%if is_s16
    %if stereo
        %assign needed_mmregs 7
    %else
        %assign needed_mmregs 5
    %endif
%else
    %if stereo
        %assign needed_mmregs 4
    %else
        %assign needed_mmregs 3
    %endif
%endif
%assign matrix_elements_mm num_mmregs - needed_mmregs
%if matrix_elements < matrix_elements_mm
    %assign matrix_elements_mm matrix_elements
%endif
%if matrix_elements_mm < matrix_elements
    %assign matrix_elements_stack matrix_elements - matrix_elements_mm
%else
    %assign matrix_elements_stack 0
%endif
%assign matrix_stack_size matrix_elements_stack * mmsize

%assign needed_stack_size -1 * matrix_stack_size
%if ARCH_X86_32 && in_channels >= 7
    %assign needed_stack_size needed_stack_size - 16
%endif

cglobal mix_%1_to_%2_%3_flt, 3,in_channels+2,needed_mmregs+matrix_elements_mm, needed_stack_size, src0, src1, len, src2, src3, src4, src5, src6, src7

; define src pointers on stack if needed
%if matrix_elements_stack > 0 && ARCH_X86_32 && in_channels >= 7
    %define src5m [rsp+matrix_stack_size+0]
    %define src6m [rsp+matrix_stack_size+4]
    %define src7m [rsp+matrix_stack_size+8]
%endif

; load matrix pointers
%define matrix0q r1q
%define matrix1q r3q
%if stereo
    mov matrix1q, [matrix0q+gprsize]
%endif
    mov matrix0q, [matrix0q]

; define matrix coeff names
%assign %%i 0
%assign %%j needed_mmregs
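; Coefficients that do not fit in the remaining mm registers are spilled to
; the stack; mx_stack_<out>_<in> records which form each coefficient takes so
; the mixing loop below can reference either a register or a stack slot.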
%rep in_channels
    %if %%i >= matrix_elements_mm
        CAT_XDEFINE mx_stack_0_, %%i, 1
        CAT_XDEFINE mx_0_, %%i, [rsp+(%%i-matrix_elements_mm)*mmsize]
    %else
        CAT_XDEFINE mx_stack_0_, %%i, 0
        CAT_XDEFINE mx_0_, %%i, m %+ %%j
        %assign %%j %%j+1
    %endif
    %assign %%i %%i+1
%endrep
%if stereo
%assign %%i 0
%rep in_channels
    %if in_channels + %%i >= matrix_elements_mm
        CAT_XDEFINE mx_stack_1_, %%i, 1
        CAT_XDEFINE mx_1_, %%i, [rsp+(in_channels+%%i-matrix_elements_mm)*mmsize]
    %else
        CAT_XDEFINE mx_stack_1_, %%i, 0
        CAT_XDEFINE mx_1_, %%i, m %+ %%j
        %assign %%j %%j+1
    %endif
    %assign %%i %%i+1
%endrep
%endif

; load/splat matrix coeffs
%assign %%i 0
%rep in_channels
    %if mx_stack_0_ %+ %%i
        VBROADCASTSS m0, [matrix0q+4*%%i]
        mova mx_0_ %+ %%i, m0
    %else
        VBROADCASTSS mx_0_ %+ %%i, [matrix0q+4*%%i]
    %endif
    %if stereo
    %if mx_stack_1_ %+ %%i
        VBROADCASTSS m0, [matrix1q+4*%%i]
        mova mx_1_ %+ %%i, m0
    %else
        VBROADCASTSS mx_1_ %+ %%i, [matrix1q+4*%%i]
    %endif
    %endif
    %assign %%i %%i+1
%endrep

; load channel pointers to registers as offsets from the first channel pointer
%if ARCH_X86_64
    movsxd       lenq, r2d
%endif
    shl          lenq, 2-is_s16
%assign %%i 1
%rep (in_channels - 1)
    %if ARCH_X86_32 && in_channels >= 7 && %%i >= 5
    mov         src5q, [src0q+%%i*gprsize]
    add         src5q, lenq
    mov         src %+ %%i %+ m, src5q
    %else
    mov         src %+ %%i %+ q, [src0q+%%i*gprsize]
    add         src %+ %%i %+ q, lenq
    %endif
    %assign %%i %%i+1
%endrep
    mov         src0q, [src0q]
    add         src0q, lenq
    neg          lenq
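; Each channel pointer was advanced to the end of its buffer and lenq negated,
; so the loop below indexes with a negative offset that counts up toward zero;
; a single add+jl then serves as both pointer advance and loop counter.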
.loop:
; for x86-32 with 7-8 channels we do not have enough gp registers for all src
; pointers, so we have to load some of them from the stack each time
%define copy_src_from_stack ARCH_X86_32 && in_channels >= 7 && %%i >= 5
%if is_s16
    ; mix with s16p input
    mova           m0, [src0q+lenq]
    S16_TO_S32_SX   0, 1
    cvtdq2ps       m0, m0
    cvtdq2ps       m1, m1
    %if stereo
    mulps          m2, m0, mx_1_0
    mulps          m3, m1, mx_1_0
    %endif
    mulps          m0, m0, mx_0_0
    mulps          m1, m1, mx_0_0
%assign %%i 1
%rep (in_channels - 1)
    %if copy_src_from_stack
        %define src_ptr src5q
    %else
        %define src_ptr src %+ %%i %+ q
    %endif
    %if stereo
    %if copy_src_from_stack
    mov       src_ptr, src %+ %%i %+ m
    %endif
    mova           m4, [src_ptr+lenq]
    S16_TO_S32_SX   4, 5
    cvtdq2ps       m4, m4
    cvtdq2ps       m5, m5
    fmaddps        m2, m4, mx_1_ %+ %%i, m2, m6
    fmaddps        m3, m5, mx_1_ %+ %%i, m3, m6
    fmaddps        m0, m4, mx_0_ %+ %%i, m0, m4
    fmaddps        m1, m5, mx_0_ %+ %%i, m1, m5
    %else
    %if copy_src_from_stack
    mov       src_ptr, src %+ %%i %+ m
    %endif
    mova           m2, [src_ptr+lenq]
    S16_TO_S32_SX   2, 3
    cvtdq2ps       m2, m2
    cvtdq2ps       m3, m3
    fmaddps        m0, m2, mx_0_ %+ %%i, m0, m4
    fmaddps        m1, m3, mx_0_ %+ %%i, m1, m4
    %endif
    %assign %%i %%i+1
%endrep
    %if stereo
    cvtps2dq       m2, m2
    cvtps2dq       m3, m3
    packssdw       m2, m3
    mova [src1q+lenq], m2
    %endif
    cvtps2dq       m0, m0
    cvtps2dq       m1, m1
    packssdw       m0, m1
    mova [src0q+lenq], m0
%else
    ; mix with fltp input
    %if stereo || mx_stack_0_0
    mova           m0, [src0q+lenq]
    %endif
    %if stereo
    mulps          m1, m0, mx_1_0
    %endif
    %if stereo || mx_stack_0_0
    mulps          m0, m0, mx_0_0
    %else
    mulps          m0, [src0q+lenq], mx_0_0
    %endif
%assign %%i 1
%rep (in_channels - 1)
    %if copy_src_from_stack
        %define src_ptr src5q
    mov       src_ptr, src %+ %%i %+ m
    %else
        %define src_ptr src %+ %%i %+ q
    %endif
    ; avoid extra load for mono if matrix is in a mm register
    %if stereo || mx_stack_0_ %+ %%i
    mova           m2, [src_ptr+lenq]
    %endif
    %if stereo
    fmaddps        m1, m2, mx_1_ %+ %%i, m1, m3
    %endif
    %if stereo || mx_stack_0_ %+ %%i
    fmaddps        m0, m2, mx_0_ %+ %%i, m0, m2
    %else
    fmaddps        m0, mx_0_ %+ %%i, [src_ptr+lenq], m0, m1
    %endif
    %assign %%i %%i+1
%endrep
    mova [src0q+lenq], m0
    %if stereo
    mova [src1q+lenq], m1
    %endif
%endif

    add          lenq, mmsize
    jl .loop
; zero ymm high halves
%if mmsize == 32
    vzeroupper
%endif
    RET
%endmacro

%macro MIX_3_8_TO_1_2_FLT_FUNCS 0
%assign %%i 3
%rep 6
    INIT_XMM sse
    MIX_3_8_TO_1_2_FLT %%i, 1, fltp
    MIX_3_8_TO_1_2_FLT %%i, 2, fltp
    INIT_XMM sse2
    MIX_3_8_TO_1_2_FLT %%i, 1, s16p
    MIX_3_8_TO_1_2_FLT %%i, 2, s16p
    INIT_XMM sse4
    MIX_3_8_TO_1_2_FLT %%i, 1, s16p
    MIX_3_8_TO_1_2_FLT %%i, 2, s16p
    ; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
    %if HAVE_AVX_EXTERNAL
    %if ARCH_X86_64 || %%i < 6
        INIT_YMM avx
    %else
        INIT_XMM avx
    %endif
    MIX_3_8_TO_1_2_FLT %%i, 1, fltp
    MIX_3_8_TO_1_2_FLT %%i, 2, fltp
    INIT_XMM avx
    MIX_3_8_TO_1_2_FLT %%i, 1, s16p
    MIX_3_8_TO_1_2_FLT %%i, 2, s16p
    %endif
    %if HAVE_FMA4_EXTERNAL
    %if ARCH_X86_64 || %%i < 6
        INIT_YMM fma4
    %else
        INIT_XMM fma4
    %endif
    MIX_3_8_TO_1_2_FLT %%i, 1, fltp
    MIX_3_8_TO_1_2_FLT %%i, 2, fltp
    INIT_XMM fma4
    MIX_3_8_TO_1_2_FLT %%i, 1, s16p
    MIX_3_8_TO_1_2_FLT %%i, 2, s16p
    %endif
    %assign %%i %%i+1
%endrep
%endmacro

MIX_3_8_TO_1_2_FLT_FUNCS
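
; All functions generated above follow the same scalar model (a sketch,
; assuming float coefficients; the s16p versions convert through float and
; saturate the results back to int16_t):
;     for (i = 0; i < len; i++) {
;         float sum0 = 0, sum1 = 0;
;         for (c = 0; c < in_ch; c++) {
;             sum0 += matrix[0][c] * src[c][i];
;             if (out_ch == 2)
;                 sum1 += matrix[1][c] * src[c][i];
;         }
;         src[0][i] = sum0;
;         if (out_ch == 2)
;             src[1][i] = sum1;
;     }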