;******************************************************************************
;* Vorbis x86 optimizations
;* Copyright (C) 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; Sign-bit mask for a vector of 4 single-precision floats.
pdw_80000000: times 4 dd 0x80000000

SECTION .text

; void vorbis_inverse_coupling(float *mag, float *ang, intptr_t blocksize)
;
; Undo Vorbis magnitude/angle channel coupling in place:
;   if (mag >= 0) { if (ang >= 0) a = m - a; else { a = m; m += ang; } }
;   else          { if (ang >= 0) a = m + a; else { a = m; m -= ang; } }
; Both vector variants below implement this branchlessly via sign-bit tricks.
; NOTE(review): blocksize is assumed to be a multiple of the vector width
; (2 floats for MMX, 4 for SSE) and the buffers vector-aligned — the aligned
; loads/stores and the loop step rely on it; confirm against the caller.

%if ARCH_X86_32
INIT_MMX 3dnow
cglobal vorbis_inverse_coupling, 3, 3, 6, mag, ang, block_size
    pxor                m7, m7
    ; Point both bases at the end of the buffers and count a negative index
    ; up towards zero, so the loop condition is a simple sign test (jl).
    lea               magq, [magq+block_sizeq*4]
    lea               angq, [angq+block_sizeq*4]
    neg        block_sizeq
.loop:
    mova                m0, [magq+block_sizeq*4]
    mova                m1, [angq+block_sizeq*4]
    mova                m2, m0
    mova                m3, m1
    pfcmpge             m2, m7     ; m <= 0.0
    pfcmpge             m3, m7     ; a <= 0.0
    pslld               m2, 31     ; keep only the sign bit
    pxor                m1, m2     ; a ^= sign(m)
    mova                m4, m3
    pand                m3, m1     ; mask a where a < 0
    pandn               m4, m1     ; mask a where a >= 0
    pfadd               m3, m0     ; a = m + ((a < 0) & (a ^ sign(m)))
    pfsub               m0, m4     ; m = m + ((a > 0) & (a ^ sign(m)))
    mova  [angq+block_sizeq*4], m3
    mova  [magq+block_sizeq*4], m0
    add        block_sizeq, 2      ; 2 floats per MMX register
    jl .loop
    femms                          ; leave MMX state, re-enable x87
    RET
%endif

INIT_XMM sse
cglobal vorbis_inverse_coupling, 3, 4, 6, mag, ang, block_size, cntr
    mova                m5, [pdw_80000000]
    xor              cntrq, cntrq
align 16
.loop:
    mova                m0, [magq+cntrq*4]
    mova                m1, [angq+cntrq*4]
    xorps               m2, m2
    xorps               m3, m3
    cmpleps             m2, m0     ; m <= 0.0
    cmpleps             m3, m1     ; a <= 0.0
    andps               m2, m5     ; keep only the sign bit
    xorps               m1, m2     ; a ^= sign(m)
    mova                m4, m3
    andps               m3, m1     ; mask a where a < 0
    andnps              m4, m1     ; mask a where a >= 0
    addps               m3, m0     ; a = m + ((a < 0) & (a ^ sign(m)))
    subps               m0, m4     ; m = m + ((a > 0) & (a ^ sign(m)))
    mova [angq+cntrq*4], m3
    mova [magq+cntrq*4], m0
    add              cntrq, 4      ; 4 floats per XMM register
    cmp              cntrq, block_sizeq
    jl .loop
    RET