/*
 * ARM NEON optimised integer operations
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

        .fpu neon

@ int32_t ff_scalarproduct_int16(const int16_t *v1, const int16_t *v2, int order)
@ ABI:   AAPCS (AArch32)
@ In:    r0 = v1, r1 = v2 (16-byte aligned), r2 = order
@        (assumed to be a positive multiple of 16 -- the loop has no pre-test)
@ Out:   r0 = sum(v1[i] * v2[i]), 32-bit result
@ Clobb: r2, q0-q3, q8-q11, flags (d8-d15 are NOT touched)
function ff_scalarproduct_int16_neon, export=1
        vmov.i16        q0,  #0                 @ four independent s32 accumulators
        vmov.i16        q1,  #0                 @ to hide the vmlal latency
        vmov.i16        q2,  #0
        vmov.i16        q3,  #0
1:      vld1.16         {d16-d17}, [r0]!
        vld1.16         {d20-d21}, [r1,:128]!
        vmlal.s16       q0,  d16, d20           @ widening multiply-accumulate
        vld1.16         {d18-d19}, [r0]!
        vmlal.s16       q1,  d17, d21
        vld1.16         {d22-d23}, [r1,:128]!
        vmlal.s16       q2,  d18, d22
        vmlal.s16       q3,  d19, d23
        subs            r2,  r2,  #16           @ 16 int16 elements per iteration
        bne             1b

        @ Horizontal reduction of the four q accumulators into one s32.
        @ BUGFIX: use d18/d19 as scratch instead of d10/d11 -- d8-d15 are
        @ callee-saved under the AAPCS and were being clobbered here.
        vpadd.s32       d16, d0,  d1
        vpadd.s32       d17, d2,  d3
        vpadd.s32       d18, d4,  d5
        vpadd.s32       d19, d6,  d7
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2                 @ fold the last two lanes
        vmov.32         r0,  d3[0]              @ low 32 bits = final sum
        bx              lr
endfunc

@ int32_t ff_scalarproduct_and_madd_int16(int16_t *v1 /*aligned*/,
@                                         const int16_t *v2,
@                                         const int16_t *v3,
@                                         int order, int mul)
@ Dot product of v1 and v2 while simultaneously updating v1[i] += v3[i] * mul.
@ ABI:   AAPCS (AArch32)
@ In:    r0 = v1 (16-byte aligned), r1 = v2, r2 = v3,
@        r3 = order (assumed positive multiple of 16), [sp] = mul
@ Out:   r0 = sum(v1[i] * v2[i]) using the pre-update v1 values
@ Clobb: r3, r12, q0-q3, q8-q14, flags (d8-d15 are NOT touched)
function ff_scalarproduct_and_madd_int16_neon, export=1
        vld1.16         {d28[],d29[]}, [sp]     @ broadcast mul into all lanes of q14
        vmov.i16        q0,  #0                 @ four independent s32 accumulators
        vmov.i16        q1,  #0
        vmov.i16        q2,  #0
        vmov.i16        q3,  #0
        mov             r12, r0                 @ separate write pointer for v1 update

1:      vld1.16         {d16-d17}, [r0,:128]!   @ q8  = v1[0..7]
        vld1.16         {d18-d19}, [r1]!        @ q9  = v2[0..7]
        vld1.16         {d20-d21}, [r2]!        @ q10 = v3[0..7]
        vld1.16         {d22-d23}, [r0,:128]!   @ q11 = v1[8..15]
        vld1.16         {d24-d25}, [r1]!        @ q12 = v2[8..15]
        vld1.16         {d26-d27}, [r2]!        @ q13 = v3[8..15]
        vmul.s16        q10, q10, q14           @ v3 * mul
        vmul.s16        q13, q13, q14
        vmlal.s16       q0,  d16, d18           @ accumulate v1 . v2 (pre-update v1)
        vmlal.s16       q1,  d17, d19
        vadd.s16        q10, q8,  q10           @ v1 + v3 * mul
        vadd.s16        q13, q11, q13
        vmlal.s16       q2,  d22, d24
        vmlal.s16       q3,  d23, d25
        vst1.16         {q10}, [r12,:128]!      @ store updated v1
        subs            r3,  r3,  #16           @ 16 int16 elements per iteration
        vst1.16         {q13}, [r12,:128]!
        bne             1b

        @ Horizontal reduction, same shape as above.
        @ BUGFIX: d18/d19 (caller-saved q9, dead after the loop) replace the
        @ callee-saved d10/d11 that the original clobbered in violation of AAPCS.
        vpadd.s32       d16, d0,  d1
        vpadd.s32       d17, d2,  d3
        vpadd.s32       d18, d4,  d5
        vpadd.s32       d19, d6,  d7
        vpadd.s32       d0,  d16, d17
        vpadd.s32       d1,  d18, d19
        vpadd.s32       d2,  d0,  d1
        vpaddl.s32      d3,  d2                 @ fold the last two lanes
        vmov.32         r0,  d3[0]              @ low 32 bits = final sum
        bx              lr
endfunc