/*
 * Copyright (c) 2011 Mans Rullgard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

@ MDCT pre-rotation: combine pairs of input samples (input in r2), apply the
@ cos/sin twiddle factors from tcos and scatter the results into \dst in
@ bit-reversed order (revtab), ready for the in-place FFT.
.macro prerot dst, rt
        lsr             r3,  r6,  #2            @ n4
        add             \rt, r4,  r6,  lsr #1   @ revtab + n4
        add             r9,  r3,  r3,  lsl #1   @ n3
        add             r8,  r7,  r6            @ tcos + n4
        add             r3,  r2,  r6,  lsr #1   @ in + n4
        add             r9,  r2,  r9,  lsl #1   @ in + n3
        sub             r8,  r8,  #16
        sub             r10, r3,  #16
        sub             r11, r9,  #16
        mov             r12, #-16
1:
        vld2.16         {d0,d1},   [r9, :128]!
        vld2.16         {d2,d3},   [r11,:128], r12
        vld2.16         {d4,d5},   [r3, :128]!
        vld2.16         {d6,d7},   [r10,:128], r12
        vld2.16         {d16,d17}, [r7, :128]!  @ cos, sin
        vld2.16         {d18,d19}, [r8, :128], r12
        vrev64.16       q1,  q1
        vrev64.16       q3,  q3
        vrev64.16       q9,  q9
        vneg.s16        d0,  d0
        vneg.s16        d2,  d2
        vneg.s16        d16, d16
        vneg.s16        d18, d18
        vhsub.s16       d0,  d0,  d3            @ re
        vhsub.s16       d4,  d7,  d4            @ im
        vhsub.s16       d6,  d6,  d5
        vhsub.s16       d2,  d2,  d1
        vmull.s16       q10, d0,  d16
        vmlsl.s16       q10, d4,  d17
        vmull.s16       q11, d0,  d17
        vmlal.s16       q11, d4,  d16
        vmull.s16       q12, d6,  d18
        vmlsl.s16       q12, d2,  d19
        vmull.s16       q13, d6,  d19
        vmlal.s16       q13, d2,  d18
        vshrn.s32       d0,  q10, #15
        vshrn.s32       d1,  q11, #15
        vshrn.s32       d2,  q12, #15
        vshrn.s32       d3,  q13, #15
        vzip.16         d0,  d1
        vzip.16         d2,  d3
        ldrh            lr,  [r4], #2
        ldrh            r2,  [\rt, #-2]!
        add             lr,  \dst, lr,  lsl #2
        add             r2,  \dst, r2,  lsl #2
        vst1.32         {d0[0]},   [lr,:32]
        vst1.32         {d2[0]},   [r2,:32]
        ldrh            lr,  [r4], #2
        ldrh            r2,  [\rt, #-2]!
        add             lr,  \dst, lr,  lsl #2
        add             r2,  \dst, r2,  lsl #2
        vst1.32         {d0[1]},   [lr,:32]
        vst1.32         {d2[1]},   [r2,:32]
        ldrh            lr,  [r4], #2
        ldrh            r2,  [\rt, #-2]!
        add             lr,  \dst, lr,  lsl #2
        add             r2,  \dst, r2,  lsl #2
        vst1.32         {d1[0]},   [lr,:32]
        vst1.32         {d3[0]},   [r2,:32]
        ldrh            lr,  [r4], #2
        ldrh            r2,  [\rt, #-2]!
        add             lr,  \dst, lr,  lsl #2
        add             r2,  \dst, r2,  lsl #2
        vst1.32         {d1[1]},   [lr,:32]
        vst1.32         {d3[1]},   [r2,:32]
        subs            r6,  r6,  #32
        bgt             1b
.endm

@ Forward fixed-point MDCT, 16-bit output.
@ r0: FFTContext, r1: output buffer, r2: input samples
function ff_mdct_fixed_calc_neon, export=1
        push            {r1,r4-r11,lr}

        ldr             r4,  [r0, #8]           @ revtab
        ldr             r6,  [r0, #16]          @ mdct_size; n
        ldr             r7,  [r0, #24]          @ tcos

        prerot          r1,  r5

        mov             r4,  r0
        bl              X(ff_fft_fixed_calc_neon)

        pop             {r5}
        mov             r12, #-16
        ldr             r6,  [r4, #16]          @ mdct_size; n
        ldr             r7,  [r4, #24]          @ tcos
        add             r5,  r5,  r6,  lsr #1
        add             r7,  r7,  r6,  lsr #1
        sub             r1,  r5,  #16
        sub             r2,  r7,  #16
1:
        vld2.16         {d4,d5},   [r7,:128]!
        vld2.16         {d6,d7},   [r2,:128], r12
        vld2.16         {d0,d1},   [r5,:128]
        vld2.16         {d2,d3},   [r1,:128]
        vrev64.16       q3,  q3
        vrev64.16       q1,  q1
        vneg.s16        q3,  q3
        vneg.s16        q2,  q2
        vmull.s16       q11, d2,  d6
        vmlal.s16       q11, d3,  d7
        vmull.s16       q8,  d0,  d5
        vmlsl.s16       q8,  d1,  d4
        vmull.s16       q9,  d0,  d4
        vmlal.s16       q9,  d1,  d5
        vmull.s16       q10, d2,  d7
        vmlsl.s16       q10, d3,  d6
        vshrn.s32       d0,  q11, #15
        vshrn.s32       d1,  q8,  #15
        vshrn.s32       d2,  q9,  #15
        vshrn.s32       d3,  q10, #15
        vrev64.16       q0,  q0
        vst2.16         {d2,d3},   [r5,:128]!
        vst2.16         {d0,d1},   [r1,:128], r12
        subs            r6,  r6,  #32
        bgt             1b

        pop             {r4-r11,pc}
endfunc

@ Forward fixed-point MDCT with 32-bit output (mdct_calcw): the post-rotation
@ stores the full-width products instead of narrowing them to 16 bits.
@ r0: FFTContext, r1: output buffer, r2: input samples
function ff_mdct_fixed_calcw_neon, export=1
        push            {r1,r4-r11,lr}

        ldrd            r4,  r5,  [r0, #8]      @ revtab, tmp_buf
        ldr             r6,  [r0, #16]          @ mdct_size; n
        ldr             r7,  [r0, #24]          @ tcos

        prerot          r5,  r1

        mov             r4,  r0
        mov             r1,  r5
        bl              X(ff_fft_fixed_calc_neon)

        pop             {r7}
        mov             r12, #-16
        ldr             r6,  [r4, #16]          @ mdct_size; n
        ldr             r9,  [r4, #24]          @ tcos
        add             r5,  r5,  r6,  lsr #1
        add             r7,  r7,  r6
        add             r9,  r9,  r6,  lsr #1
        sub             r3,  r5,  #16
        sub             r1,  r7,  #16
        sub             r2,  r9,  #16
1:
        vld2.16         {d4,d5},   [r9,:128]!
        vld2.16         {d6,d7},   [r2,:128], r12
        vld2.16         {d0,d1},   [r5,:128]!
        vld2.16         {d2,d3},   [r3,:128], r12
        vrev64.16       q3,  q3
        vrev64.16       q1,  q1
        vneg.s16        q3,  q3
        vneg.s16        q2,  q2
        vmull.s16       q8,  d2,  d6
        vmlal.s16       q8,  d3,  d7
        vmull.s16       q9,  d0,  d5
        vmlsl.s16       q9,  d1,  d4
        vmull.s16       q10, d0,  d4
        vmlal.s16       q10, d1,  d5
        vmull.s16       q11, d2,  d7
        vmlsl.s16       q11, d3,  d6
        vrev64.32       q8,  q8
        vrev64.32       q9,  q9
        vst2.32         {q10,q11}, [r7,:128]!
        vst2.32         {d16,d18}, [r1,:128], r12
        vst2.32         {d17,d19}, [r1,:128], r12
        subs            r6,  r6,  #32
        bgt             1b

        pop             {r4-r11,pc}
endfunc