/*
 * ARM NEON optimised RDFT
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

@-----------------------------------------------------------------------------
@ void ff_rdft_calc_neon(RDFTContext *s, FFTSample *z)
@
@ Real-to-complex (forward) / complex-to-real (inverse) DFT, NEON optimised.
@ In:   r0 = context pointer; fields read here (offsets taken from the
@            original inline comments): +0 nbits, +4 inverse, +8 sign
@            convention, +12 tcos table, +16 tsin table.
@            NOTE(review): offset +20 appears to be an embedded FFT context,
@            judging by the `add r0, r4, #20` before each fft call — confirm
@            against RDFTContext in rdft.h.
@       r1 = in-place data buffer z (2^nbits floats, 8-byte aligned).
@ Out:  result written back over z; no value returned.
@ Clobbers r0-r3, r12, NEON q0-q12; r4-r8/lr saved on the stack (AAPCS).
@
@ Structure: forward transform runs the complex FFT first, then the
@ real-to-complex post-pass below; the inverse runs the (conjugate) pre-pass
@ first and tail-calls the FFT at the end.
@-----------------------------------------------------------------------------
function ff_rdft_calc_neon, export=1
        push            {r4-r8,lr}

        ldr             r6,  [r0, #4]           @ inverse
        mov             r4,  r0                  @ r4 = context, live across calls
        mov             r5,  r1                  @ r5 = data buffer, live across calls

        lsls            r6,  r6,  #31            @ move inverse flag to bit 31 (sets Z/N)
        bne             1f                       @ inverse: skip the forward FFT
        add             r0,  r4,  #20            @ embedded FFT context (see NOTE above)
        bl              X(ff_fft_permute_neon)
        add             r0,  r4,  #20
        mov             r1,  r5
        bl              X(ff_fft_calc_neon)
1:
        ldr             r12, [r4, #0]           @ nbits
        mov             r2,  #1
        lsl             r12, r2,  r12            @ r12 = n = 1 << nbits
        add             r0,  r5,  #8             @ r0 = &z[2]  (ascending pointer)
        add             r1,  r5,  r12, lsl #2    @ r1 = &z[n]  (one past the buffer)
        lsr             r12, r12, #2             @ n/4 pair iterations
        ldr             r2,  [r4, #12]          @ tcos
        sub             r12, r12, #2             @ first/last pairs handled outside the loop
        ldr             r3,  [r4, #16]          @ tsin
        mov             r7,  r0                  @ r7 = write cursor mirroring r0
        sub             r1,  r1,  #8             @ r1 = &z[n-2] (descending pointer)
        mov             lr,  r1                  @ lr = write cursor mirroring r1
        mov             r8,  #-8                 @ post-index step for the descending side
        vld1.32         {d0},     [r0,:64]!     @ d1[0,1]  (low-half element)
        vld1.32         {d1},     [r1,:64], r8  @ d2[0,1]  (mirrored high-half element)
        vld1.32         {d4},     [r2,:64]!     @ tcos[i]
        vld1.32         {d5},     [r3,:64]!     @ tsin[i]
        vmov.f32        d18, #0.5               @ k1
        vdup.32         d19, r6                  @ sign bit from the inverse flag
        pld             [r0, #32]
        veor            d19, d18, d19           @ k2 = +/-0.5 depending on direction
        vmov.i32        d16, #0
        vmov.i32        d17, #1<<31              @ build sign-flip masks from 0 / -0.0
        pld             [r1, #-32]
        vtrn.32         d16, d17
        pld             [r2, #32]
        vrev64.32       d16, d16                @ d16=1,0 d17=0,1 (per-lane sign masks)
        pld             [r3, #32]
@ Main loop: two mirrored butterfly pairs per iteration, software-pipelined —
@ loads for the next pair are interleaved with the MAC chain of the current one.
2:
        veor            q1,  q0,  q8            @ -d1[0],d1[1], d2[0],-d2[1]
        vld1.32         {d24},    [r0,:64]!     @ d1[0,1]  (next pair, pipelined)
        vadd.f32        d0,  d0,  d3            @ d1[0]+d2[0], d1[1]-d2[1]
        vld1.32         {d25},    [r1,:64], r8  @ d2[0,1]  (next pair, pipelined)
        vadd.f32        d1,  d2,  d1            @ -d1[0]+d2[0], d1[1]+d2[1]
        veor            q3,  q12, q8            @ -d1[0],d1[1], d2[0],-d2[1]
        pld             [r0, #32]
        vmul.f32        q10, q0,  q9            @ ev.re, ev.im, od.im, od.re (scaled by k1/k2)
        pld             [r1, #-32]
        vadd.f32        d0,  d24, d7            @ d1[0]+d2[0], d1[1]-d2[1]
        vadd.f32        d1,  d6,  d25           @ -d1[0]+d2[0], d1[1]+d2[1]
        vmul.f32        q11, q0,  q9            @ ev.re, ev.im, od.im, od.re
        veor            d7,  d21, d16           @ -od.im, od.re
        vrev64.32       d3,  d21                @ od.re, od.im
        veor            d6,  d20, d17           @ ev.re,-ev.im
        veor            d2,  d3,  d16           @ -od.re, od.im
        vmla.f32        d20, d3,  d4[1]         @ + od * tcos[i]   (ascending output)
        vmla.f32        d20, d7,  d5[1]         @ + rot(od) * tsin[i]
        vmla.f32        d6,  d2,  d4[1]         @ conj side for the descending output
        vmla.f32        d6,  d21, d5[1]
        vld1.32         {d4},     [r2,:64]!     @ tcos[i]  (next pair)
        veor            d7,  d23, d16           @ -od.im, od.re
        vld1.32         {d5},     [r3,:64]!     @ tsin[i]  (next pair)
        veor            d24, d22, d17           @ ev.re,-ev.im
        vrev64.32       d3,  d23                @ od.re, od.im
        pld             [r2, #32]
        veor            d2,  d3,  d16           @ -od.re, od.im
        pld             [r3, #32]
        vmla.f32        d22, d3,  d4[0]         @ same butterfly, second element,
        vmla.f32        d22, d7,  d5[0]         @ lane 0 of the freshly loaded twiddles
        vmla.f32        d24, d2,  d4[0]
        vmla.f32        d24, d23, d5[0]
        vld1.32         {d0},     [r0,:64]!     @ d1[0,1]  (for next iteration)
        vld1.32         {d1},     [r1,:64], r8  @ d2[0,1]
        vst1.32         {d20},    [r7,:64]!     @ store ascending results
        vst1.32         {d6},     [lr,:64], r8  @ store mirrored descending results
        vst1.32         {d22},    [r7,:64]!
        vst1.32         {d24},    [lr,:64], r8
        subs            r12, r12, #2            @ two pairs consumed per pass
        bgt             2b

@ Epilogue of the pass: final middle pair (loaded by the pipelined loop above)
@ plus the special bins z[0]/z[1] and the centre element.
        veor            q1,  q0,  q8            @ -d1[0],d1[1], d2[0],-d2[1]
        vadd.f32        d0,  d0,  d3            @ d1[0]+d2[0], d1[1]-d2[1]
        vadd.f32        d1,  d2,  d1            @ -d1[0]+d2[0], d1[1]+d2[1]
        ldr             r2,  [r4, #8]           @ sign_convention
        vmul.f32        q10, q0,  q9            @ ev.re, ev.im, od.im, od.re
        add             r0,  r0,  #4             @ point at the centre imaginary slot
        bfc             r2,  #0,  #31            @ keep only the sign bit of the convention
        vld1.32         {d0[0]},  [r0,:32]      @ centre element
        veor            d7,  d21, d16           @ -od.im, od.re
        vrev64.32       d3,  d21                @ od.re, od.im
        veor            d6,  d20, d17           @ ev.re,-ev.im
        vld1.32         {d22},    [r5,:64]      @ z[0], z[1] (DC / Nyquist packing)
        vdup.32         d1,  r2
        vmov            d23, d22
        veor            d2,  d3,  d16           @ -od.re, od.im
        vtrn.32         d22, d23                 @ split/duplicate z[0] and z[1] lanes
        veor            d0,  d0,  d1             @ apply sign convention to centre element
        veor            d23, d23, d17            @ negate one lane for the DC/Nyquist sum
        vmla.f32        d20, d3,  d4[1]
        vmla.f32        d20, d7,  d5[1]
        vmla.f32        d6,  d2,  d4[1]
        vmla.f32        d6,  d21, d5[1]
        vadd.f32        d22, d22, d23            @ z0+z1, z0-z1 packed result
        vst1.32         {d20},    [r7,:64]
        vst1.32         {d6},     [lr,:64]
        vst1.32         {d0[0]},  [r0,:32]
        vst1.32         {d22},    [r5,:64]

        cmp             r6,  #0                  @ forward transform? then done
        it              eq                       @ (IT needed for Thumb-2 builds)
        popeq           {r4-r8,pc}

@ Inverse path: halve the DC/Nyquist pair (k1 = 0.5 still in d18), then run
@ the complex FFT as a tail call — lr is restored so the FFT returns to our
@ caller directly.
        vmul.f32        d22, d22, d18
        vst1.32         {d22},    [r5,:64]
        add             r0,  r4,  #20            @ embedded FFT context (see NOTE above)
        mov             r1,  r5
        bl              X(ff_fft_permute_neon)
        add             r0,  r4,  #20
        mov             r1,  r5
        pop             {r4-r8,lr}
        b               X(ff_fft_calc_neon)      @ tail call; FFT returns to caller
endfunc