/*
 * ARM NEON optimised MDCT
 * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"

#define ff_fft_calc_neon X(ff_fft_calc_neon)

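@ void ff_imdct_half_neon(FFTContext *s, FFTSample *output, const FFTSample *input)
@ (assumed prototype: r0 = context, r1 = output, r2 = input, matching the
@ fixed FFTContext offsets read below: mdct_bits at #20, tcos at #24,
@ revtab at #8)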
function ff_imdct_half_neon, export=1
        push            {r4-r8,lr}

        mov             r12, #1
        ldr             lr,  [r0, #20]          @ mdct_bits
        ldr             r4,  [r0, #24]          @ tcos
        ldr             r3,  [r0, #8]           @ revtab
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #2            @ n4 = n >> 2
        add             r7,  r2,  r12, lsl #1
        mov             r12, #-16
        sub             r7,  r7,  #16

        vld2.32         {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
        vld2.32         {d0-d1},  [r2,:128]!    @ d0 =m0,x d1 =m1,x
        vrev64.32       d17, d17
        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s1
        vmul.f32        d6,  d17, d2
        vmul.f32        d7,  d0,  d2
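@ Pre-rotation loop: each pass combines two input pairs with the tcos/tsin
@ twiddles (the complex multiply is split across d4-d7) and scatters the
@ results through the revtab permutation, two 16-bit indices per r6 load.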
1:
        subs            lr,  lr,  #2
        ldr             r6,  [r3], #4
        vmul.f32        d4,  d0,  d3
        vmul.f32        d5,  d17, d3
        vsub.f32        d4,  d6,  d4
        vadd.f32        d5,  d5,  d7
        uxth            r8,  r6,  ror #16
        uxth            r6,  r6
        add             r8,  r1,  r8,  lsl #3
        add             r6,  r1,  r6,  lsl #3
        beq             1f
        vld2.32         {d16-d17},[r7,:128],r12
        vld2.32         {d0-d1},  [r2,:128]!
        vrev64.32       d17, d17
        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s1
        vmul.f32        d6,  d17, d2
        vmul.f32        d7,  d0,  d2
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]
        b               1b
1:
        vst2.32         {d4[0],d5[0]}, [r6,:64]
        vst2.32         {d4[1],d5[1]}, [r8,:64]

        mov             r4,  r0
        mov             r6,  r1
        bl              ff_fft_calc_neon
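@ Post-rotation: n and tcos are reloaded through r4 (the context pointer was
@ saved there before the FFT call), then the FFT output is walked from both
@ ends, multiplied by the remaining twiddles, and stored from both ends
@ (r0 backwards, r8 forwards).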

        mov             r12, #1
        ldr             lr,  [r4, #20]          @ mdct_bits
        ldr             r4,  [r4, #24]          @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #3
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #16
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r8,  r6
        mov             r0,  r3

        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=i2,r2 d21=i3,r3
        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18
        vld2.32         {d17,d19},[r4,:128]!    @ d17=c2,c3 d19=s2,s3
        vmul.f32        d4,  d1,  d18
        vmul.f32        d5,  d21, d19
        vmul.f32        d6,  d20, d19
        vmul.f32        d22, d1,  d16
        vmul.f32        d23, d21, d17
        vmul.f32        d24, d0,  d16
        vmul.f32        d25, d20, d17
        vadd.f32        d7,  d7,  d22
        vadd.f32        d6,  d6,  d23
        vsub.f32        d4,  d4,  d24
        vsub.f32        d5,  d5,  d25
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r8,pc}
endfunc

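@ ff_imdct_calc_neon builds the full inverse MDCT from the half transform:
@ imdct_half writes into the second half of the output buffer, and the loop
@ below derives the outer quarters from it by reversal and sign flips
@ (veor with 1<<31 toggles the IEEE-754 sign bit of each float).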
function ff_imdct_calc_neon, export=1
        push            {r4-r6,lr}

        ldr             r3,  [r0, #20]
        mov             r4,  #1
        mov             r5,  r1
        lsl             r4,  r4,  r3
        add             r1,  r1,  r4

        bl              ff_imdct_half_neon

        add             r0,  r5,  r4,  lsl #2
        add             r1,  r5,  r4,  lsl #1
        sub             r0,  r0,  #8
        sub             r2,  r1,  #16
        mov             r3,  #-16
        mov             r6,  #-8
        vmov.i32        d30, #1<<31
1:
        vld1.32         {d0-d1},  [r2,:128], r3
        pld             [r0, #-16]
        vrev64.32       q0,  q0
        vld1.32         {d2-d3},  [r1,:128]!
        veor            d4,  d1,  d30
        pld             [r2, #-16]
        vrev64.32       q1,  q1
        veor            d5,  d0,  d30
        vst1.32         {d2},     [r0,:64], r6
        vst1.32         {d3},     [r0,:64], r6
        vst1.32         {d4-d5},  [r5,:128]!
        subs            r4,  r4,  #16
        bgt             1b

        pop             {r4-r6,pc}
endfunc

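@ ff_mdct_calc_neon: forward MDCT. The four quarters of the windowed input
@ are combined pairwise from both ends (the in4u/in4d and in3u/in3d
@ pointers below) into n/4 complex values, pre-rotated by tcos/tsin,
@ passed through the FFT, then post-rotated much like ff_imdct_half_neon.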
function ff_mdct_calc_neon, export=1
        push            {r4-r10,lr}

        mov             r12, #1
        ldr             lr,  [r0, #20]          @ mdct_bits
        ldr             r4,  [r0, #24]          @ tcos
        ldr             r3,  [r0, #8]           @ revtab
        lsl             lr,  r12, lr            @ n  = 1 << nbits
        add             r7,  r2,  lr            @ in4u
        sub             r9,  r7,  #16           @ in4d
        add             r2,  r7,  lr,  lsl #1   @ in3u
        add             r8,  r9,  lr,  lsl #1   @ in3d
        add             r5,  r4,  lr,  lsl #1
        sub             r5,  r5,  #16
        sub             r3,  r3,  #4
        mov             r12, #-16

        vld2.32         {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
        vld2.32         {d0, d2}, [r7,:128]!    @ in4u0,in4u1 in2d1,in2d0
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d1, d3}, [r2,:128]!    @ in3u0,in3u1 in1d1,in1d0
        vsub.f32        d0,  d18, d0            @ in4d-in4u      I
        vld2.32         {d20,d21},[r4,:128]!    @ c0,c1 s0,s1
        vrev64.32       q1,  q1                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
        vadd.f32        d1,  d1,  d19           @ in3u+in3d     -R
        vsub.f32        d16, d16, d2            @ in0u-in2d      R
        vadd.f32        d17, d17, d3            @ in2u+in1d     -I
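@ Main folding/pre-rotation loop. The A/T-prefixed lines are ARM-mode and
@ Thumb-mode alternatives of the same revtab load (macros from asm.S);
@ each pass fetches two pairs of revtab indices (r10 and r6) and stores
@ four rotated complex values through them.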
1:
        vmul.f32        d7,  d0,  d21           @  I*s
A       ldr             r10, [r3, lr, lsr #1]
T       lsr             r10, lr,  #1
T       ldr             r10, [r3, r10]
        vmul.f32        d6,  d1,  d20           @ -R*c
        ldr             r6,  [r3, #4]!
        vmul.f32        d4,  d1,  d21           @ -R*s
        vmul.f32        d5,  d0,  d20           @  I*c
        vmul.f32        d24, d16, d30           @  R*c
        vmul.f32        d25, d17, d31           @ -I*s
        vmul.f32        d22, d16, d31           @  R*s
        vmul.f32        d23, d17, d30           @  I*c
        subs            lr,  lr,  #16
        vsub.f32        d6,  d6,  d7            @ -R*c-I*s
        vadd.f32        d7,  d4,  d5            @ -R*s+I*c
        vsub.f32        d24, d25, d24           @  I*s-R*c
        vadd.f32        d25, d22, d23           @  R*s-I*c
        beq             1f
        mov             r12, #-16
        vld2.32         {d16,d18},[r9,:128],r12 @ in0u0,in0u1 in4d1,in4d0
        vld2.32         {d17,d19},[r8,:128],r12 @ in2u0,in2u1 in3d1,in3d0
        vneg.f32        d7,  d7                 @  R*s-I*c
        vld2.32         {d0, d2}, [r7,:128]!    @ in4u0,in4u1 in2d1,in2d0
        vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
        vld2.32         {d1, d3}, [r2,:128]!    @ in3u0,in3u1 in1d1,in1d0
        vsub.f32        d0,  d18, d0            @ in4d-in4u      I
        vld2.32         {d20,d21},[r4,:128]!    @ c0,c1 s0,s1
        vrev64.32       q1,  q1                 @ in2d0,in2d1 in1d0,in1d1
        vld2.32         {d30,d31},[r5,:128],r12 @ c2,c3 s2,s3
        vadd.f32        d1,  d1,  d19           @ in3u+in3d     -R
        vsub.f32        d16, d16, d2            @ in0u-in2d      R
        vadd.f32        d17, d17, d3            @ in2u+in1d     -I
        uxth            r12, r6,  ror #16
        uxth            r6,  r6
        add             r12, r1,  r12, lsl #3
        add             r6,  r1,  r6,  lsl #3
        vst2.32         {d6[0],d7[0]},  [r6,:64]
        vst2.32         {d6[1],d7[1]},  [r12,:64]
        uxth            r6,  r10, ror #16
        uxth            r10, r10
        add             r6,  r1,  r6,  lsl #3
        add             r10, r1,  r10, lsl #3
        vst2.32         {d24[0],d25[0]},[r10,:64]
        vst2.32         {d24[1],d25[1]},[r6,:64]
        b               1b
1:
        vneg.f32        d7,  d7                 @  R*s-I*c
        uxth            r12, r6,  ror #16
        uxth            r6,  r6
        add             r12, r1,  r12, lsl #3
        add             r6,  r1,  r6,  lsl #3
        vst2.32         {d6[0],d7[0]},  [r6,:64]
        vst2.32         {d6[1],d7[1]},  [r12,:64]
        uxth            r6,  r10, ror #16
        uxth            r10, r10
        add             r6,  r1,  r6,  lsl #3
        add             r10, r1,  r10, lsl #3
        vst2.32         {d24[0],d25[0]},[r10,:64]
        vst2.32         {d24[1],d25[1]},[r6,:64]

        mov             r4,  r0
        mov             r6,  r1
        bl              ff_fft_calc_neon
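@ Post-rotation, structured like the one in ff_imdct_half_neon, with an
@ extra vneg on q2 to account for the forward transform's sign convention.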

        mov             r12, #1
        ldr             lr,  [r4, #20]          @ mdct_bits
        ldr             r4,  [r4, #24]          @ tcos
        lsl             r12, r12, lr            @ n  = 1 << nbits
        lsr             lr,  r12, #3            @ n8 = n >> 3

        add             r4,  r4,  lr,  lsl #3
        add             r6,  r6,  lr,  lsl #3
        sub             r1,  r4,  #16
        sub             r3,  r6,  #16

        mov             r7,  #-16
        mov             r8,  r6
        mov             r0,  r3

        vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =r1,i1 d1 =r0,i0
        vld2.32         {d20-d21},[r6,:128]!    @ d20=r2,i2 d21=r3,i3
        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
1:
        subs            lr,  lr,  #2
        vmul.f32        d7,  d0,  d18           @ r1*s1,r0*s0
        vld2.32         {d17,d19},[r4,:128]!    @ c2,c3 s2,s3
        vmul.f32        d4,  d1,  d18           @ i1*s1,i0*s0
        vmul.f32        d5,  d21, d19           @ i2*s2,i3*s3
        vmul.f32        d6,  d20, d19           @ r2*s2,r3*s3
        vmul.f32        d24, d0,  d16           @ r1*c1,r0*c0
        vmul.f32        d25, d20, d17           @ r2*c2,r3*c3
        vmul.f32        d22, d21, d17           @ i2*c2,i3*c3
        vmul.f32        d23, d1,  d16           @ i1*c1,i0*c0
        vadd.f32        d4,  d4,  d24           @ i1*s1+r1*c1,i0*s0+r0*c0
        vadd.f32        d5,  d5,  d25           @ i2*s2+r2*c2,i3*s3+r3*c3
        vsub.f32        d6,  d22, d6            @ i2*c2-r2*s2,i3*c3-r3*s3
        vsub.f32        d7,  d23, d7            @ i1*c1-r1*s1,i0*c0-r0*s0
        vneg.f32        q2,  q2
        beq             1f
        vld2.32         {d0-d1},  [r3,:128], r7
        vld2.32         {d20-d21},[r6,:128]!
        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128], r7
        vst2.32         {d5,d7},  [r8,:128]!
        b               1b
1:
        vrev64.32       q3,  q3
        vst2.32         {d4,d6},  [r0,:128]
        vst2.32         {d5,d7},  [r8,:128]

        pop             {r4-r10,pc}
endfunc