/*
 * FFT transform with Altivec optimizations
 * Copyright (c) 2009 Loren Merritt
 *
 * This algorithm (though not any of the implementation details) is
 * based on libdjbfft by D. J. Bernstein.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * These functions are not individually interchangeable with the C versions.
 * While C takes arrays of FFTComplex, Altivec leaves intermediate results
 * in blocks as convenient to the vector size.
 * i.e. {4x real, 4x imaginary, 4x real, ...}
 *
 * I ignore the standard calling convention.
 * Instead, the following registers are treated as global constants:
 * v14: zero
 * v15..v18: cosines
 * v19..v29: permutations
 * r9: 16
 * r12: ff_cos_tabs
 * and the rest are free for local use.
 */
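
/* For example, eight consecutive complex values are kept as
 *   {r0,r1,r2,r3}, {i0,i1,i2,i3}, {r4,r5,r6,r7}, {i4,i5,i6,i7}
 * i.e. one vector of four real parts followed by the matching four
 * imaginary parts (cf. the FFT4/FFT8 output comments below). */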

#include "config.h"
#include "asm.S"

.text

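/* addi2: add a 32-bit immediate to \ra.  addi sign-extends its 16-bit
 * operand, so the upper half uses the "high adjusted" value
 * ((\imm+0x8000)>>16, i.e. \imm@ha) to compensate. */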
.macro addi2 ra, imm // add 32-bit immediate
.if \imm & 0xffff
    addi  \ra, \ra, \imm@l
.endif
.if (\imm+0x8000)>>16
    addis \ra, \ra, \imm@ha
.endif
.endm

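/* FFT4: 4-point FFT.  a0,a1 hold the interleaved input {r0,i0,r1,i1},{r2,i2,r3,i3};
 * a2,a3 receive the split result {r0,r1,r2,r3},{i0,i1,i2,i3}.
 * FFT4x2 below performs two such transforms in parallel. */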
.macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3
    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
    vaddfp  \a0,\a2,\a3     // {t1,t2,t6,t5}
    vsubfp  \a1,\a2,\a3     // {t3,t4,t8,t7}
    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
    vaddfp  \a0,\a2,\a3     // {r0,r1,i0,i1}
    vsubfp  \a1,\a2,\a3     // {r2,r3,i2,i3}
    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
.endm

.macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3
    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
    vperm   \b2,\b0,\b1,v20
    vperm   \b3,\b0,\b1,v21
    vaddfp  \a0,\a2,\a3     // {t1,t2,t6,t5}
    vsubfp  \a1,\a2,\a3     // {t3,t4,t8,t7}
    vaddfp  \b0,\b2,\b3
    vsubfp  \b1,\b2,\b3
    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
    vmrghw  \b2,\b0,\b1
    vperm   \b3,\b0,\b1,v22
    vaddfp  \a0,\a2,\a3     // {r0,r1,i0,i1}
    vsubfp  \a1,\a2,\a3     // {r2,r3,i2,i3}
    vaddfp  \b0,\b2,\b3
    vsubfp  \b1,\b2,\b3
    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
    vperm   \b2,\b0,\b1,v23
    vperm   \b3,\b0,\b1,v24
.endm

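/* FFT8: 8-point FFT.  a0,a1 hold the first four complex values and b0,b1 the
 * last four, interleaved as {r,i,r,i}; on output a0,a1 = {r0..r3},{i0..i3}
 * and b0,b1 = {r4..r7},{i4..i7}.  a2,a3 and b2..b4 are scratch. */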
.macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1
    vmrghw  \b2,\b0,\b1     // vcprm(0,s0,1,s1) // {r4,r6,i4,i6}
    vmrglw  \b3,\b0,\b1     // vcprm(2,s2,3,s3) // {r5,r7,i5,i7}
    vperm   \a2,\a0,\a1,v20 // FFT4 ...
    vperm   \a3,\a0,\a1,v21
    vaddfp  \b0,\b2,\b3     // {t1,t3,t2,t4}
    vsubfp  \b1,\b2,\b3     // {r5,r7,i5,i7}
    vperm   \b4,\b1,\b1,v25 // vcprm(2,3,0,1) // {i5,i7,r5,r7}
    vaddfp  \a0,\a2,\a3
    vsubfp  \a1,\a2,\a3
    vmaddfp \b1,\b1,v17,v14 // * {-1,1,1,-1}/sqrt(2)
    vmaddfp \b1,\b4,v18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9}
    vmrghw  \a2,\a0,\a1
    vperm   \a3,\a0,\a1,v22
    vperm   \b2,\b0,\b1,v26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8}
    vperm   \b3,\b0,\b1,v27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta}
    vaddfp  \a0,\a2,\a3
    vsubfp  \a1,\a2,\a3
    vaddfp  \b0,\b2,\b3     // {t1,t2,t9,ta}
    vsubfp  \b1,\b2,\b3     // {t6,t5,tc,tb}
    vperm   \a2,\a0,\a1,v23
    vperm   \a3,\a0,\a1,v24
    vperm   \b2,\b0,\b1,v28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb}
    vperm   \b3,\b0,\b1,v29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc}
    vsubfp  \b0,\a2,\b2     // {r4,r5,r6,r7}
    vsubfp  \b1,\a3,\b3     // {i4,i5,i6,i7}
    vaddfp  \a0,\a2,\b2     // {r0,r1,r2,r3}
    vaddfp  \a1,\a3,\b3     // {i0,i1,i2,i3}
.endm

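/* butterfly: d0 = s0 + s1, d1 = s0 - s1 */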
.macro BF d0,d1,s0,s1
    vsubfp  \d1,\s0,\s1
    vaddfp  \d0,\s0,\s1
.endm

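/* zip: interleave the split {r0..r3},{i0..i3} form back into {r,i} pairs */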
.macro zip d0,d1,s0,s1
    vmrghw  \d0,\s0,\s1
    vmrglw  \d1,\s0,\s1
.endm

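/* Leaf entry points.  fft4/fft8/fft16 leave the result in the split
 * {4x re, 4x im} block format; the *_interleave variants zip it back into
 * normal FFTComplex {re,im} order before storing. */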
.macro def_fft4 interleave
fft4\interleave\()_altivec:
    lvx    v0, 0,r3
    lvx    v1,r9,r3
    FFT4   v0,v1,v2,v3
.ifnb \interleave
    zip    v0,v1,v2,v3
    stvx   v0, 0,r3
    stvx   v1,r9,r3
.else
    stvx   v2, 0,r3
    stvx   v3,r9,r3
.endif
    blr
.endm

.macro def_fft8 interleave
fft8\interleave\()_altivec:
    addi   r4,r3,32
    lvx    v0, 0,r3
    lvx    v1,r9,r3
    lvx    v2, 0,r4
    lvx    v3,r9,r4
    FFT8   v0,v1,v2,v3,v4,v5,v6,v7,v8
.ifnb \interleave
    zip    v4,v5,v0,v1
    zip    v6,v7,v2,v3
    stvx   v4, 0,r3
    stvx   v5,r9,r3
    stvx   v6, 0,r4
    stvx   v7,r9,r4
.else
    stvx   v0, 0,r3
    stvx   v1,r9,r3
    stvx   v2, 0,r4
    stvx   v3,r9,r4
.endif
    blr
.endm

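/* fft16: an FFT8 on z[0..7] and two FFT4s on z[8..15], combined using the
 * constant twiddle factors held in v15 (wre) and v16 (wim). */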
.macro def_fft16 interleave
fft16\interleave\()_altivec:
    addi     r5,r3,64
    addi     r6,r3,96
    addi     r4,r3,32
    lvx      v0, 0,r5
    lvx      v1,r9,r5
    lvx      v2, 0,r6
    lvx      v3,r9,r6
    FFT4x2   v0,v1,v2,v3,v4,v5,v6,v7
    lvx      v0, 0,r3
    lvx      v1,r9,r3
    lvx      v2, 0,r4
    lvx      v3,r9,r4
    FFT8     v0,v1,v2,v3,v8,v9,v10,v11,v12
    vmaddfp  v8,v4,v15,v14   // r2*wre
    vmaddfp  v9,v5,v15,v14   // i2*wre
    vmaddfp  v10,v6,v15,v14  // r3*wre
    vmaddfp  v11,v7,v15,v14  // i3*wre
    vmaddfp  v8,v5,v16,v8    // i2*wim
    vnmsubfp v9,v4,v16,v9    // r2*wim
    vnmsubfp v10,v7,v16,v10  // i3*wim
    vmaddfp  v11,v6,v16,v11  // r3*wim
    BF       v10,v12,v10,v8
    BF       v11,v13,v9,v11
    BF       v0,v4,v0,v10
    BF       v3,v7,v3,v12
    BF       v1,v5,v1,v11
    BF       v2,v6,v2,v13
.ifnb \interleave
    zip      v8, v9,v0,v1
    zip      v10,v11,v2,v3
    zip      v12,v13,v4,v5
    zip      v14,v15,v6,v7
    stvx     v8, 0,r3
    stvx     v9,r9,r3
    stvx     v10, 0,r4
    stvx     v11,r9,r4
    stvx     v12, 0,r5
    stvx     v13,r9,r5
    stvx     v14, 0,r6
    stvx     v15,r9,r6
.else
    stvx     v0, 0,r3
    stvx     v4, 0,r5
    stvx     v3,r9,r4
    stvx     v7,r9,r6
    stvx     v1,r9,r3
    stvx     v5,r9,r5
    stvx     v2, 0,r4
    stvx     v6, 0,r6
.endif
    blr
.endm

// void pass(float *z, float *wre, int n)
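// One recombination step of the split recursion used by DECL_FFT below:
// z[0..] holds the half-size sub-FFT, z[o2..] and z[o3..] the two
// quarter-size sub-FFTs.  Each of the n iterations applies the twiddles
// wre/wim to four complex values from each quarter and butterflies them
// with the corresponding values of the half, advancing z by 32 bytes
// (four split-format complex values) and wre by 16 bytes.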
.macro PASS interleave, suffix
fft_pass\suffix\()_altivec:
    mtctr  r5
    slwi   r0,r5,4
    slwi   r7,r5,6   // o2
    slwi   r5,r5,5   // o1
    add   r10,r5,r7  // o3
    add    r0,r4,r0  // wim
    addi   r6,r5,16  // o1+16
    addi   r8,r7,16  // o2+16
    addi  r11,r10,16 // o3+16
1:
    lvx    v8, 0,r4      // wre
    lvx   v10, 0,r0      // wim
    sub    r0,r0,r9
    lvx    v9, 0,r0
    vperm  v9,v9,v10,v19 // vcprm(s0,3,2,1) => wim[0 .. -3]
    lvx    v4,r3,r7      // r2 = z[o2]
    lvx    v5,r3,r8      // i2 = z[o2+16]
    lvx    v6,r3,r10     // r3 = z[o3]
    lvx    v7,r3,r11     // i3 = z[o3+16]
    vmaddfp  v10,v4,v8,v14  // r2*wre
    vmaddfp  v11,v5,v8,v14  // i2*wre
    vmaddfp  v12,v6,v8,v14  // r3*wre
    vmaddfp  v13,v7,v8,v14  // i3*wre
    lvx    v0, 0,r3         // r0 = z[0]
    lvx    v3,r3,r6         // i1 = z[o1+16]
    vmaddfp  v10,v5,v9,v10  // i2*wim
    vnmsubfp v11,v4,v9,v11  // r2*wim
    vnmsubfp v12,v7,v9,v12  // i3*wim
    vmaddfp  v13,v6,v9,v13  // r3*wim
    lvx    v1,r3,r9         // i0 = z[16]
    lvx    v2,r3,r5         // r1 = z[o1]
    BF     v12,v8,v12,v10
    BF     v13,v9,v11,v13
    BF     v0,v4,v0,v12
    BF     v3,v7,v3,v8
.if !\interleave
    stvx   v0, 0,r3
    stvx   v4,r3,r7
    stvx   v3,r3,r6
    stvx   v7,r3,r11
.endif
    BF     v1,v5,v1,v13
    BF     v2,v6,v2,v9
.if !\interleave
    stvx   v1,r3,r9
    stvx   v2,r3,r5
    stvx   v5,r3,r8
    stvx   v6,r3,r10
.else
    vmrghw v8,v0,v1
    vmrglw v9,v0,v1
    stvx   v8, 0,r3
    stvx   v9,r3,r9
    vmrghw v8,v2,v3
    vmrglw v9,v2,v3
    stvx   v8,r3,r5
    stvx   v9,r3,r6
    vmrghw v8,v4,v5
    vmrglw v9,v4,v5
    stvx   v8,r3,r7
    stvx   v9,r3,r8
    vmrghw v8,v6,v7
    vmrglw v9,v6,v7
    stvx   v8,r3,r10
    stvx   v9,r3,r11
.endif
    addi   r3,r3,32
    addi   r4,r4,16
    bdnz   1b
    sub    r3,r3,r5
    blr
.endm

#define M_SQRT1_2      0.70710678118654752440  /* 1/sqrt(2) */

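/* Byte selectors for vperm control vectors: WORD_n picks word n of the first
 * source operand, WORD_sn picks word n of the second; vcprm(a,b,c,d) emits
 * the resulting 16-byte permute mask. */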
#define WORD_0  0x00,0x01,0x02,0x03
#define WORD_1  0x04,0x05,0x06,0x07
#define WORD_2  0x08,0x09,0x0a,0x0b
#define WORD_3  0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#define vcprm(a, b, c, d) .byte WORD_##a, WORD_##b, WORD_##c, WORD_##d

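/* Constant pool loaded into v14..v29 by fft_calc: the zero vector (v14),
 * four vectors of cosine constants (v15..v18) and the eleven permutation
 * masks (v19..v29) referenced by the macros above. */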
.rodata
.align 4
fft_data:
    .float  0, 0, 0, 0
    .float  1, 0.92387953, M_SQRT1_2, 0.38268343
    .float  0, 0.38268343, M_SQRT1_2, 0.92387953
    .float  -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2
    .float   M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
    vcprm(s0,3,2,1)
    vcprm(0,1,s2,s1)
    vcprm(2,3,s0,s3)
    vcprm(2,s3,3,s2)
    vcprm(0,1,s0,s1)
    vcprm(2,3,s2,s3)
    vcprm(2,3,0,1)
    vcprm(1,2,s3,s0)
    vcprm(0,3,s2,s1)
    vcprm(0,2,s1,s3)
    vcprm(1,3,s0,s2)

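/* lvm/stvm: recursively load/store a list of vector registers from/to
 * consecutive 16-byte slots starting at \b, advancing \b past each one. */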
.macro lvm  b, r, regs:vararg
    lvx     \r, 0, \b
    addi    \b, \b, 16
.ifnb \regs
    lvm     \b, \regs
.endif
.endm

.macro stvm b, r, regs:vararg
    stvx    \r, 0, \b
    addi    \b, \b, 16
.ifnb \regs
    stvm    \b, \regs
.endif
.endm

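/* Entry point called from C: r3 = FFTContext pointer, r4 = the FFTComplex
 * buffer z.  Saves LR, VRSAVE and the nonvolatile v20..v29, loads the
 * constant pool into v14..v29, sets up the globals from the header comment
 * (r9 = 16, r12 = ff_cos_tabs), then dispatches on log2(n) read from the
 * context to the matching fft*_altivec kernel. */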
.macro fft_calc interleave
extfunc ff_fft_calc\interleave\()_altivec
    mflr   r0
    stp    r0, 2*PS(r1)
    stpu   r1, -(160+16*PS)(r1)
    get_got r11
    addi   r6, r1, 16*PS
    stvm   r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
    mfvrsave r0
    stw    r0, 15*PS(r1)
    li     r6, 0xfffffffc
    mtvrsave r6

    movrel r6, fft_data, r11
    lvm    r6, v14, v15, v16, v17, v18, v19, v20, v21
    lvm    r6, v22, v23, v24, v25, v26, v27, v28, v29

    li     r9, 16
    movrel r12, X(ff_cos_tabs), r11

    movrel r6, fft_dispatch_tab\interleave\()_altivec, r11
    lwz    r3, 0(r3)
    subi   r3, r3, 2
    slwi   r3, r3, 2+ARCH_PPC64
    lpx    r3, r3, r6
    mtctr  r3
    mr     r3, r4
    bctrl

    addi   r6, r1, 16*PS
    lvm    r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
    lwz    r6, 15*PS(r1)
    mtvrsave r6
    lp     r1, 0(r1)
    lp     r0, 2*PS(r1)
    mtlr   r0
    blr
.endm

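/* DECL_FFT: size-n transform built as fft(n/2) on z followed by two fft(n/4)
 * on z + n/2 and z + 3n/4 (byte offsets \n*4 and \n*6), then a recombination
 * pass using the size-n cosine table from ff_cos_tabs. */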
.macro DECL_FFT suffix, bits, n, n2, n4
fft\n\suffix\()_altivec:
    mflr  r0
    stp   r0,PS*(\bits-3)(r1)
    bl    fft\n2\()_altivec
    addi2 r3,\n*4
    bl    fft\n4\()_altivec
    addi2 r3,\n*2
    bl    fft\n4\()_altivec
    addi2 r3,\n*-6
    lp    r0,PS*(\bits-3)(r1)
    lp    r4,\bits*PS(r12)
    mtlr  r0
    li    r5,\n/16
    b     fft_pass\suffix\()_altivec
.endm

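/* DECL_FFTS: instantiate one complete set of kernels (plain or _interleave):
 * the leaf transforms, the recombination pass, every size from 32 to 65536,
 * the ff_fft_calc entry point and the matching dispatch table. */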
.macro DECL_FFTS interleave, suffix
    .text
    def_fft4  \suffix
    def_fft8  \suffix
    def_fft16 \suffix
    PASS \interleave, \suffix
    DECL_FFT \suffix, 5,   32,   16,    8
    DECL_FFT \suffix, 6,   64,   32,   16
    DECL_FFT \suffix, 7,  128,   64,   32
    DECL_FFT \suffix, 8,  256,  128,   64
    DECL_FFT \suffix, 9,  512,  256,  128
    DECL_FFT \suffix,10, 1024,  512,  256
    DECL_FFT \suffix,11, 2048, 1024,  512
    DECL_FFT \suffix,12, 4096, 2048, 1024
    DECL_FFT \suffix,13, 8192, 4096, 2048
    DECL_FFT \suffix,14,16384, 8192, 4096
    DECL_FFT \suffix,15,32768,16384, 8192
    DECL_FFT \suffix,16,65536,32768,16384

    fft_calc \suffix

    .rodata
    .align 3
fft_dispatch_tab\suffix\()_altivec:
    PTR fft4\suffix\()_altivec
    PTR fft8\suffix\()_altivec
    PTR fft16\suffix\()_altivec
    PTR fft32\suffix\()_altivec
    PTR fft64\suffix\()_altivec
    PTR fft128\suffix\()_altivec
    PTR fft256\suffix\()_altivec
    PTR fft512\suffix\()_altivec
    PTR fft1024\suffix\()_altivec
    PTR fft2048\suffix\()_altivec
    PTR fft4096\suffix\()_altivec
    PTR fft8192\suffix\()_altivec
    PTR fft16384\suffix\()_altivec
    PTR fft32768\suffix\()_altivec
    PTR fft65536\suffix\()_altivec
.endm

DECL_FFTS 0
DECL_FFTS 1, _interleave