/*
 * libmad - MPEG audio decoder library
 * Copyright (C) 2000-2004 Underbit Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id: fixed.h,v 1.38 2004/02/17 02:02:03 rob Exp $
 */

# ifndef LIBMAD_FIXED_H
# define LIBMAD_FIXED_H

# if SIZEOF_INT >= 4
typedef signed int mad_fixed_t;

typedef signed int mad_fixed64hi_t;
typedef unsigned int mad_fixed64lo_t;
# else
typedef signed long mad_fixed_t;

typedef signed long mad_fixed64hi_t;
typedef unsigned long mad_fixed64lo_t;
# endif

# if defined(_MSC_VER)
# define mad_fixed64_t signed __int64
# elif 1 || defined(__GNUC__)
# define mad_fixed64_t signed long long
# endif

# if defined(FPM_FLOAT)
typedef double mad_sample_t;
# else
typedef mad_fixed_t mad_sample_t;
# endif

/*
 * Fixed-point format: 0xABBBBBBB
 * A == whole part      (sign + 3 bits)
 * B == fractional part (28 bits)
 *
 * Values are signed two's complement, so the effective range is:
 * 0x80000000 to 0x7fffffff
 *       -8.0 to +7.9999999962747097015380859375
 *
 * The smallest representable value is:
 * 0x00000001 == 0.0000000037252902984619140625 (i.e. about 3.725e-9)
 *
 * 28 bits of fractional accuracy represent about
 * 8.6 digits of decimal accuracy.
 *
 * Fixed-point numbers can be added or subtracted as normal
 * integers, but multiplication requires shifting the 64-bit result
 * from 56 fractional bits back to 28 (and rounding.)
 *
 * Changing the definition of MAD_F_FRACBITS is only partially
 * supported, and must be done with care.
 */
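
/*
 * Worked example (editorial illustration, not part of the original libmad
 * sources): with 28 fractional bits,
 *
 *   1.0 == 0x10000000 and 0.5 == 0x08000000
 *
 * Addition needs no adjustment: 0x10000000 + 0x08000000 == 0x18000000 == 1.5.
 * Multiplying the raw words gives a 64-bit product with 56 fractional bits,
 *
 *   0x10000000 * 0x08000000 == 0x0080000000000000
 *
 * which must be shifted right by 28 to return to this format:
 *
 *   0x0080000000000000 >> 28 == 0x08000000 == 0.5
 */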

# define MAD_F_FRACBITS 28

# if MAD_F_FRACBITS == 28
# define MAD_F(x) ((mad_fixed_t) (x##L))
# else
# if MAD_F_FRACBITS < 28
# warning "MAD_F_FRACBITS < 28"
# define MAD_F(x) ((mad_fixed_t)  \
    (((x##L) +  \
      (1L << (28 - MAD_F_FRACBITS - 1))) >>  \
     (28 - MAD_F_FRACBITS)))
# elif MAD_F_FRACBITS > 28
# error "MAD_F_FRACBITS > 28 not currently supported"
# define MAD_F(x) ((mad_fixed_t)  \
    ((x##L) << (MAD_F_FRACBITS - 28)))
# endif
# endif

# define MAD_F_MIN ((mad_fixed_t) -0x80000000L)
# define MAD_F_MAX ((mad_fixed_t) +0x7fffffffL)

# define MAD_F_ONE MAD_F(0x10000000)

# define mad_f_tofixed(x) ((mad_fixed_t)  \
    ((x) * (double) (1L << MAD_F_FRACBITS) + 0.5))
# define mad_f_todouble(x) ((double)  \
    ((x) / (double) (1L << MAD_F_FRACBITS)))

# define mad_f_intpart(x) ((x) >> MAD_F_FRACBITS)
# define mad_f_fracpart(x) ((x) & ((1L << MAD_F_FRACBITS) - 1))
                           /* (x should be positive) */

# define mad_f_fromint(x) ((x) << MAD_F_FRACBITS)

# define mad_f_add(x, y) ((x) + (y))
# define mad_f_sub(x, y) ((x) - (y))

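/*
 * Usage sketch (editorial illustration only; the values assume the default
 * MAD_F_FRACBITS of 28):
 *
 *   mad_f_tofixed(0.5)         == 0x08000000
 *   mad_f_todouble(0x08000000) == 0.5
 *   mad_f_fromint(5)           == 0x50000000
 *   mad_f_intpart(0x50000000)  == 5
 *   mad_f_fracpart(MAD_F_ONE)  == 0
 *
 * mad_f_add() and mad_f_sub() are plain integer operations and need no
 * rescaling; only multiplication does.
 */
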
# if defined(FPM_FLOAT)
# error "FPM_FLOAT not yet supported"

# undef MAD_F
# define MAD_F(x) mad_f_todouble(x)

# define mad_f_mul(x, y) ((x) * (y))
# define mad_f_scale64

# undef ASO_ZEROCHECK

# elif defined(FPM_64BIT)

/*
 * This version should be the most accurate if 64-bit types are supported by
 * the compiler, although it may not be the most efficient.
 */
# if defined(OPT_ACCURACY)
# define mad_f_mul(x, y)  \
    ((mad_fixed_t)  \
     ((((mad_fixed64_t) (x) * (y)) +  \
       (1L << (MAD_F_SCALEBITS - 1))) >> MAD_F_SCALEBITS))
# else
# define mad_f_mul(x, y)  \
    ((mad_fixed_t) (((mad_fixed64_t) (x) * (y)) >> MAD_F_SCALEBITS))
# endif

# define MAD_F_SCALEBITS MAD_F_FRACBITS
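
/*
 * Editorial note (illustration only): with OPT_ACCURACY the 64-bit product
 * is rounded to nearest by adding half an LSB before the shift; otherwise
 * it is truncated.  The difference shows in the least significant bit, e.g.
 * for x == 0x00000001 and y == 0x08000000 (0.5) the product is 1 << 27, so
 *
 *   truncating:   (1 << 27) >> 28              == 0
 *   rounding:    ((1 << 27) + (1 << 27)) >> 28 == 1
 */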

/* --- Intel --------------------------------------------------------------- */

# elif defined(FPM_INTEL)

# if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable: 4035) /* no return value */
static __forceinline
mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
{
  enum {
    fracbits = MAD_F_FRACBITS
  };

  __asm {
    mov eax, x
    imul y
    shrd eax, edx, fracbits
  }

  /* implicit return of eax */
}
# pragma warning(pop)

# define mad_f_mul mad_f_mul_inline
# define mad_f_scale64
# else
/*
 * This Intel version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y)  \
    asm ("imull %3"  \
         : "=a" (lo), "=d" (hi)  \
         : "%a" (x), "rm" (y)  \
         : "cc")

# if defined(OPT_ACCURACY)
/*
 * This gives best accuracy but is not very fast.
 */
# define MAD_F_MLA(hi, lo, x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       asm ("addl %2,%0\n\t"  \
            "adcl %3,%1"  \
            : "=rm" (lo), "=rm" (hi)  \
            : "r" (__lo), "r" (__hi), "0" (lo), "1" (hi)  \
            : "cc");  \
    })
# endif /* OPT_ACCURACY */

# if defined(OPT_ACCURACY)
/*
 * Surprisingly, this is faster than SHRD followed by ADC.
 */
# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed64hi_t __hi_;  \
       mad_fixed64lo_t __lo_;  \
       mad_fixed_t __result;  \
       asm ("addl %4,%2\n\t"  \
            "adcl %5,%3"  \
            : "=rm" (__lo_), "=rm" (__hi_)  \
            : "0" (lo), "1" (hi),  \
              "ir" (1L << (MAD_F_SCALEBITS - 1)), "ir" (0)  \
            : "cc");  \
       asm ("shrdl %3,%2,%1"  \
            : "=rm" (__result)  \
            : "0" (__lo_), "r" (__hi_), "I" (MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
# elif defined(OPT_INTEL)
/*
 * Alternate Intel scaling that may or may not perform better.
 */
# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("shrl %3,%1\n\t"  \
            "shll %4,%2\n\t"  \
            "orl %2,%1"  \
            : "=rm" (__result)  \
            : "0" (lo), "r" (hi),  \
              "I" (MAD_F_SCALEBITS), "I" (32 - MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
# else
# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("shrdl %3,%2,%1"  \
            : "=rm" (__result)  \
            : "0" (lo), "r" (hi), "I" (MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
# endif /* OPT_ACCURACY */

# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif

/* --- ARM ----------------------------------------------------------------- */

# elif defined(FPM_ARM)

/*
 * This ARM V4 version is as accurate as FPM_64BIT but much faster. The
 * least significant bit is properly rounded at no CPU cycle cost!
 */
# if 1
/*
 * This is faster than the default implementation via MAD_F_MLX() and
 * mad_f_scale64().
 */
# define mad_f_mul(x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       mad_fixed_t __result;  \
       asm ("smull %0, %1, %3, %4\n\t"  \
            "movs %0, %0, lsr %5\n\t"  \
            "adc %2, %0, %1, lsl %6"  \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result)  \
            : "%r" (x), "r" (y),  \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })
# endif
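
/*
 * Editorial note (illustration only): the rounding above comes for free
 * because "movs ..., lsr" shifts the low word right by MAD_F_SCALEBITS and
 * leaves the last bit shifted out (the half-LSB of the result) in the carry
 * flag; the following "adc" then adds that carry while merging in the high
 * word shifted left by 32 - MAD_F_SCALEBITS.
 */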

# define MAD_F_MLX(hi, lo, x, y)  \
    asm ("smull %0, %1, %2, %3"  \
         : "=&r" (lo), "=&r" (hi)  \
         : "%r" (x), "r" (y))

# define MAD_F_MLA(hi, lo, x, y)  \
    asm ("smlal %0, %1, %2, %3"  \
         : "+r" (lo), "+r" (hi)  \
         : "%r" (x), "r" (y))

# define MAD_F_MLN(hi, lo)  \
    asm ("rsbs %0, %2, #0\n\t"  \
         "rsc %1, %3, #0"  \
         : "=r" (lo), "=r" (hi)  \
         : "0" (lo), "1" (hi)  \
         : "cc")

# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("movs %0, %1, lsr %3\n\t"  \
            "adc %0, %0, %2, lsl %4"  \
            : "=&r" (__result)  \
            : "r" (lo), "r" (hi),  \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS)  \
            : "cc");  \
       __result;  \
    })

# define MAD_F_SCALEBITS MAD_F_FRACBITS

/* --- MIPS ---------------------------------------------------------------- */

# elif defined(FPM_MIPS)

/*
 * This MIPS version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y)  \
    asm ("mult %2,%3"  \
         : "=l" (lo), "=h" (hi)  \
         : "%r" (x), "r" (y))

# if defined(HAVE_MADD_ASM)
# define MAD_F_MLA(hi, lo, x, y)  \
    asm ("madd %2,%3"  \
         : "+l" (lo), "+h" (hi)  \
         : "%r" (x), "r" (y))
# elif defined(HAVE_MADD16_ASM)
/*
 * This loses significant accuracy due to the 16-bit integer limit in the
 * multiply/accumulate instruction.
 */
# define MAD_F_ML0(hi, lo, x, y)  \
    asm ("mult %2,%3"  \
         : "=l" (lo), "=h" (hi)  \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
# define MAD_F_MLA(hi, lo, x, y)  \
    asm ("madd16 %2,%3"  \
         : "+l" (lo), "+h" (hi)  \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
# define MAD_F_MLZ(hi, lo) ((mad_fixed_t) (lo))
# endif

# if defined(OPT_SPEED)
# define mad_f_scale64(hi, lo)  \
    ((mad_fixed_t) ((hi) << (32 - MAD_F_SCALEBITS)))
# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif

/* --- SPARC --------------------------------------------------------------- */

# elif defined(FPM_SPARC)

/*
 * This SPARC V8 version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y)  \
    asm ("smul %2, %3, %0\n\t"  \
         "rd %%y, %1"  \
         : "=r" (lo), "=r" (hi)  \
         : "%r" (x), "rI" (y))

/* --- PowerPC ------------------------------------------------------------- */

# elif defined(FPM_PPC)

/*
 * This PowerPC version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y)  \
    do {  \
      asm ("mullw %0,%1,%2"  \
           : "=r" (lo)  \
           : "%r" (x), "r" (y));  \
      asm ("mulhw %0,%1,%2"  \
           : "=r" (hi)  \
           : "%r" (x), "r" (y));  \
    }  \
    while (0)

# if defined(OPT_ACCURACY)
/*
 * This gives best accuracy but is not very fast.
 */
# define MAD_F_MLA(hi, lo, x, y)  \
    ({ mad_fixed64hi_t __hi;  \
       mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       asm ("addc %0,%2,%3\n\t"  \
            "adde %1,%4,%5"  \
            : "=r" (lo), "=r" (hi)  \
            : "%r" (lo), "r" (__lo),  \
              "%r" (hi), "r" (__hi)  \
            : "xer");  \
    })
# endif

# if defined(OPT_ACCURACY)
/*
 * This is slower than the truncating version below it.
 */
# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result, __round;  \
       asm ("rotrwi %0,%1,%2"  \
            : "=r" (__result)  \
            : "r" (lo), "i" (MAD_F_SCALEBITS));  \
       asm ("extrwi %0,%1,1,0"  \
            : "=r" (__round)  \
            : "r" (__result));  \
       asm ("insrwi %0,%1,%2,0"  \
            : "+r" (__result)  \
            : "r" (hi), "i" (MAD_F_SCALEBITS));  \
       asm ("add %0,%1,%2"  \
            : "=r" (__result)  \
            : "%r" (__result), "r" (__round));  \
       __result;  \
    })
# else
# define mad_f_scale64(hi, lo)  \
    ({ mad_fixed_t __result;  \
       asm ("rotrwi %0,%1,%2"  \
            : "=r" (__result)  \
            : "r" (lo), "i" (MAD_F_SCALEBITS));  \
       asm ("insrwi %0,%1,%2,0"  \
            : "+r" (__result)  \
            : "r" (hi), "i" (MAD_F_SCALEBITS));  \
       __result;  \
    })
# endif

# define MAD_F_SCALEBITS MAD_F_FRACBITS

/* --- Default ------------------------------------------------------------- */

# elif defined(FPM_DEFAULT)

/*
 * This version is the most portable but it loses significant accuracy.
 * Furthermore, accuracy is biased against the second argument, so care
 * should be taken when ordering operands.
 *
 * The scale factors are constant as this is not used with SSO.
 *
 * Pre-rounding is required to stay within the limits of compliance.
 */
# if defined(OPT_SPEED)
# define mad_f_mul(x, y) (((x) >> 12) * ((y) >> 16))
# else
# define mad_f_mul(x, y) ((((x) + (1L << 11)) >> 12) *  \
                          (((y) + (1L << 15)) >> 16))
# endif
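
/*
 * Editorial note (illustration only): the shift amounts work because
 * 12 + 16 == MAD_F_FRACBITS, so the product of the two pre-shifted operands
 * already has 28 fractional bits and needs no further scaling.  The low
 * 12 bits of x and the low 16 bits of y are discarded, which is why accuracy
 * is biased against the second argument.
 */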

/* ------------------------------------------------------------------------- */

# else
# error "no FPM selected"
# endif

/* default implementations */

# if !defined(mad_f_mul)
# define mad_f_mul(x, y)  \
    ({ register mad_fixed64hi_t __hi;  \
       register mad_fixed64lo_t __lo;  \
       MAD_F_MLX(__hi, __lo, (x), (y));  \
       mad_f_scale64(__hi, __lo);  \
    })
# endif

# if !defined(MAD_F_MLA)
# define MAD_F_ML0(hi, lo, x, y) ((lo) = mad_f_mul((x), (y)))
# define MAD_F_MLA(hi, lo, x, y) ((lo) += mad_f_mul((x), (y)))
# define MAD_F_MLN(hi, lo) ((lo) = -(lo))
# define MAD_F_MLZ(hi, lo) ((void) (hi), (mad_fixed_t) (lo))
# endif

# if !defined(MAD_F_ML0)
# define MAD_F_ML0(hi, lo, x, y) MAD_F_MLX((hi), (lo), (x), (y))
# endif

# if !defined(MAD_F_MLN)
# define MAD_F_MLN(hi, lo) ((hi) = ((lo) = -(lo)) ? ~(hi) : -(hi))
# endif

# if !defined(MAD_F_MLZ)
# define MAD_F_MLZ(hi, lo) mad_f_scale64((hi), (lo))
# endif

# if !defined(mad_f_scale64)
# if defined(OPT_ACCURACY)
# define mad_f_scale64(hi, lo)  \
    ((((mad_fixed_t)  \
       (((hi) << (32 - (MAD_F_SCALEBITS - 1))) |  \
        ((lo) >> (MAD_F_SCALEBITS - 1)))) + 1) >> 1)
# else
# define mad_f_scale64(hi, lo)  \
    ((mad_fixed_t)  \
     (((hi) << (32 - MAD_F_SCALEBITS)) |  \
      ((lo) >> MAD_F_SCALEBITS)))
# endif
# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif
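
/*
 * Editorial note (illustration only): hi:lo is the full 64-bit product, so
 * for MAD_F_SCALEBITS == 28 the default scaling extracts bits 59..28 of it,
 * i.e. the low 28 bits of hi spliced above the top 4 bits of lo.  The
 * OPT_ACCURACY variant keeps one extra low-order bit, adds one, and shifts
 * right once more, so the result is rounded to nearest instead of truncated.
 */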

/* C routines */

mad_fixed_t mad_f_abs(mad_fixed_t);
mad_fixed_t mad_f_div(mad_fixed_t, mad_fixed_t);

# endif