annotate fft/native/bqvec/pommier/neon_mathfun.h @ 40:223f770b5341 kissfft-double tip

Try a double-precision kissfft
author Chris Cannam
date Wed, 07 Sep 2016 10:40:32 +0100
parents cf59817a5983
children
rev   line source
Chris@29 1 /* NEON implementation of sin, cos, exp and log
Chris@29 2
Chris@29 3 Inspired by Intel Approximate Math library, and based on the
Chris@29 4 corresponding algorithms of the cephes math library
Chris@29 5 */
Chris@29 6
Chris@29 7 /* Copyright (C) 2011 Julien Pommier
Chris@29 8
Chris@29 9 This software is provided 'as-is', without any express or implied
Chris@29 10 warranty. In no event will the authors be held liable for any damages
Chris@29 11 arising from the use of this software.
Chris@29 12
Chris@29 13 Permission is granted to anyone to use this software for any purpose,
Chris@29 14 including commercial applications, and to alter it and redistribute it
Chris@29 15 freely, subject to the following restrictions:
Chris@29 16
Chris@29 17 1. The origin of this software must not be misrepresented; you must not
Chris@29 18 claim that you wrote the original software. If you use this software
Chris@29 19 in a product, an acknowledgment in the product documentation would be
Chris@29 20 appreciated but is not required.
Chris@29 21 2. Altered source versions must be plainly marked as such, and must not be
Chris@29 22 misrepresented as being the original software.
Chris@29 23 3. This notice may not be removed or altered from any source distribution.
Chris@29 24
Chris@29 25 (this is the zlib license)
Chris@29 26 */
Chris@29 27
Chris@29 28 #include <arm_neon.h>
Chris@29 29
Chris@29 30 typedef float32x4_t v4sf; // vector of 4 float
Chris@29 31 typedef uint32x4_t v4su; // vector of 4 uint32
typedef int32x4_t v4si; // vector of 4 int32
Chris@29 33
Chris@29 34 #define c_inv_mant_mask ~0x7f800000u
Chris@29 35 #define c_cephes_SQRTHF 0.707106781186547524
Chris@29 36 #define c_cephes_log_p0 7.0376836292E-2
Chris@29 37 #define c_cephes_log_p1 - 1.1514610310E-1
Chris@29 38 #define c_cephes_log_p2 1.1676998740E-1
Chris@29 39 #define c_cephes_log_p3 - 1.2420140846E-1
Chris@29 40 #define c_cephes_log_p4 + 1.4249322787E-1
Chris@29 41 #define c_cephes_log_p5 - 1.6668057665E-1
Chris@29 42 #define c_cephes_log_p6 + 2.0000714765E-1
Chris@29 43 #define c_cephes_log_p7 - 2.4999993993E-1
Chris@29 44 #define c_cephes_log_p8 + 3.3333331174E-1
Chris@29 45 #define c_cephes_log_q1 -2.12194440e-4
Chris@29 46 #define c_cephes_log_q2 0.693359375
Chris@29 47
Chris@29 48 /* natural logarithm computed for 4 simultaneous float
Chris@29 49 return NaN for x <= 0
Chris@29 50 */
/* natural logarithm computed for 4 simultaneous float
   return NaN for x <= 0

   Splits x into exponent e and mantissa in [0.5, 1), then evaluates the
   cephes degree-8 polynomial on the reduced mantissa. */
v4sf log_ps(v4sf x) {
  v4sf one = vdupq_n_f32(1);

  x = vmaxq_f32(x, vdupq_n_f32(0)); /* force flush to zero on denormal values */
  v4su invalid_mask = vcleq_f32(x, vdupq_n_f32(0)); /* remember x <= 0 lanes */

  v4si ux = vreinterpretq_s32_f32(x);

  v4si emm0 = vshrq_n_s32(ux, 23); /* biased exponent field */

  /* keep only the fractional part: clear the exponent bits and OR in the
     bit pattern of 0.5f, mapping the mantissa into [0.5, 1) */
  ux = vandq_s32(ux, vdupq_n_s32(c_inv_mant_mask));
  ux = vorrq_s32(ux, vreinterpretq_s32_f32(vdupq_n_f32(0.5f)));
  x = vreinterpretq_f32_s32(ux);

  emm0 = vsubq_s32(emm0, vdupq_n_s32(0x7f)); /* remove the exponent bias */
  v4sf e = vcvtq_f32_s32(emm0);

  e = vaddq_f32(e, one);

  /* part2:
     if( x < SQRTHF ) {
       e -= 1;
       x = x + x - 1.0;
     } else { x = x - 1.0; }
  */
  v4su mask = vcltq_f32(x, vdupq_n_f32(c_cephes_SQRTHF));
  v4sf tmp = vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(x), mask));
  x = vsubq_f32(x, one);
  e = vsubq_f32(e, vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(one), mask)));
  x = vaddq_f32(x, tmp);

  v4sf z = vmulq_f32(x,x);

  /* Horner evaluation of the degree-8 polynomial.  vmlaq_f32
     (multiply-accumulate: a + b*c) replaces the original separate
     vmulq/vaddq pairs — fewer instructions, and consistent with the
     style exp_ps already uses. */
  v4sf y = vdupq_n_f32(c_cephes_log_p0);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p1), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p2), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p3), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p4), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p5), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p6), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p7), y, x);
  y = vmlaq_f32(vdupq_n_f32(c_cephes_log_p8), y, x);
  y = vmulq_f32(y, x);

  y = vmulq_f32(y, z);

  /* y += e * log_q1;  y -= z * 0.5 */
  y = vmlaq_f32(y, e, vdupq_n_f32(c_cephes_log_q1));
  y = vmlsq_f32(y, z, vdupq_n_f32(0.5f));

  /* recombine: result = x + y + e * log_q2 */
  x = vaddq_f32(x, y);
  x = vmlaq_f32(x, e, vdupq_n_f32(c_cephes_log_q2));
  x = vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(x), invalid_mask)); // negative arg will be NAN
  return x;
}
Chris@29 120
Chris@29 121 #define c_exp_hi 88.3762626647949f
Chris@29 122 #define c_exp_lo -88.3762626647949f
Chris@29 123
Chris@29 124 #define c_cephes_LOG2EF 1.44269504088896341
Chris@29 125 #define c_cephes_exp_C1 0.693359375
Chris@29 126 #define c_cephes_exp_C2 -2.12194440e-4
Chris@29 127
Chris@29 128 #define c_cephes_exp_p0 1.9875691500E-4
Chris@29 129 #define c_cephes_exp_p1 1.3981999507E-3
Chris@29 130 #define c_cephes_exp_p2 8.3334519073E-3
Chris@29 131 #define c_cephes_exp_p3 4.1665795894E-2
Chris@29 132 #define c_cephes_exp_p4 1.6666665459E-1
Chris@29 133 #define c_cephes_exp_p5 5.0000001201E-1
Chris@29 134
Chris@29 135 /* exp() computed for 4 float at once */
/* exp() computed for 4 float at once (cephes expf algorithm:
   range reduction by n*log(2), degree-5 polynomial, exponent rebuild). */
v4sf exp_ps(v4sf x) {
  const v4sf one = vdupq_n_f32(1);

  /* clamp the argument so the result stays within float range */
  x = vminq_f32(x, vdupq_n_f32(c_exp_hi));
  x = vmaxq_f32(x, vdupq_n_f32(c_exp_lo));

  /* express exp(x) as exp(g + n*log(2)); n = round(x * log2(e)) */
  v4sf fx = vmlaq_f32(vdupq_n_f32(0.5f), x, vdupq_n_f32(c_cephes_LOG2EF));

  /* floor(fx): truncate toward zero, then subtract 1 in lanes where
     truncation rounded upward */
  v4sf trunc = vcvtq_f32_s32(vcvtq_s32_f32(fx));
  v4su over = vcgtq_f32(trunc, fx);
  over = vandq_u32(over, vreinterpretq_u32_f32(one));
  fx = vsubq_f32(trunc, vreinterpretq_f32_u32(over));

  /* range-reduce: x -= fx * ln(2), with ln(2) split in two constants
     (C1 + C2) for extra precision */
  v4sf hi_part = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C1));
  v4sf lo_part = vmulq_f32(fx, vdupq_n_f32(c_cephes_exp_C2));
  x = vsubq_f32(x, hi_part);
  x = vsubq_f32(x, lo_part);

  /* degree-5 polynomial approximation of exp on the reduced argument;
     coefficients splatted directly rather than loaded from a table */
  v4sf poly = vdupq_n_f32(c_cephes_exp_p0);
  v4sf x2 = vmulq_f32(x, x);
  poly = vaddq_f32(vmulq_f32(poly, x), vdupq_n_f32(c_cephes_exp_p1));
  poly = vaddq_f32(vmulq_f32(poly, x), vdupq_n_f32(c_cephes_exp_p2));
  poly = vaddq_f32(vmulq_f32(poly, x), vdupq_n_f32(c_cephes_exp_p3));
  poly = vaddq_f32(vmulq_f32(poly, x), vdupq_n_f32(c_cephes_exp_p4));
  poly = vaddq_f32(vmulq_f32(poly, x), vdupq_n_f32(c_cephes_exp_p5));

  poly = vmulq_f32(poly, x2);
  poly = vaddq_f32(poly, x);
  poly = vaddq_f32(poly, one);

  /* build 2^n by placing n + 127 into the float exponent field */
  int32x4_t n = vcvtq_s32_f32(fx);
  n = vaddq_s32(n, vdupq_n_s32(0x7f));
  n = vshlq_n_s32(n, 23);

  return vmulq_f32(poly, vreinterpretq_f32_s32(n));
}
Chris@29 195
Chris@29 196 #define c_minus_cephes_DP1 -0.78515625
Chris@29 197 #define c_minus_cephes_DP2 -2.4187564849853515625e-4
Chris@29 198 #define c_minus_cephes_DP3 -3.77489497744594108e-8
Chris@29 199 #define c_sincof_p0 -1.9515295891E-4
Chris@29 200 #define c_sincof_p1 8.3321608736E-3
Chris@29 201 #define c_sincof_p2 -1.6666654611E-1
Chris@29 202 #define c_coscof_p0 2.443315711809948E-005
Chris@29 203 #define c_coscof_p1 -1.388731625493765E-003
Chris@29 204 #define c_coscof_p2 4.166664568298827E-002
Chris@29 205 #define c_cephes_FOPI 1.27323954473516 // 4 / M_PI
Chris@29 206
Chris@29 207 /* evaluation of 4 sines & cosines at once.
Chris@29 208
Chris@29 209 The code is the exact rewriting of the cephes sinf function.
Chris@29 210 Precision is excellent as long as x < 8192 (I did not bother to
Chris@29 211 take into account the special handling they have for greater values
Chris@29 212 -- it does not return garbage for arguments over 8192, though, but
Chris@29 213 the extra precision is missing).
Chris@29 214
Chris@29 215 Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
Chris@29 216 surprising but correct result.
Chris@29 217
Chris@29 218 Note also that when you compute sin(x), cos(x) is available at
Chris@29 219 almost no extra price so both sin_ps and cos_ps make use of
Chris@29 220 sincos_ps..
Chris@29 221 */
void sincos_ps(v4sf x, v4sf *ysin, v4sf *ycos) { // any x
  v4sf xmm1, xmm2, xmm3, y;

  v4su emm2;

  /* sine is odd: remember the sign of x, then work on |x| */
  v4su sign_mask_sin, sign_mask_cos;
  sign_mask_sin = vcltq_f32(x, vdupq_n_f32(0));
  x = vabsq_f32(x);

  /* scale by 4/Pi */
  y = vmulq_f32(x, vdupq_n_f32(c_cephes_FOPI));

  /* store the integer part of y in mm0 */
  emm2 = vcvtq_u32_f32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = vaddq_u32(emm2, vdupq_n_u32(1));
  emm2 = vandq_u32(emm2, vdupq_n_u32(~1));
  y = vcvtq_f32_u32(emm2);

  /* get the polynom selection mask
     there is one polynom for 0 <= x <= Pi/4
     and another one for Pi/4<x<=Pi/2

     Both branches will be computed.
  */
  v4su poly_mask = vtstq_u32(emm2, vdupq_n_u32(2));

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3;
     (-Pi/4 is split across three constants so the reduction keeps
     extra precision) */
  xmm1 = vmulq_n_f32(y, c_minus_cephes_DP1);
  xmm2 = vmulq_n_f32(y, c_minus_cephes_DP2);
  xmm3 = vmulq_n_f32(y, c_minus_cephes_DP3);
  x = vaddq_f32(x, xmm1);
  x = vaddq_f32(x, xmm2);
  x = vaddq_f32(x, xmm3);

  /* per-quadrant sign flips: sin flips where bit 2 of j is set (combined
     with the input-sign mask), cos where bit 2 of (j - 2) is set */
  sign_mask_sin = veorq_u32(sign_mask_sin, vtstq_u32(emm2, vdupq_n_u32(4)));
  sign_mask_cos = vtstq_u32(vsubq_u32(emm2, vdupq_n_u32(2)), vdupq_n_u32(4));

  /* Evaluate the first polynom (0 <= x <= Pi/4) in y1,
     and the second polynom (Pi/4 < x <= Pi/2) in y2 */
  v4sf z = vmulq_f32(x,x);
  v4sf y1, y2;

  /* y1: cosine polynomial; y2: sine polynomial — interleaved for ILP */
  y1 = vmulq_n_f32(z, c_coscof_p0);
  y2 = vmulq_n_f32(z, c_sincof_p0);
  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p1));
  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p1));
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, z);
  y1 = vaddq_f32(y1, vdupq_n_f32(c_coscof_p2));
  y2 = vaddq_f32(y2, vdupq_n_f32(c_sincof_p2));
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, z);
  y1 = vmulq_f32(y1, z);
  y2 = vmulq_f32(y2, x);
  y1 = vsubq_f32(y1, vmulq_f32(z, vdupq_n_f32(0.5f)));
  y2 = vaddq_f32(y2, x);
  y1 = vaddq_f32(y1, vdupq_n_f32(1));

  /* select the correct result from the two polynoms */
  v4sf ys = vbslq_f32(poly_mask, y1, y2);
  v4sf yc = vbslq_f32(poly_mask, y2, y1);
  /* apply the quadrant/input sign corrections */
  *ysin = vbslq_f32(sign_mask_sin, vnegq_f32(ys), ys);
  *ycos = vbslq_f32(sign_mask_cos, yc, vnegq_f32(yc));
}
Chris@29 288
/* sine of 4 floats at once; thin wrapper over sincos_ps (the cosine
   falls out of the shared computation and is discarded here) */
v4sf sin_ps(v4sf x) {
  v4sf s, c;
  sincos_ps(x, &s, &c);
  return s;
}
Chris@29 294
/* cosine of 4 floats at once; thin wrapper over sincos_ps (the sine
   falls out of the shared computation and is discarded here) */
v4sf cos_ps(v4sf x) {
  v4sf s, c;
  sincos_ps(x, &s, &c);
  return c;
}
Chris@29 300
Chris@29 301