annotate src/opus-1.3/celt/arm/celt_neon_intr.c @ 168:ceec0dd9ec9c

Replace these with versions built using an older toolset (so as to avoid ABI incompatibilities when linking on Ubuntu 14.04 for packaging purposes)
author Chris Cannam <cannam@all-day-breakfast.com>
date Fri, 07 Feb 2020 11:51:13 +0000
parents 4664ac0c1032
children
/* Copyright (c) 2014-2015 Xiph.Org Foundation
   Written by Viswanath Puttagunta */
/**
   @file celt_neon_intr.c
   @brief ARM Neon Intrinsic optimizations for celt
 */

/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <arm_neon.h>
#include "../pitch.h"

#if defined(FIXED_POINT)
void xcorr_kernel_neon_fixed(const opus_val16 * x, const opus_val16 * y, opus_val32 sum[4], int len)
{
   int j;
   int32x4_t a = vld1q_s32(sum);
   /* Load y[0...3] */
   /* This requires len>0 to always be valid (which we assert in the C code). */
   int16x4_t y0 = vld1_s16(y);
   y += 4;

   /* Process 8 samples of x per iteration, accumulating all four lags. */
   for (j = 0; j + 8 <= len; j += 8)
   {
      /* Load x[0...7] */
      int16x8_t xx = vld1q_s16(x);
      int16x4_t x0 = vget_low_s16(xx);
      int16x4_t x4 = vget_high_s16(xx);
      /* Load y[4...11] */
      int16x8_t yy = vld1q_s16(y);
      int16x4_t y4 = vget_low_s16(yy);
      int16x4_t y8 = vget_high_s16(yy);
      int32x4_t a0 = vmlal_lane_s16(a, y0, x0, 0);
      int32x4_t a1 = vmlal_lane_s16(a0, y4, x4, 0);

      /* Shift the y window one sample at a time with vext_s16 and
       * multiply-accumulate against the matching lane of x. */
      int16x4_t y1 = vext_s16(y0, y4, 1);
      int16x4_t y5 = vext_s16(y4, y8, 1);
      int32x4_t a2 = vmlal_lane_s16(a1, y1, x0, 1);
      int32x4_t a3 = vmlal_lane_s16(a2, y5, x4, 1);

      int16x4_t y2 = vext_s16(y0, y4, 2);
      int16x4_t y6 = vext_s16(y4, y8, 2);
      int32x4_t a4 = vmlal_lane_s16(a3, y2, x0, 2);
      int32x4_t a5 = vmlal_lane_s16(a4, y6, x4, 2);

      int16x4_t y3 = vext_s16(y0, y4, 3);
      int16x4_t y7 = vext_s16(y4, y8, 3);
      int32x4_t a6 = vmlal_lane_s16(a5, y3, x0, 3);
      int32x4_t a7 = vmlal_lane_s16(a6, y7, x4, 3);

      y0 = y8;
      a = a7;
      x += 8;
      y += 8;
   }

   /* Handle any remaining samples one at a time. */
   for (; j < len; j++)
   {
      int16x4_t x0 = vld1_dup_s16(x); /* load next x */
      int32x4_t a0 = vmlal_s16(a, y0, x0);

      int16x4_t y4 = vld1_dup_s16(y); /* load next y */
      y0 = vext_s16(y0, y4, 1);
      a = a0;
      x++;
      y++;
   }

   vst1q_s32(sum, a);
}
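
/* For reference (a sketch added here, not part of the original source):
 * the scalar computation that xcorr_kernel_neon_fixed vectorizes. Each
 * sum[k] accumulates the correlation of x against y at lag k:
 *
 *    for (j = 0; j < len; j++)
 *       for (k = 0; k < 4; k++)
 *          sum[k] += (opus_val32)x[j] * y[j + k];
 *
 * The NEON version above keeps the four lags in one 32-bit accumulator
 * vector and builds the shifted y windows with vext_s16 instead of
 * reloading overlapping data. The float kernel below computes the same
 * four values in single precision.
 */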

#else
/*
 * Function: xcorr_kernel_neon_float
 * ---------------------------------
 * Computes 4 correlation values and stores them in sum[4]
 */
static void xcorr_kernel_neon_float(const float32_t *x, const float32_t *y,
      float32_t sum[4], int len) {
   float32x4_t YY[3];
   float32x4_t YEXT[3];
   float32x4_t XX[2];
   float32x2_t XX_2;
   float32x4_t SUMM;
   const float32_t *xi = x;
   const float32_t *yi = y;

   celt_assert(len>0);

   YY[0] = vld1q_f32(yi);
   SUMM = vdupq_n_f32(0);

   /* Consume 8 elements of x and 12 of y per iteration. The 12th y element
    * is never actually used in the products: when len == 8 only y[0]..y[10]
    * may be read and y[11] must not be accessed, hence the loop condition
    * is len > 8 rather than len >= 8.
    */
   while (len > 8) {
      yi += 4;
      YY[1] = vld1q_f32(yi);
      yi += 4;
      YY[2] = vld1q_f32(yi);

      XX[0] = vld1q_f32(xi);
      xi += 4;
      XX[1] = vld1q_f32(xi);
      xi += 4;

      SUMM = vmlaq_lane_f32(SUMM, YY[0], vget_low_f32(XX[0]), 0);
      YEXT[0] = vextq_f32(YY[0], YY[1], 1);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[0], vget_low_f32(XX[0]), 1);
      YEXT[1] = vextq_f32(YY[0], YY[1], 2);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[1], vget_high_f32(XX[0]), 0);
      YEXT[2] = vextq_f32(YY[0], YY[1], 3);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[2], vget_high_f32(XX[0]), 1);

      SUMM = vmlaq_lane_f32(SUMM, YY[1], vget_low_f32(XX[1]), 0);
      YEXT[0] = vextq_f32(YY[1], YY[2], 1);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[0], vget_low_f32(XX[1]), 1);
      YEXT[1] = vextq_f32(YY[1], YY[2], 2);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[1], vget_high_f32(XX[1]), 0);
      YEXT[2] = vextq_f32(YY[1], YY[2], 3);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[2], vget_high_f32(XX[1]), 1);

      YY[0] = YY[2];
      len -= 8;
   }

   /* Consume 4 elements of x and 8 of y per iteration. As above, the 8th y
    * element is never used: when len == 4 only y[0]..y[6] may be read and
    * y[7] must not be accessed, hence len > 4 rather than len >= 4.
    */
   if (len > 4) {
      yi += 4;
      YY[1] = vld1q_f32(yi);

      XX[0] = vld1q_f32(xi);
      xi += 4;

      SUMM = vmlaq_lane_f32(SUMM, YY[0], vget_low_f32(XX[0]), 0);
      YEXT[0] = vextq_f32(YY[0], YY[1], 1);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[0], vget_low_f32(XX[0]), 1);
      YEXT[1] = vextq_f32(YY[0], YY[1], 2);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[1], vget_high_f32(XX[0]), 0);
      YEXT[2] = vextq_f32(YY[0], YY[1], 3);
      SUMM = vmlaq_lane_f32(SUMM, YEXT[2], vget_high_f32(XX[0]), 1);

      YY[0] = YY[1];
      len -= 4;
   }

   /* Process the remaining samples one at a time, reloading the y window
    * after each step. */
   while (--len > 0) {
      XX_2 = vld1_dup_f32(xi++);
      SUMM = vmlaq_lane_f32(SUMM, YY[0], XX_2, 0);
      YY[0] = vld1q_f32(++yi);
   }

   /* The final sample reuses the already-loaded YY[0], so no y data is
    * read past the end of the buffer. */
   XX_2 = vld1_dup_f32(xi);
   SUMM = vmlaq_lane_f32(SUMM, YY[0], XX_2, 0);

   vst1q_f32(sum, SUMM);
}
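
/* Note (an added observation, not in the original source): for 4 lags and
 * len samples this kernel reads y[0]..y[len+2], so callers must guarantee
 * len+3 valid y values at the given offset. celt_pitch_xcorr_float_neon
 * below ensures this by stopping its unrolled loop at i < max_pitch-3.
 */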
void celt_pitch_xcorr_float_neon(const opus_val16 *_x, const opus_val16 *_y,
      opus_val32 *xcorr, int len, int max_pitch, int arch) {
   int i;
   (void)arch;
   celt_assert(max_pitch > 0);
   /* _x must be 4-byte aligned for the vector loads in the kernel. */
   celt_sig_assert((((unsigned char *)_x-(unsigned char *)NULL)&3)==0);

   /* Compute four correlation lags at a time. */
   for (i = 0; i < (max_pitch-3); i += 4) {
      xcorr_kernel_neon_float((const float32_t *)_x, (const float32_t *)_y+i,
            (float32_t *)xcorr+i, len);
   }

   /* In case max_pitch isn't a multiple of 4, do non-unrolled version. */
   for (; i < max_pitch; i++) {
      xcorr[i] = celt_inner_prod_neon(_x, _y+i, len);
   }
}
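
/* For reference (a sketch added here, not part of the original source):
 * the scalar computation this function vectorizes,
 *
 *    for (i = 0; i < max_pitch; i++) {
 *       xcorr[i] = 0;
 *       for (j = 0; j < len; j++)
 *          xcorr[i] += x[j] * y[j + i];
 *    }
 *
 * which implies y must hold len + max_pitch - 1 valid samples. The main
 * loop above computes four lags per call to xcorr_kernel_neon_float; the
 * tail loop handles any remainder when max_pitch is not a multiple of 4.
 */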
#endif