annotate src/opus-1.3/silk/arm/LPC_inv_pred_gain_neon_intr.c @ 169:223a55898ab9 tip default

Add null config files
author Chris Cannam <cannam@all-day-breakfast.com>
date Mon, 02 Mar 2020 14:03:47 +0000
parents 4664ac0c1032
children
/***********************************************************************
Copyright (c) 2017 Google Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <arm_neon.h>
#include "SigProc_FIX.h"
#include "define.h"

#define QA 24
#define A_LIMIT SILK_FIX_CONST( 0.99975, QA )

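/* Fractional multiply with rounding: ( a32 * b32 ) >> Q; with Q == 31 this is a rounded Q31 multiply of two 32-bit values. */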
#define MUL32_FRAC_Q(a32, b32, Q) ((opus_int32)(silk_RSHIFT_ROUND64(silk_SMULL(a32, b32), Q)))

/* The difficulty is how to detect whether a 64-bit signed integer tmp64 has overflowed 32 bits,
 * since NEON has no 64-bit min, max or comparison instructions.
 * A failed idea is to compare the results of vmovn(tmp64) and vqmovn(tmp64) for equality.
 * However, this idea fails when tmp64 is something like 0xFFFFFFF980000000.
 * Here we know that mult2Q >= 1, so the highest bit of tmp64 (bit 63, the sign bit) must equal bit 62.
 * Shift tmp64 left by 1 to get tmp64'. If high_half(tmp64') != 0 and high_half(tmp64') != -1,
 * then bits 31 to 63 of tmp64 cannot all equal the sign bit, and therefore tmp64 has overflowed 32 bits.
 * That is, we test whether tmp64' > 0x00000000FFFFFFFF or tmp64' < 0xFFFFFFFF00000000.
 * We use a narrowing shift right by 31 bits to obtain tmp32', which saves data bandwidth and instructions.
 * That is, we test whether tmp32' > 0 or tmp32' < -1.
 */
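
/* Illustrative sketch (not part of the kernel): for tmp64 = 0xFFFFFFF980000000,
 * which lies below silk_int32_MIN, vmovn and vqmovn both yield 0x80000000, so the
 * equality test above misses the overflow. The narrowing shift catches it:
 *   tmp32' = (opus_int32)( tmp64 >> 31 );   -> 0xFFFFFFF3 == -13
 * Since tmp32' < -1 the value is flagged as overflowed, while any tmp64 that fits
 * in 32 bits yields tmp32' == 0 or tmp32' == -1.
 */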

/* Compute inverse of LPC prediction gain, and */
/* test if LPC coefficients are stable (all poles within unit circle) */
static OPUS_INLINE opus_int32 LPC_inverse_pred_gain_QA_neon( /* O Returns inverse prediction gain in energy domain, Q30 */
    opus_int32 A_QA[ SILK_MAX_ORDER_LPC ], /* I Prediction coefficients */
    const opus_int order /* I Prediction order */
)
{
    opus_int k, n, mult2Q;
    opus_int32 invGain_Q30, rc_Q31, rc_mult1_Q30, rc_mult2, tmp1, tmp2;
    opus_int32 max, min;
    int32x4_t max_s32x4, min_s32x4;
    int32x2_t max_s32x2, min_s32x2;

    max_s32x4 = vdupq_n_s32( silk_int32_MIN );
    min_s32x4 = vdupq_n_s32( silk_int32_MAX );
    invGain_Q30 = SILK_FIX_CONST( 1, 30 );
    for( k = order - 1; k > 0; k-- ) {
        int32x2_t rc_Q31_s32x2, rc_mult2_s32x2;
        int64x2_t mult2Q_s64x2;

        /* Check for stability */
        if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
            return 0;
        }

        /* Set RC equal to negated AR coef */
        rc_Q31 = -silk_LSHIFT( A_QA[ k ], 31 - QA );

        /* rc_mult1_Q30 range: [ 1 : 2^30 ] */
        rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );
        silk_assert( rc_mult1_Q30 > ( 1 << 15 ) ); /* reduce A_LIMIT if fails */
        silk_assert( rc_mult1_Q30 <= ( 1 << 30 ) );

        /* Update inverse gain */
        /* invGain_Q30 range: [ 0 : 2^30 ] */
        invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
        silk_assert( invGain_Q30 >= 0 );
        silk_assert( invGain_Q30 <= ( 1 << 30 ) );
        if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
            return 0;
        }

        /* rc_mult2 range: [ 2^30 : silk_int32_MAX ] */
        mult2Q = 32 - silk_CLZ32( silk_abs( rc_mult1_Q30 ) );
        rc_mult2 = silk_INVERSE32_varQ( rc_mult1_Q30, mult2Q + 30 );

        /* Update AR coefficient */
        rc_Q31_s32x2 = vdup_n_s32( rc_Q31 );
        mult2Q_s64x2 = vdupq_n_s64( -mult2Q );
        rc_mult2_s32x2 = vdup_n_s32( rc_mult2 );

        for( n = 0; n < ( ( k + 1 ) >> 1 ) - 3; n += 4 ) {
            /* We always calculate extra elements of the A_QA buffer when ( k % 4 ) != 0, to take advantage of SIMD parallelization. */
            int32x4_t tmp1_s32x4, tmp2_s32x4, t0_s32x4, t1_s32x4, s0_s32x4, s1_s32x4, t_QA0_s32x4, t_QA1_s32x4;
            int64x2_t t0_s64x2, t1_s64x2, t2_s64x2, t3_s64x2;
            tmp1_s32x4 = vld1q_s32( A_QA + n );
            tmp2_s32x4 = vld1q_s32( A_QA + k - n - 4 );
            tmp2_s32x4 = vrev64q_s32( tmp2_s32x4 );
            tmp2_s32x4 = vcombine_s32( vget_high_s32( tmp2_s32x4 ), vget_low_s32( tmp2_s32x4 ) );
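            /* tmp2_s32x4 lane i now holds A_QA[ k - 1 - ( n + i ) ], the mirror element of tmp1_s32x4 lane i. */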
            t0_s32x4 = vqrdmulhq_lane_s32( tmp2_s32x4, rc_Q31_s32x2, 0 );
            t1_s32x4 = vqrdmulhq_lane_s32( tmp1_s32x4, rc_Q31_s32x2, 0 );
            t_QA0_s32x4 = vqsubq_s32( tmp1_s32x4, t0_s32x4 );
            t_QA1_s32x4 = vqsubq_s32( tmp2_s32x4, t1_s32x4 );
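            /* vqrdmulhq_lane_s32 is a rounded Q31 multiply by rc_Q31, so t_QA0 = A_QA[ n + i ] - rc * A_QA[ k - 1 - n - i ] and t_QA1 is the mirrored counterpart: the two numerators of the coefficient update. */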
            t0_s64x2 = vmull_s32( vget_low_s32 ( t_QA0_s32x4 ), rc_mult2_s32x2 );
            t1_s64x2 = vmull_s32( vget_high_s32( t_QA0_s32x4 ), rc_mult2_s32x2 );
            t2_s64x2 = vmull_s32( vget_low_s32 ( t_QA1_s32x4 ), rc_mult2_s32x2 );
            t3_s64x2 = vmull_s32( vget_high_s32( t_QA1_s32x4 ), rc_mult2_s32x2 );
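            /* The numerators are scaled by rc_mult2 above; vrshlq_s64 with the negative count -mult2Q below performs a rounding right shift, matching silk_RSHIFT_ROUND64 in the scalar tail. */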
            t0_s64x2 = vrshlq_s64( t0_s64x2, mult2Q_s64x2 );
            t1_s64x2 = vrshlq_s64( t1_s64x2, mult2Q_s64x2 );
            t2_s64x2 = vrshlq_s64( t2_s64x2, mult2Q_s64x2 );
            t3_s64x2 = vrshlq_s64( t3_s64x2, mult2Q_s64x2 );
            t0_s32x4 = vcombine_s32( vmovn_s64( t0_s64x2 ), vmovn_s64( t1_s64x2 ) );
            t1_s32x4 = vcombine_s32( vmovn_s64( t2_s64x2 ), vmovn_s64( t3_s64x2 ) );
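            /* t0/t1 now hold the updated coefficients narrowed to 32 bits; s0/s1 below take bits 31..62 of each 64-bit result (tmp32' in the overflow test described above) and feed the running max/min. */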
            s0_s32x4 = vcombine_s32( vshrn_n_s64( t0_s64x2, 31 ), vshrn_n_s64( t1_s64x2, 31 ) );
            s1_s32x4 = vcombine_s32( vshrn_n_s64( t2_s64x2, 31 ), vshrn_n_s64( t3_s64x2, 31 ) );
            max_s32x4 = vmaxq_s32( max_s32x4, s0_s32x4 );
            min_s32x4 = vminq_s32( min_s32x4, s0_s32x4 );
            max_s32x4 = vmaxq_s32( max_s32x4, s1_s32x4 );
            min_s32x4 = vminq_s32( min_s32x4, s1_s32x4 );
            t1_s32x4 = vrev64q_s32( t1_s32x4 );
            t1_s32x4 = vcombine_s32( vget_high_s32( t1_s32x4 ), vget_low_s32( t1_s32x4 ) );
            vst1q_s32( A_QA + n, t0_s32x4 );
            vst1q_s32( A_QA + k - n - 4, t1_s32x4 );
        }
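        /* Scalar tail: process any remaining mirror pairs exactly as the C reference does, with an explicit 64-bit overflow check. */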
        for( ; n < (k + 1) >> 1; n++ ) {
            opus_int64 tmp64;
            tmp1 = A_QA[ n ];
            tmp2 = A_QA[ k - n - 1 ];
            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp1,
                MUL32_FRAC_Q( tmp2, rc_Q31, 31 ) ), rc_mult2 ), mult2Q);
            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
                return 0;
            }
            A_QA[ n ] = ( opus_int32 )tmp64;
            tmp64 = silk_RSHIFT_ROUND64( silk_SMULL( silk_SUB_SAT32(tmp2,
                MUL32_FRAC_Q( tmp1, rc_Q31, 31 ) ), rc_mult2), mult2Q);
            if( tmp64 > silk_int32_MAX || tmp64 < silk_int32_MIN ) {
                return 0;
            }
            A_QA[ k - n - 1 ] = ( opus_int32 )tmp64;
        }
    }

    /* Check for stability */
    if( ( A_QA[ k ] > A_LIMIT ) || ( A_QA[ k ] < -A_LIMIT ) ) {
        return 0;
    }

    max_s32x2 = vmax_s32( vget_low_s32( max_s32x4 ), vget_high_s32( max_s32x4 ) );
    min_s32x2 = vmin_s32( vget_low_s32( min_s32x4 ), vget_high_s32( min_s32x4 ) );
    max_s32x2 = vmax_s32( max_s32x2, vreinterpret_s32_s64( vshr_n_s64( vreinterpret_s64_s32( max_s32x2 ), 32 ) ) );
    min_s32x2 = vmin_s32( min_s32x2, vreinterpret_s32_s64( vshr_n_s64( vreinterpret_s64_s32( min_s32x2 ), 32 ) ) );
    max = vget_lane_s32( max_s32x2, 0 );
    min = vget_lane_s32( min_s32x2, 0 );
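    /* max/min hold the extreme tmp32' values produced in the vector loop; anything outside [ -1, 0 ] means a coefficient update overflowed 32 bits. */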
    if( ( max > 0 ) || ( min < -1 ) ) {
        return 0;
    }

    /* Set RC equal to negated AR coef */
    rc_Q31 = -silk_LSHIFT( A_QA[ 0 ], 31 - QA );

    /* Range: [ 1 : 2^30 ] */
    rc_mult1_Q30 = silk_SUB32( SILK_FIX_CONST( 1, 30 ), silk_SMMUL( rc_Q31, rc_Q31 ) );

    /* Update inverse gain */
    /* Range: [ 0 : 2^30 ] */
    invGain_Q30 = silk_LSHIFT( silk_SMMUL( invGain_Q30, rc_mult1_Q30 ), 2 );
    silk_assert( invGain_Q30 >= 0 );
    silk_assert( invGain_Q30 <= ( 1 << 30 ) );
    if( invGain_Q30 < SILK_FIX_CONST( 1.0f / MAX_PREDICTION_POWER_GAIN, 30 ) ) {
        return 0;
    }

    return invGain_Q30;
}

/* For input in Q12 domain */
opus_int32 silk_LPC_inverse_pred_gain_neon( /* O Returns inverse prediction gain in energy domain, Q30 */
    const opus_int16 *A_Q12, /* I Prediction coefficients, Q12 [order] */
    const opus_int order /* I Prediction order */
)
{
#ifdef OPUS_CHECK_ASM
    const opus_int32 invGain_Q30_c = silk_LPC_inverse_pred_gain_c( A_Q12, order );
#endif

    opus_int32 invGain_Q30;
    if( ( SILK_MAX_ORDER_LPC != 24 ) || ( order & 1 ) ) {
        invGain_Q30 = silk_LPC_inverse_pred_gain_c( A_Q12, order );
    }
    else {
        opus_int32 Atmp_QA[ SILK_MAX_ORDER_LPC ];
        opus_int32 DC_resp;
        int16x8_t t0_s16x8, t1_s16x8, t2_s16x8;
        int32x4_t t0_s32x4;
        const opus_int leftover = order & 7;

        /* Increase Q domain of the AR coefficients */
        t0_s16x8 = vld1q_s16( A_Q12 + 0 );
        t1_s16x8 = vld1q_s16( A_Q12 + 8 );
        t2_s16x8 = vld1q_s16( A_Q12 + 16 );
        t0_s32x4 = vpaddlq_s16( t0_s16x8 );
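        /* t0_s32x4 holds pairwise sums of the first 8 Q12 taps; the switch below folds in further groups of 8 and reduces them to DC_resp, the sum of the coefficients handled here. */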

        switch( order - leftover )
        {
        case 24:
            t0_s32x4 = vpadalq_s16( t0_s32x4, t2_s16x8 );
            /* FALLTHROUGH */

        case 16:
            t0_s32x4 = vpadalq_s16( t0_s32x4, t1_s16x8 );
            vst1q_s32( Atmp_QA + 16, vshll_n_s16( vget_low_s16 ( t2_s16x8 ), QA - 12 ) );
            vst1q_s32( Atmp_QA + 20, vshll_n_s16( vget_high_s16( t2_s16x8 ), QA - 12 ) );
            /* FALLTHROUGH */

        case 8:
        {
            const int32x2_t t_s32x2 = vpadd_s32( vget_low_s32( t0_s32x4 ), vget_high_s32( t0_s32x4 ) );
            const int64x1_t t_s64x1 = vpaddl_s32( t_s32x2 );
            DC_resp = vget_lane_s32( vreinterpret_s32_s64( t_s64x1 ), 0 );
            vst1q_s32( Atmp_QA + 8, vshll_n_s16( vget_low_s16 ( t1_s16x8 ), QA - 12 ) );
            vst1q_s32( Atmp_QA + 12, vshll_n_s16( vget_high_s16( t1_s16x8 ), QA - 12 ) );
        }
        break;

        default:
            DC_resp = 0;
            break;
        }
        A_Q12 += order - leftover;
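        /* order is even here (odd orders take the C path), so leftover is 0, 2, 4 or 6; the remaining taps are summed into DC_resp below. */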

        switch( leftover )
        {
        case 6:
            DC_resp += (opus_int32)A_Q12[ 5 ];
            DC_resp += (opus_int32)A_Q12[ 4 ];
            /* FALLTHROUGH */

        case 4:
            DC_resp += (opus_int32)A_Q12[ 3 ];
            DC_resp += (opus_int32)A_Q12[ 2 ];
            /* FALLTHROUGH */

        case 2:
            DC_resp += (opus_int32)A_Q12[ 1 ];
            DC_resp += (opus_int32)A_Q12[ 0 ];
            /* FALLTHROUGH */

        default:
            break;
        }

        /* If the DC is unstable, we don't even need to do the full calculations */
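        /* DC_resp is the sum of the Q12 taps, so 4096 corresponds to a DC gain of 1.0. */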
        if( DC_resp >= 4096 ) {
            invGain_Q30 = 0;
        } else {
            vst1q_s32( Atmp_QA + 0, vshll_n_s16( vget_low_s16 ( t0_s16x8 ), QA - 12 ) );
            vst1q_s32( Atmp_QA + 4, vshll_n_s16( vget_high_s16( t0_s16x8 ), QA - 12 ) );
            invGain_Q30 = LPC_inverse_pred_gain_QA_neon( Atmp_QA, order );
        }
    }

#ifdef OPUS_CHECK_ASM
    silk_assert( invGain_Q30_c == invGain_Q30 );
#endif

    return invGain_Q30;
}
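
/* Usage sketch (hypothetical, for illustration only): inside the library this
 * function is normally selected by the SILK CPU dispatch rather than called
 * directly. A direct call could look like:
 *
 *   opus_int16 a_Q12[ SILK_MAX_ORDER_LPC ] = { 2048, -1024 };  // 0.5, -0.25 in Q12, rest zero
 *   opus_int32 gain_Q30 = silk_LPC_inverse_pred_gain_neon( a_Q12, 2 );
 *   // gain_Q30 == 0 would indicate an unstable (or overflowing) filter.
 *
 * The buffer is sized SILK_MAX_ORDER_LPC because the NEON path loads 24
 * coefficients up front regardless of the requested order.
 */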