/***********************************************************************
Copyright (c) 2017 Google Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <arm_neon.h>
#ifdef OPUS_CHECK_ASM
# include <string.h>
# include "stack_alloc.h"
#endif
#include "SigProc_FIX.h"

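/* Implementation note (added commentary, not from the upstream sources): the
 * reference C code uses silk_SMULWB( a, b ), which computes
 * ( a * (opus_int16)b ) >> 16, and silk_SMLAWB(), which adds such a product to
 * an accumulator.  The kernel below emulates these with vqdmulhq_s32( x, y ),
 * the saturating doubling multiply returning the high half, i.e. ( x * y ) >> 31.
 * Every "b" operand here fits in 16 bits (the input samples and the split A
 * coefficient halves), so pre-shifting that operand left by 15 gives
 *     vqdmulhq_s32( x, y << 15 ) == ( x * y ) >> 16
 * exactly, as long as no saturation occurs. */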
static inline void silk_biquad_alt_stride2_kernel( const int32x4_t A_L_s32x4, const int32x4_t A_U_s32x4, const int32x4_t B_Q28_s32x4, const int32x2_t t_s32x2, const int32x4_t in_s32x4, int32x4_t *S_s32x4, int32x2_t *out32_Q14_s32x2 )
{
    int32x4_t t_s32x4, out32_Q14_s32x4;

    *out32_Q14_s32x2 = vadd_s32( vget_low_s32( *S_s32x4 ), t_s32x2 );              /* silk_SMLAWB( S{0,1}, B_Q28[ 0 ], in{0,1} )                                       */
    *S_s32x4         = vcombine_s32( vget_high_s32( *S_s32x4 ), vdup_n_s32( 0 ) ); /* S{0,1} = S{2,3}; S{2,3} = 0;                                                     */
    *out32_Q14_s32x2 = vshl_n_s32( *out32_Q14_s32x2, 2 );                          /* out32_Q14_{0,1} = silk_LSHIFT( silk_SMLAWB( S{0,1}, B_Q28[ 0 ], in{0,1} ), 2 ); */
    out32_Q14_s32x4  = vcombine_s32( *out32_Q14_s32x2, *out32_Q14_s32x2 );         /* out32_Q14_{0,1,0,1}                                                              */
    t_s32x4          = vqdmulhq_s32( out32_Q14_s32x4, A_L_s32x4 );                 /* silk_SMULWB( out32_Q14_{0,1,0,1}, A{0,0,1,1}_L_Q28 )                             */
    *S_s32x4         = vrsraq_n_s32( *S_s32x4, t_s32x4, 14 );                      /* S{0,1} = S{2,3} + silk_RSHIFT_ROUND(); S{2,3} = silk_RSHIFT_ROUND();            */
    t_s32x4          = vqdmulhq_s32( out32_Q14_s32x4, A_U_s32x4 );                 /* silk_SMULWB( out32_Q14_{0,1,0,1}, A{0,0,1,1}_U_Q28 )                             */
    *S_s32x4         = vaddq_s32( *S_s32x4, t_s32x4 );                             /* S0 = silk_SMLAWB( S{0,1,2,3}, out32_Q14_{0,1,0,1}, A{0,0,1,1}_U_Q28 );          */
    t_s32x4          = vqdmulhq_s32( in_s32x4, B_Q28_s32x4 );                      /* silk_SMULWB( B_Q28[ {1,1,2,2} ], in{0,1,0,1} )                                   */
    *S_s32x4         = vaddq_s32( *S_s32x4, t_s32x4 );                             /* S0 = silk_SMLAWB( S0, B_Q28[ {1,1,2,2} ], in{0,1,0,1} );                        */
}
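
/* For reference (a sketch in the fixed-point notation of the C version, added
 * here as commentary and ignoring the extra saturation that vqdmulhq_s32()
 * introduces), one call to the kernel above advances one channel with state
 * pair ( S0, S1 ) and input sample "in" as:
 *     out32_Q14 = silk_LSHIFT( silk_SMLAWB( S0, B_Q28[ 0 ], in ), 2 );
 *     S0 = S1 + silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14, A0_L_Q28 ), 14 );
 *     S0 = silk_SMLAWB( S0, out32_Q14, A0_U_Q28 );
 *     S0 = silk_SMLAWB( S0, B_Q28[ 1 ], in );
 *     S1 =      silk_RSHIFT_ROUND( silk_SMULWB( out32_Q14, A1_L_Q28 ), 14 );
 *     S1 = silk_SMLAWB( S1, out32_Q14, A1_U_Q28 );
 *     S1 = silk_SMLAWB( S1, B_Q28[ 2 ], in );
 * Both interleaved channels are updated at once: the low half of S_s32x4 holds
 * both channels' first state, the high half both channels' second state. */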

void silk_biquad_alt_stride2_neon(
    const opus_int16            *in,                /* I     input signal                       */
    const opus_int32            *B_Q28,             /* I     MA coefficients [3]                */
    const opus_int32            *A_Q28,             /* I     AR coefficients [2]                */
    opus_int32                  *S,                 /* I/O   State vector [4]                   */
    opus_int16                  *out,               /* O     output signal                      */
    const opus_int32            len                 /* I     signal length (must be even)       */
)
{
    /* DIRECT FORM II TRANSPOSED (uses a 2 element state vector per channel) */
    opus_int        k            = 0;
    const int32x2_t offset_s32x2 = vdup_n_s32( (1<<14) - 1 );
    const int32x4_t offset_s32x4 = vcombine_s32( offset_s32x2, offset_s32x2 );
    int16x4_t       in_s16x4     = vdup_n_s16( 0 );
    int16x4_t       out_s16x4;
    int32x2_t       A_Q28_s32x2, A_L_s32x2, A_U_s32x2, B_Q28_s32x2, t_s32x2;
    int32x4_t       A_L_s32x4, A_U_s32x4, B_Q28_s32x4, S_s32x4, out32_Q14_s32x4;
    int32x2x2_t     t0_s32x2x2, t1_s32x2x2, t2_s32x2x2, S_s32x2x2;

#ifdef OPUS_CHECK_ASM
    opus_int32 S_c[ 4 ];
    VARDECL( opus_int16, out_c );
    SAVE_STACK;
    ALLOC( out_c, 2 * len, opus_int16 );

    silk_memcpy( &S_c, S, sizeof( S_c ) );
    silk_biquad_alt_stride2_c( in, B_Q28, A_Q28, S_c, out_c, len );
#endif
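
    /* In OPUS_CHECK_ASM builds, the block above runs the plain C implementation
     * silk_biquad_alt_stride2_c() on a copy of the state; its state and output
     * are compared against the NEON results with silk_assert() at the end of
     * this function. */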

    /* Negate A_Q28 values and split in two parts */
    A_Q28_s32x2 = vld1_s32( A_Q28 );
    A_Q28_s32x2 = vneg_s32( A_Q28_s32x2 );
    A_L_s32x2   = vshl_n_s32( A_Q28_s32x2, 18 );                                              /* ( -A_Q28[] & 0x00003FFF ) << 18                                                      */
    A_L_s32x2   = vreinterpret_s32_u32( vshr_n_u32( vreinterpret_u32_s32( A_L_s32x2 ), 3 ) ); /* ( -A_Q28[] & 0x00003FFF ) << 15                                                      */
    A_U_s32x2   = vshr_n_s32( A_Q28_s32x2, 14 );                                              /* silk_RSHIFT( -A_Q28[], 14 )                                                          */
    A_U_s32x2   = vshl_n_s32( A_U_s32x2, 16 );                                                /* silk_RSHIFT( -A_Q28[], 14 ) << 16  (Clip two leading bits to conform to C function.) */
    A_U_s32x2   = vshr_n_s32( A_U_s32x2, 1 );                                                 /* silk_RSHIFT( -A_Q28[], 14 ) << 15                                                    */

    B_Q28_s32x2 = vld1_s32( B_Q28 );
    t_s32x2     = vld1_s32( B_Q28 + 1 );
    t0_s32x2x2  = vzip_s32( A_L_s32x2, A_L_s32x2 );
    t1_s32x2x2  = vzip_s32( A_U_s32x2, A_U_s32x2 );
    t2_s32x2x2  = vzip_s32( t_s32x2, t_s32x2 );
    A_L_s32x4   = vcombine_s32( t0_s32x2x2.val[ 0 ], t0_s32x2x2.val[ 1 ] );      /* A{0,0,1,1}_L_Q28          */
    A_U_s32x4   = vcombine_s32( t1_s32x2x2.val[ 0 ], t1_s32x2x2.val[ 1 ] );      /* A{0,0,1,1}_U_Q28          */
    B_Q28_s32x4 = vcombine_s32( t2_s32x2x2.val[ 0 ], t2_s32x2x2.val[ 1 ] );      /* B_Q28[ {1,1,2,2} ]        */
    S_s32x4     = vld1q_s32( S );                                                /* S0 = S[ 0 ]; S3 = S[ 3 ]; */
    S_s32x2x2   = vtrn_s32( vget_low_s32( S_s32x4 ), vget_high_s32( S_s32x4 ) ); /* S2 = S[ 1 ]; S1 = S[ 2 ]; */
    S_s32x4     = vcombine_s32( S_s32x2x2.val[ 0 ], S_s32x2x2.val[ 1 ] );
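
    /* Added commentary: S[] stores channel 0's two states in S[ 0 ], S[ 1 ] and
     * channel 1's in S[ 2 ], S[ 3 ].  The vtrn_s32() above re-orders them so that
     * S_s32x4 = { S[ 0 ], S[ 2 ], S[ 1 ], S[ 3 ] }: both channels' first states in
     * the low half and both channels' second states in the high half, which is the
     * layout the kernel expects.  The order is undone when S_s32x4 is written back
     * at the end of the function. */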

    for( ; k < len - 1; k += 2 ) {
        int32x4_t in_s32x4[ 2 ], t_s32x4;
        int32x2_t out32_Q14_s32x2[ 2 ];

        /* S[ 2 * i + 0 ], S[ 2 * i + 1 ], S[ 2 * i + 2 ], S[ 2 * i + 3 ]: Q12 */
        in_s16x4      = vld1_s16( &in[ 2 * k ] );                           /* in{0,1,2,3} = in[ 2 * k + {0,1,2,3} ]; */
        in_s32x4[ 0 ] = vshll_n_s16( in_s16x4, 15 );                        /* in{0,1,2,3} << 15                      */
        t_s32x4       = vqdmulhq_lane_s32( in_s32x4[ 0 ], B_Q28_s32x2, 0 ); /* silk_SMULWB( B_Q28[ 0 ], in{0,1,2,3} ) */
        in_s32x4[ 1 ] = vcombine_s32( vget_high_s32( in_s32x4[ 0 ] ), vget_high_s32( in_s32x4[ 0 ] ) ); /* in{2,3,2,3} << 15 */
        in_s32x4[ 0 ] = vcombine_s32( vget_low_s32 ( in_s32x4[ 0 ] ), vget_low_s32 ( in_s32x4[ 0 ] ) ); /* in{0,1,0,1} << 15 */
        silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, vget_low_s32 ( t_s32x4 ), in_s32x4[ 0 ], &S_s32x4, &out32_Q14_s32x2[ 0 ] );
        silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, vget_high_s32( t_s32x4 ), in_s32x4[ 1 ], &S_s32x4, &out32_Q14_s32x2[ 1 ] );

        /* Scale back to Q0 and saturate */
        out32_Q14_s32x4 = vcombine_s32( out32_Q14_s32x2[ 0 ], out32_Q14_s32x2[ 1 ] ); /* out32_Q14_{0,1,2,3}                                                            */
        out32_Q14_s32x4 = vaddq_s32( out32_Q14_s32x4, offset_s32x4 );                 /* out32_Q14_{0,1,2,3} + (1<<14) - 1                                              */
        out_s16x4       = vqshrn_n_s32( out32_Q14_s32x4, 14 );                        /* (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,2,3} + (1<<14) - 1, 14 ) ) */
        vst1_s16( &out[ 2 * k ], out_s16x4 );                                         /* out[ 2 * k + {0,1,2,3} ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,2,3} + (1<<14) - 1, 14 ) ); */
    }
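
    /* The vectorized loop above consumes two interleaved sample pairs (four
     * opus_int16 values) per iteration; if len is odd, the single remaining
     * sample pair is handled below with the same kernel. */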

    /* Process leftover. */
    if( k < len ) {
        int32x4_t in_s32x4;
        int32x2_t out32_Q14_s32x2;

        /* S[ 2 * i + 0 ], S[ 2 * i + 1 ]: Q12 */
        in_s16x4 = vld1_lane_s16( &in[ 2 * k + 0 ], in_s16x4, 0 );                     /* in{0,1} = in[ 2 * k + {0,1} ];     */
        in_s16x4 = vld1_lane_s16( &in[ 2 * k + 1 ], in_s16x4, 1 );                     /* in{0,1} = in[ 2 * k + {0,1} ];     */
        in_s32x4 = vshll_n_s16( in_s16x4, 15 );                                        /* in{0,1} << 15                      */
        t_s32x2  = vqdmulh_lane_s32( vget_low_s32( in_s32x4 ), B_Q28_s32x2, 0 );       /* silk_SMULWB( B_Q28[ 0 ], in{0,1} ) */
        in_s32x4 = vcombine_s32( vget_low_s32( in_s32x4 ), vget_low_s32( in_s32x4 ) ); /* in{0,1,0,1} << 15                  */
        silk_biquad_alt_stride2_kernel( A_L_s32x4, A_U_s32x4, B_Q28_s32x4, t_s32x2, in_s32x4, &S_s32x4, &out32_Q14_s32x2 );

        /* Scale back to Q0 and saturate */
        out32_Q14_s32x2 = vadd_s32( out32_Q14_s32x2, offset_s32x2 );                   /* out32_Q14_{0,1} + (1<<14) - 1      */
        out32_Q14_s32x4 = vcombine_s32( out32_Q14_s32x2, out32_Q14_s32x2 );            /* out32_Q14_{0,1,0,1} + (1<<14) - 1  */
        out_s16x4       = vqshrn_n_s32( out32_Q14_s32x4, 14 );                         /* (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_{0,1,0,1} + (1<<14) - 1, 14 ) ) */
        vst1_lane_s16( &out[ 2 * k + 0 ], out_s16x4, 0 );                              /* out[ 2 * k + 0 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_0 + (1<<14) - 1, 14 ) ); */
        vst1_lane_s16( &out[ 2 * k + 1 ], out_s16x4, 1 );                              /* out[ 2 * k + 1 ] = (opus_int16)silk_SAT16( silk_RSHIFT( out32_Q14_1 + (1<<14) - 1, 14 ) ); */
    }

    vst1q_lane_s32( &S[ 0 ], S_s32x4, 0 ); /* S[ 0 ] = S0; */
    vst1q_lane_s32( &S[ 1 ], S_s32x4, 2 ); /* S[ 1 ] = S2; */
    vst1q_lane_s32( &S[ 2 ], S_s32x4, 1 ); /* S[ 2 ] = S1; */
    vst1q_lane_s32( &S[ 3 ], S_s32x4, 3 ); /* S[ 3 ] = S3; */
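
    /* The lane order used in the four stores above undoes the vtrn_s32()
     * permutation applied when S[] was loaded, restoring the original
     * S[ 0 ]..S[ 3 ] layout. */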

#ifdef OPUS_CHECK_ASM
    silk_assert( !memcmp( S_c, S, sizeof( S_c ) ) );
    silk_assert( !memcmp( out_c, out, 2 * len * sizeof( opus_int16 ) ) );
    RESTORE_STACK;
#endif
}