/* Copyright (c) 2014, Cisco Systems, INC
   Written by XiangMingZhu WeiZhou MinPeng YanWang

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xmmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>
#include "main.h"
#include "celt/x86/x86cpu.h"
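/* Usage sketch (illustrative only, not part of the library): the caller supplies one
   5-element LTP vector in Q14, a 5x5 weighting matrix in Q18 stored row-wise, and a
   codebook of L rows of LTP_ORDER (= 5) Q7 entries. The buffer names below are
   hypothetical placeholders for the caller's data:

       opus_int8  ind;
       opus_int32 rate_dist_Q14;
       opus_int   gain_Q7;
       silk_VQ_WMat_EC_sse4_1( &ind, &rate_dist_Q14, &gain_Q7,
                               in_Q14, W_Q18, cb_Q7, cb_gain_Q7, cl_Q5,
                               mu_Q9, max_gain_Q7, L );
*/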
/* Entropy constrained matrix-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */
void silk_VQ_WMat_EC_sse4_1(
    opus_int8           *ind,               /* O    index of best codebook vector               */
    opus_int32          *rate_dist_Q14,     /* O    best weighted quant error + mu * rate       */
    opus_int            *gain_Q7,           /* O    sum of absolute LTP coefficients            */
    const opus_int16    *in_Q14,            /* I    input vector to be quantized                */
    const opus_int32    *W_Q18,             /* I    weighting matrix                            */
    const opus_int8     *cb_Q7,             /* I    codebook                                    */
    const opus_uint8    *cb_gain_Q7,        /* I    codebook effective gain                     */
    const opus_uint8    *cl_Q5,             /* I    code length for each codebook vector        */
    const opus_int      mu_Q9,              /* I    tradeoff betw. weighted error and rate      */
    const opus_int32    max_gain_Q7,        /* I    maximum sum of absolute LTP coefficients    */
    opus_int            L                   /* I    number of vectors in codebook               */
)
{
    opus_int   k, gain_tmp_Q7;
    const opus_int8 *cb_row_Q7;
    opus_int16 diff_Q14[ 5 ];
    opus_int32 sum1_Q14, sum2_Q16;

    __m128i C_tmp1, C_tmp2, C_tmp3, C_tmp4, C_tmp5;
    /* Loop over codebook */
    *rate_dist_Q14 = silk_int32_MAX;
    cb_row_Q7 = cb_Q7;
    for( k = 0; k < L; k++ ) {
        gain_tmp_Q7 = cb_gain_Q7[ k ];
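        /* First difference element computed in scalar: scale the Q7 codebook entry up to Q14 before subtracting */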
        diff_Q14[ 0 ] = in_Q14[ 0 ] - silk_LSHIFT( cb_row_Q7[ 0 ], 7 );
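        /* Remaining four differences in one vector op: sign-extend in_Q14[ 1..4 ] to 32 bits,
           sign-extend cb_row_Q7[ 1..4 ] and shift left 7 (Q7 -> Q14), then subtract lane-wise */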
        C_tmp1 = OP_CVTEPI16_EPI32_M64( &in_Q14[ 1 ] );
        C_tmp2 = OP_CVTEPI8_EPI32_M32( &cb_row_Q7[ 1 ] );
        C_tmp2 = _mm_slli_epi32( C_tmp2, 7 );
        C_tmp1 = _mm_sub_epi32( C_tmp1, C_tmp2 );
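        /* Each 32-bit difference fits in 16 bits, so the Q14 values are read back from the
           low half-word of every lane (16-bit element indices 0, 2, 4, 6) */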
        diff_Q14[ 1 ] = _mm_extract_epi16( C_tmp1, 0 );
        diff_Q14[ 2 ] = _mm_extract_epi16( C_tmp1, 2 );
        diff_Q14[ 3 ] = _mm_extract_epi16( C_tmp1, 4 );
        diff_Q14[ 4 ] = _mm_extract_epi16( C_tmp1, 6 );

        /* Weighted rate */
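        /* mu_Q9 * cl_Q5[ k ] is Q9 * Q5 = Q14, so the rate term lands directly in the Q14 accumulator */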
        sum1_Q14 = silk_SMULBB( mu_Q9, cl_Q5[ k ] );

        /* Penalty for too large gain */
        sum1_Q14 = silk_ADD_LSHIFT32( sum1_Q14, silk_max( silk_SUB32( gain_tmp_Q7, max_gain_Q7 ), 0 ), 10 );

        silk_assert( sum1_Q14 >= 0 );

        /* first row of W_Q18 */
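        /* Vectorized form of the scalar first-row accumulation: _mm_mul_epi32 forms the 64-bit
           products W_Q18[ 1 ] * diff_Q14[ 1 ] and W_Q18[ 3 ] * diff_Q14[ 3 ], shifting the whole
           register right by two bytes emulates the >> 16 of silk_SMULWB, the odd-indexed pair is
           handled the same way after rotating both registers by one 32-bit lane, and the partial
           sums are doubled and combined horizontally before the W_Q18[ 0 ] diagonal term is
           folded in with scalar code */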
        C_tmp3 = _mm_loadu_si128( (__m128i *)(&W_Q18[ 1 ] ) );
        C_tmp4 = _mm_mul_epi32( C_tmp3, C_tmp1 );
        C_tmp4 = _mm_srli_si128( C_tmp4, 2 );

        C_tmp1 = _mm_shuffle_epi32( C_tmp1, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* shift right 4 bytes */
        C_tmp3 = _mm_shuffle_epi32( C_tmp3, _MM_SHUFFLE( 0, 3, 2, 1 ) ); /* shift right 4 bytes */

        C_tmp5 = _mm_mul_epi32( C_tmp3, C_tmp1 );
        C_tmp5 = _mm_srli_si128( C_tmp5, 2 );

        C_tmp5 = _mm_add_epi32( C_tmp4, C_tmp5 );
        C_tmp5 = _mm_slli_epi32( C_tmp5, 1 );

        C_tmp5 = _mm_add_epi32( C_tmp5, _mm_shuffle_epi32( C_tmp5, _MM_SHUFFLE( 0, 0, 0, 2 ) ) );
        sum2_Q16 = _mm_cvtsi128_si32( C_tmp5 );

        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 0 ], diff_Q14[ 0 ] );
        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 0 ] );
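        /* The remaining rows stay scalar; only the diagonal entry and the entries to its right
           are used, with the off-diagonal products doubled, which assumes W_Q18 is symmetric */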
        /* second row of W_Q18 */
        sum2_Q16 = silk_SMULWB( W_Q18[ 7 ], diff_Q14[ 2 ] );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 8 ], diff_Q14[ 3 ] );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 9 ], diff_Q14[ 4 ] );
        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 6 ], diff_Q14[ 1 ] );
        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 1 ] );

        /* third row of W_Q18 */
        sum2_Q16 = silk_SMULWB( W_Q18[ 13 ], diff_Q14[ 3 ] );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 14 ], diff_Q14[ 4 ] );
        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 12 ], diff_Q14[ 2 ] );
        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 2 ] );

        /* fourth row of W_Q18 */
        sum2_Q16 = silk_SMULWB( W_Q18[ 19 ], diff_Q14[ 4 ] );
        sum2_Q16 = silk_LSHIFT( sum2_Q16, 1 );
        sum2_Q16 = silk_SMLAWB( sum2_Q16, W_Q18[ 18 ], diff_Q14[ 3 ] );
        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 3 ] );

        /* last row of W_Q18 */
        sum2_Q16 = silk_SMULWB( W_Q18[ 24 ], diff_Q14[ 4 ] );
        sum1_Q14 = silk_SMLAWB( sum1_Q14, sum2_Q16, diff_Q14[ 4 ] );

        silk_assert( sum1_Q14 >= 0 );

        /* find best */
        if( sum1_Q14 < *rate_dist_Q14 ) {
            *rate_dist_Q14 = sum1_Q14;
            *ind = (opus_int8)k;
            *gain_Q7 = gain_tmp_Q7;
        }

        /* Go to next cbk vector */
        cb_row_Q7 += LTP_ORDER;
    }
}