yading@10
|
1 /*
|
yading@10
|
2 * x86-optimized AC-3 DSP utils
|
yading@10
|
3 * Copyright (c) 2011 Justin Ruggles
|
yading@10
|
4 *
|
yading@10
|
5 * This file is part of FFmpeg.
|
yading@10
|
6 *
|
yading@10
|
7 * FFmpeg is free software; you can redistribute it and/or
|
yading@10
|
8 * modify it under the terms of the GNU Lesser General Public
|
yading@10
|
9 * License as published by the Free Software Foundation; either
|
yading@10
|
10 * version 2.1 of the License, or (at your option) any later version.
|
yading@10
|
11 *
|
yading@10
|
12 * FFmpeg is distributed in the hope that it will be useful,
|
yading@10
|
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
yading@10
|
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
yading@10
|
15 * Lesser General Public License for more details.
|
yading@10
|
16 *
|
yading@10
|
17 * You should have received a copy of the GNU Lesser General Public
|
yading@10
|
18 * License along with FFmpeg; if not, write to the Free Software
|
yading@10
|
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
yading@10
|
20 */
|
yading@10
|
21
|
yading@10
|
22 #include "libavutil/mem.h"
|
yading@10
|
23 #include "libavutil/x86/asm.h"
|
yading@10
|
24 #include "libavutil/x86/cpu.h"
|
yading@10
|
25 #include "dsputil_mmx.h"
|
yading@10
|
26 #include "libavcodec/ac3.h"
|
yading@10
|
27 #include "libavcodec/ac3dsp.h"
|
yading@10
|
28
|
yading@10
|
/*
 * Prototypes for the SIMD implementations (defined outside this file, in
 * assembly).  Each group provides the same operation at several
 * instruction-set levels; ff_ac3dsp_init_x86() below installs the best
 * variant the running CPU supports.
 */

/* per-coefficient minimum of exponents across reused blocks */
void ff_ac3_exponent_min_mmx   (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2  (uint8_t *exp, int num_reuse_blocks, int nb_coefs);

/* maximum MSB of absolute values in an int16 array */
int ff_ac3_max_msb_abs_int16_mmx   (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2  (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3 (const int16_t *src, int len);

/* in-place left shift of int16 array */
void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);

/* in-place right shift of int32 array */
void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);

/* float -> 24-bit fixed-point conversion */
void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse  (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);

int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);

void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);
|
yading@10
|
53
|
yading@10
|
/* NOTE(review): on 32-bit builds with the Intel compiler, force-disable the
 * 7-register inline-asm path below — presumably ICC cannot satisfy its
 * register-allocation requirements; confirm against the original commit. */
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
#       undef HAVE_7REGS
#       define HAVE_7REGS 0
#endif

#if HAVE_SSE_INLINE && HAVE_7REGS

/* Conditional-expansion helpers for the downmix macros below:
 * IF1(x) emits its argument, IF0(x) discards it.  The MIX5/MIX_MISC macros
 * take one of these for each of their mono/stereo-specific instructions. */
#define IF1(x) x
#define IF0(x)
|
yading@10
|
63
|
yading@10
|
/*
 * 5-channel -> mono/stereo downmix kernel (SSE inline asm).
 *
 * Exactly one of 'mono'/'stereo' is IF1 and the other IF0 (see call sites in
 * ac3_downmix_sse), selecting which accumulate/store instructions are emitted.
 * Only three coefficients are loaded — matrix[0][0] (byte offset 0),
 * matrix[1][0] (offset 8) and matrix[3][0] (offset 24) — which relies on the
 * coefficient equalities the caller has already verified via matrix_cmp.
 * Operands: %0 = i (negative byte offset counting up to 0), %1 = matrix,
 * %2..%6 = end pointers of channels 0..4; results are stored back into
 * channel 0 (and channel 1 for stereo).
 */
#define MIX5(mono, stereo)                                            \
    __asm__ volatile (                                                \
        /* broadcast the three coefficients to all 4 lanes */         \
        "movss           0(%1), %%xmm5          \n"                   \
        "movss           8(%1), %%xmm6          \n"                   \
        "movss          24(%1), %%xmm7          \n"                   \
        "shufps $0, %%xmm5, %%xmm5              \n"                   \
        "shufps $0, %%xmm6, %%xmm6              \n"                   \
        "shufps $0, %%xmm7, %%xmm7              \n"                   \
        "1:                                     \n"                   \
        /* load 4 samples from each of the 5 channels */              \
        "movaps       (%0, %2), %%xmm0          \n"                   \
        "movaps       (%0, %3), %%xmm1          \n"                   \
        "movaps       (%0, %4), %%xmm2          \n"                   \
        "movaps       (%0, %5), %%xmm3          \n"                   \
        "movaps       (%0, %6), %%xmm4          \n"                   \
        "mulps         %%xmm5, %%xmm0           \n"                   \
        "mulps         %%xmm6, %%xmm1           \n"                   \
        "mulps         %%xmm5, %%xmm2           \n"                   \
        "mulps         %%xmm7, %%xmm3           \n"                   \
        "mulps         %%xmm7, %%xmm4           \n"                   \
 stereo("addps         %%xmm1, %%xmm0           \n")                  \
        "addps         %%xmm1, %%xmm2           \n"                   \
        "addps         %%xmm3, %%xmm0           \n"                   \
        "addps         %%xmm4, %%xmm2           \n"                   \
   mono("addps         %%xmm2, %%xmm0           \n")                  \
        /* write back: channel 0 always, channel 1 only for stereo */ \
        "movaps        %%xmm0, (%0, %2)         \n"                   \
 stereo("movaps        %%xmm2, (%0, %3)         \n")                  \
        "add              $16, %0               \n"                   \
        "jl                1b                   \n"                   \
        : "+&r"(i)                                                    \
        : "r"(matrix),                                                \
          "r"(samples[0] + len),                                      \
          "r"(samples[1] + len),                                      \
          "r"(samples[2] + len),                                      \
          "r"(samples[3] + len),                                      \
          "r"(samples[4] + len)                                       \
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",            \
                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)           \
          "memory"                                                    \
    );
|
yading@10
|
103
|
yading@10
|
/*
 * Generic in_ch -> 1/2 channel downmix kernel (SSE inline asm) for the cases
 * MIX5 does not handle.  'stereo' is IF1 or IF0 (see ac3_downmix_sse).
 *
 * Operands: %0 = i (negative sample byte offset, counts up to 0),
 * %1/%2/%3 = j/k/m scratch registers, %4 = matrix_simd + in_ch,
 * %5 = -4*(in_ch-1) (negative channel counter start), %6 = samp + in_ch,
 * %7 = sizeof(float *), %8 = sizeof(float *)/4 (pointer-table scale).
 * The outer loop (1:) walks samples; the inner loop (2:) accumulates
 * channels 1..in_ch-1 using their broadcast coefficients in matrix_simd.
 *
 * NOTE(review): channel 0 is multiplied by %%xmm4/%%xmm5, which are NOT
 * loaded here — they appear to carry over the last values stored by the
 * preceding asm block in ac3_downmix_sse (channel 0's broadcast
 * coefficients).  Relying on register state across separate asm statements
 * is fragile, and this statement lists no xmm clobbers — confirm against
 * upstream history before touching either asm block.
 */
#define MIX_MISC(stereo)                                              \
    __asm__ volatile (                                                \
        "mov               %5, %2               \n"                   \
        "1:                                     \n"                   \
        /* %3 = samp[0]: the output channel pointer */                \
        "mov  -%c7(%6, %2, %c8), %3             \n"                   \
        "movaps      (%3, %0), %%xmm0           \n"                   \
 stereo("movaps        %%xmm0, %%xmm1           \n")                  \
        "mulps         %%xmm4, %%xmm0           \n"                   \
 stereo("mulps         %%xmm5, %%xmm1           \n")                  \
        "2:                                     \n"                   \
        /* accumulate the remaining input channels */                 \
        "mov      (%6, %2, %c8), %1             \n"                   \
        "movaps      (%1, %0), %%xmm2           \n"                   \
 stereo("movaps        %%xmm2, %%xmm3           \n")                  \
        "mulps    (%4, %2, 8), %%xmm2           \n"                   \
 stereo("mulps  16(%4, %2, 8), %%xmm3           \n")                  \
        "addps         %%xmm2, %%xmm0           \n"                   \
 stereo("addps         %%xmm3, %%xmm1           \n")                  \
        "add               $4, %2               \n"                   \
        "jl                2b                   \n"                   \
        /* reset channel counter, store result(s), advance samples */ \
        "mov               %5, %2               \n"                   \
 stereo("mov      (%6, %2, %c8), %1             \n")                  \
        "movaps        %%xmm0, (%3, %0)         \n"                   \
 stereo("movaps        %%xmm1, (%1, %0)         \n")                  \
        "add              $16, %0               \n"                   \
        "jl                1b                   \n"                   \
        : "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m)                      \
        : "r"(matrix_simd + in_ch),                                   \
          "g"((intptr_t) - 4 * (in_ch - 1)),                          \
          "r"(samp + in_ch),                                          \
          "i"(sizeof(float *)), "i"(sizeof(float *)/4)                \
        : "memory"                                                    \
    );
|
yading@10
|
136
|
yading@10
|
/**
 * Downmix multi-channel float audio in place using SSE inline assembly.
 *
 * @param samples array of in_ch channel pointers; channel 0 (and channel 1
 *                when out_ch == 2) is overwritten with the downmixed result
 * @param matrix  in_ch x 2 downmix coefficient matrix
 * @param out_ch  number of output channels
 * @param in_ch   number of input channels
 * @param len     number of samples per channel
 *
 * NOTE(review): the asm uses movaps and 16-byte strides, so the channel
 * buffers presumably must be 16-byte aligned and len a multiple of 4 —
 * confirm against callers.
 */
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    /* Compare coefficient bit patterns as integers to detect exact zeros
     * and exact equality without floating-point compares. */
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i, j, k, m;

    /* negative byte offset that the asm loops count up to zero */
    i = -len * sizeof(float);
    if (in_ch == 5 && out_ch == 2 &&
        !(matrix_cmp[0][1] | matrix_cmp[2][0] |
          matrix_cmp[3][1] | matrix_cmp[4][0] |
          (matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
          (matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
        /* 5 -> 2 fast path: cross-channel coefficients are zero and the
         * center/surround coefficients are symmetric, so MIX5 only needs
         * three distinct coefficients */
        MIX5(IF0, IF1);
    } else if (in_ch == 5 && out_ch == 1 &&
               matrix_cmp[0][0] == matrix_cmp[2][0] &&
               matrix_cmp[3][0] == matrix_cmp[4][0]) {
        /* 5 -> 1 fast path: same three-coefficient structure */
        MIX5(IF1, IF0);
    } else {
        /* Generic path: broadcast every coefficient pair into 4-lane
         * vectors for MIX_MISC. */
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        float *samp[AC3_MAX_CHANNELS];

        for (j = 0; j < in_ch; j++)
            samp[j] = samples[j] + len;

        /* j = byte size of the in_ch x 2 coefficient table; the loop walks
         * it backwards, splatting each pair into matrix_simd.  On the final
         * iteration %%xmm4/%%xmm5 end up holding channel 0's broadcast
         * coefficients, which MIX_MISC below reads directly — see the
         * review note on MIX_MISC. */
        j = 2 * in_ch * sizeof(float);
        __asm__ volatile (
            "1:                                 \n"
            "sub               $8, %0           \n"
            "movss       (%2, %0), %%xmm4       \n"
            "movss      4(%2, %0), %%xmm5       \n"
            "shufps $0, %%xmm4, %%xmm4          \n"
            "shufps $0, %%xmm5, %%xmm5          \n"
            "movaps %%xmm4,   (%1, %0, 4)       \n"
            "movaps %%xmm5, 16(%1, %0, 4)       \n"
            "jg                1b               \n"
            : "+&r"(j)
            : "r"(matrix_simd), "r"(matrix)
            : "memory"
        );
        if (out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}
|
yading@10
|
183
|
yading@10
|
184 #endif /* HAVE_SSE_INLINE && HAVE_7REGS */
|
yading@10
|
185
|
yading@10
|
186 av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
|
yading@10
|
187 {
|
yading@10
|
188 int mm_flags = av_get_cpu_flags();
|
yading@10
|
189
|
yading@10
|
190 if (EXTERNAL_MMX(mm_flags)) {
|
yading@10
|
191 c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
|
yading@10
|
192 c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
|
yading@10
|
193 c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
|
yading@10
|
194 c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
|
yading@10
|
195 }
|
yading@10
|
196 if (EXTERNAL_AMD3DNOW(mm_flags)) {
|
yading@10
|
197 c->extract_exponents = ff_ac3_extract_exponents_3dnow;
|
yading@10
|
198 if (!bit_exact) {
|
yading@10
|
199 c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
|
yading@10
|
200 }
|
yading@10
|
201 }
|
yading@10
|
202 if (EXTERNAL_MMXEXT(mm_flags)) {
|
yading@10
|
203 c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
|
yading@10
|
204 c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
|
yading@10
|
205 }
|
yading@10
|
206 if (EXTERNAL_SSE(mm_flags)) {
|
yading@10
|
207 c->float_to_fixed24 = ff_float_to_fixed24_sse;
|
yading@10
|
208 }
|
yading@10
|
209 if (EXTERNAL_SSE2(mm_flags)) {
|
yading@10
|
210 c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
|
yading@10
|
211 c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
|
yading@10
|
212 c->float_to_fixed24 = ff_float_to_fixed24_sse2;
|
yading@10
|
213 c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
|
yading@10
|
214 c->extract_exponents = ff_ac3_extract_exponents_sse2;
|
yading@10
|
215 if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
|
yading@10
|
216 c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
|
yading@10
|
217 c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
|
yading@10
|
218 }
|
yading@10
|
219 }
|
yading@10
|
220 if (EXTERNAL_SSSE3(mm_flags)) {
|
yading@10
|
221 c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
|
yading@10
|
222 if (!(mm_flags & AV_CPU_FLAG_ATOM)) {
|
yading@10
|
223 c->extract_exponents = ff_ac3_extract_exponents_ssse3;
|
yading@10
|
224 }
|
yading@10
|
225 }
|
yading@10
|
226
|
yading@10
|
227 #if HAVE_SSE_INLINE && HAVE_7REGS
|
yading@10
|
228 if (INLINE_SSE(mm_flags)) {
|
yading@10
|
229 c->downmix = ac3_downmix_sse;
|
yading@10
|
230 }
|
yading@10
|
231 #endif
|
yading@10
|
232 }
|