/*
 * MMX optimized LPC DSP utils
 * Copyright (c) 2007 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/x86/asm.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavcodec/lpc.h"
yading@10
|
26
|
yading@10
|
27 #if HAVE_SSE2_INLINE
|
yading@10
|
28
|
yading@10
|
/**
 * Apply a Welch window to 'len' int32 samples and store the windowed
 * values as doubles in w_data (SSE2 inline asm).
 *
 * The window w(x) = 1 - x^2 (x stepping over [-1, 1] in increments of
 * c = 2/(N-1)) is evaluated incrementally: xmm7 carries the current pair
 * of window positions and is advanced by ff_pd_2 each iteration, so no
 * per-sample division is needed.  The loop processes both window halves
 * at once — operand %0 (i) walks forward from -n2 and %1 (j) walks
 * backward from +n2, both as byte offsets relative to the array midpoint.
 *
 * NOTE(review): "test $1, %5" dispatches on the parity of len: the odd
 * path uses the movupd/-1 variant (unaligned store around the center
 * sample), the even path uses movapd/-2 — presumably w_data is 16-byte
 * aligned for the aligned stores; confirm with callers.
 */
static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
                                        double *w_data)
{
    double c = 2.0 / (len-1.0);      /* window position step: 2/(N-1) */
    int n2 = len>>1;                 /* half of the window length */
    x86_reg i = -n2*sizeof(int32_t); /* ascending half, byte offset from midpoint */
    x86_reg j = n2*sizeof(int32_t);  /* descending half, byte offset from midpoint */
    __asm__ volatile(
        /* Seed xmm7 with the two initial window positions:
         * low lane = c - 1, high lane = c - 2 (addsd only touches the
         * low lane after the movlhps duplication). */
        "movsd %4, %%xmm7 \n\t"
        "movapd "MANGLE(ff_pd_1)", %%xmm6 \n\t" /* xmm6 = {1.0, 1.0} */
        "movapd "MANGLE(ff_pd_2)", %%xmm5 \n\t" /* xmm5 = {2.0, 2.0} = position step */
        "movlhps %%xmm7, %%xmm7 \n\t"
        "subpd %%xmm5, %%xmm7 \n\t"
        "addsd %%xmm6, %%xmm7 \n\t"
        /* Odd length -> unaligned variant below (label 1 in first WELCH). */
        "test $1, %5 \n\t"
        "jz 2f \n\t"
/* Core loop, instantiated twice (odd/even length):
 * compute w = 1 - x^2 for the current position pair (xmm0), mirror the
 * pair for the descending half with pshufd (xmm1), convert two int32
 * samples from each half to doubles (cvtpi2pd), multiply by the window,
 * store as doubles, then advance xmm7 by 2*c and step i/j by 8 bytes. */
#define WELCH(MOVPD, offset)\
    "1: \n\t"\
    "movapd %%xmm7, %%xmm1 \n\t"\
    "mulpd %%xmm1, %%xmm1 \n\t"\
    "movapd %%xmm6, %%xmm0 \n\t"\
    "subpd %%xmm1, %%xmm0 \n\t"\
    "pshufd $0x4e, %%xmm0, %%xmm1 \n\t"\
    "cvtpi2pd (%3,%0), %%xmm2 \n\t"\
    "cvtpi2pd "#offset"*4(%3,%1), %%xmm3 \n\t"\
    "mulpd %%xmm0, %%xmm2 \n\t"\
    "mulpd %%xmm1, %%xmm3 \n\t"\
    "movapd %%xmm2, (%2,%0,2) \n\t"\
    MOVPD" %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
    "subpd %%xmm5, %%xmm7 \n\t"\
    "sub $8, %1 \n\t"\
    "add $8, %0 \n\t"\
    "jl 1b \n\t"\

        WELCH("movupd", -1)
        "jmp 3f \n\t"
        "2: \n\t"
        WELCH("movapd", -2)
        "3: \n\t"
        :"+&r"(i), "+&r"(j)
        :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
        /* xmm4 is intentionally absent: the asm never writes it. */
        XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                          "%xmm5", "%xmm6", "%xmm7")
    );
#undef WELCH
}
yading@10
|
75
|
yading@10
|
/**
 * Compute autocorrelation coefficients of 'data' into autoc (SSE2).
 *
 * Two lags (j, j+1) are accumulated per pass of the outer loop; the
 * final pass (j == lag-2) accumulates three (j, j+1, j+2), so lag+1
 * outputs are produced in total.  NOTE(review): this layout assumes
 * 'lag' is even — confirm with callers.
 *
 * NOTE(review): the accumulators are seeded with ff_pd_1 (1.0), not 0 —
 * presumably matching a bias convention of the C reference
 * implementation; verify against libavcodec/lpc.c.
 */
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
                                      double *autoc)
{
    int j;

    /* Align 'data' to 16 bytes so movapd loads are legal.
     * NOTE(review): skipping one sample assumes the caller pads the
     * buffer so the dropped/extra elements are harmless — confirm. */
    if((x86_reg)data & 15)
        data++;

    for(j=0; j<lag; j+=2){
        /* Negative byte offset walked up to 0; loop exits when it
         * becomes non-negative ("add $16" then "jl 1b"). */
        x86_reg i = -len*sizeof(double);
        if(j == lag-2) {
            /* Final pass: three lags at once.  xmm0/xmm1/xmm2 each hold
             * two partial sums for lag j, j+1, j+2 respectively and are
             * reduced horizontally (movhlps + addsd) after the loop. */
            __asm__ volatile(
                "movsd "MANGLE(ff_pd_1)", %%xmm0 \n\t"
                "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t"
                "movsd "MANGLE(ff_pd_1)", %%xmm2 \n\t"
                "1: \n\t"
                "movapd (%2,%0), %%xmm3 \n\t"    /* data pair at offset i         */
                "movupd -8(%3,%0), %%xmm4 \n\t"  /* pair lagged by j+1 (unaligned) */
                "movapd (%3,%0), %%xmm5 \n\t"    /* pair lagged by j               */
                "mulpd %%xmm3, %%xmm4 \n\t"
                "mulpd %%xmm3, %%xmm5 \n\t"
                "mulpd -16(%3,%0), %%xmm3 \n\t"  /* pair lagged by j+2             */
                "addpd %%xmm4, %%xmm1 \n\t"
                "addpd %%xmm5, %%xmm0 \n\t"
                "addpd %%xmm3, %%xmm2 \n\t"
                "add $16, %0 \n\t"
                "jl 1b \n\t"
                /* Horizontal reduction of each accumulator pair. */
                "movhlps %%xmm0, %%xmm3 \n\t"
                "movhlps %%xmm1, %%xmm4 \n\t"
                "movhlps %%xmm2, %%xmm5 \n\t"
                "addsd %%xmm3, %%xmm0 \n\t"
                "addsd %%xmm4, %%xmm1 \n\t"
                "addsd %%xmm5, %%xmm2 \n\t"
                "movsd %%xmm0, (%1) \n\t"        /* autoc[j]   */
                "movsd %%xmm1, 8(%1) \n\t"       /* autoc[j+1] */
                "movsd %%xmm2, 16(%1) \n\t"      /* autoc[j+2] */
                :"+&r"(i)
                :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
                :"memory"
            );
        } else {
            /* Regular pass: two lags (j, j+1) per iteration. */
            __asm__ volatile(
                "movsd "MANGLE(ff_pd_1)", %%xmm0 \n\t"
                "movsd "MANGLE(ff_pd_1)", %%xmm1 \n\t"
                "1: \n\t"
                "movapd (%3,%0), %%xmm3 \n\t"    /* data pair at offset i          */
                "movupd -8(%4,%0), %%xmm4 \n\t"  /* pair lagged by j+1 (unaligned) */
                "mulpd %%xmm3, %%xmm4 \n\t"
                "mulpd (%4,%0), %%xmm3 \n\t"     /* pair lagged by j               */
                "addpd %%xmm4, %%xmm1 \n\t"
                "addpd %%xmm3, %%xmm0 \n\t"
                "add $16, %0 \n\t"
                "jl 1b \n\t"
                "movhlps %%xmm0, %%xmm3 \n\t"
                "movhlps %%xmm1, %%xmm4 \n\t"
                "addsd %%xmm3, %%xmm0 \n\t"
                "addsd %%xmm4, %%xmm1 \n\t"
                "movsd %%xmm0, %1 \n\t"
                "movsd %%xmm1, %2 \n\t"
                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
                :"r"(data+len), "r"(data+len-j)
            );
        }
    }
}
yading@10
|
141
|
yading@10
|
142 #endif /* HAVE_SSE2_INLINE */
|
yading@10
|
143
|
yading@10
|
144 av_cold void ff_lpc_init_x86(LPCContext *c)
|
yading@10
|
145 {
|
yading@10
|
146 #if HAVE_SSE2_INLINE
|
yading@10
|
147 int mm_flags = av_get_cpu_flags();
|
yading@10
|
148
|
yading@10
|
149 if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
|
yading@10
|
150 c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
|
yading@10
|
151 c->lpc_compute_autocorr = lpc_compute_autocorr_sse2;
|
yading@10
|
152 }
|
yading@10
|
153 #endif /* HAVE_SSE2_INLINE */
|
yading@10
|
154 }
|