;******************************************************************************
;* MMX/MMXEXT/SSE4-optimized functions for the RV30 and RV40 decoders
;* Copyright (C) 2012 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
pw_row_coeffs:  times 4 dw 13
                times 4 dw 17
                times 4 dw  7
pd_512:         times 2 dd 0x200
pw_col_coeffs:  dw  13,  13,  13, -13
                dw  17,   7,   7, -17
                dw  13, -13,  13,  13
                dw  -7,  17, -17,  -7

SECTION .text

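; DC-only inverse transform: both passes of the 4x4 IDCT scale the DC
; coefficient by 13, hence the 13*13 factor.  IDCT_DC_ROUND adds the 0x200
; rounder and shifts right by 10 (matching the full transform below);
; IDCT_DC_NOROUND scales by 13*13*3 and shifts right by 11 without rounding.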
%macro IDCT_DC_NOROUND 1
    imul   %1, 13*13*3
    sar    %1, 11
%endmacro

%macro IDCT_DC_ROUND 1
    imul   %1, 13*13
    add    %1, 0x200
    sar    %1, 10
%endmacro

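; Transform the DC coefficient and splat the result over all 16 int16_t
; coefficients of the 4x4 block.  IDCT_DC is %define'd to one of the two
; macros above before each instantiation.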
%macro rv34_idct 1
cglobal rv34_idct_%1, 1, 2, 0
    movsx   r1, word [r0]
    IDCT_DC r1
    movd    m0, r1d
    pshufw  m0, m0, 0
    movq    [r0+ 0], m0
    movq    [r0+ 8], m0
    movq    [r0+16], m0
    movq    [r0+24], m0
    REP_RET
%endmacro

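; Instantiate the DC-only transforms: one with rounding (rv34_idct_dc) and
; one without (rv34_idct_dc_noround).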
INIT_MMX mmxext
%define IDCT_DC IDCT_DC_ROUND
rv34_idct dc
%define IDCT_DC IDCT_DC_NOROUND
rv34_idct dc_noround

; ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc);
INIT_MMX mmx
cglobal rv34_idct_dc_add, 3, 3
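    ; MMX only has unsigned saturating byte adds/subs, so split the signed DC
    ; into a non-negative part splatted across m0 and a negated non-positive
    ; part splatted across m1: paddusb with m0 followed by psubusb with m1
    ; then applies the DC with the correct clamping to [0,255].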
    ; calculate DC
    IDCT_DC_ROUND r2
    pxor       m1, m1
    movd       m0, r2d
    psubw      m1, m0
    packuswb   m0, m0
    packuswb   m1, m1
    punpcklbw  m0, m0
    punpcklbw  m1, m1
    punpcklwd  m0, m0
    punpcklwd  m1, m1

    ; add DC
    lea        r2, [r0+r1*2]
    movh       m2, [r0]
    movh       m3, [r0+r1]
    movh       m4, [r2]
    movh       m5, [r2+r1]
    paddusb    m2, m0
    paddusb    m3, m0
    paddusb    m4, m0
    paddusb    m5, m0
    psubusb    m2, m1
    psubusb    m3, m1
    psubusb    m4, m1
    psubusb    m5, m1
    movh       [r0], m2
    movh       [r0+r1], m3
    movh       [r2], m4
    movh       [r2+r1], m5
    RET

; Load coeffs and perform row transform
; Output: coeffs in mm[0467], rounder in mm5
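; The butterflies are computed elementwise on the four coefficient rows b0..b3:
;   z0 = 13*(b0+b2), z1 = 13*(b0-b2), z3 = 17*b1 + 7*b3, z2 = 7*b1 - 17*b3
; with mm0 = z0+z3, mm4 = z1+z2, mm6 = z1-z2, mm7 = z0-z3 on output.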
%macro ROW_TRANSFORM 1
    pxor        mm7, mm7
    mova        mm0, [%1+ 0*8]
    mova        mm1, [%1+ 1*8]
    mova        mm2, [%1+ 2*8]
    mova        mm3, [%1+ 3*8]
    mova  [%1+ 0*8], mm7
    mova  [%1+ 1*8], mm7
    mova  [%1+ 2*8], mm7
    mova  [%1+ 3*8], mm7
    mova        mm4, mm0
    mova        mm6, [pw_row_coeffs+ 0]
    paddsw      mm0, mm2                ; b0 + b2
    psubsw      mm4, mm2                ; b0 - b2
    pmullw      mm0, mm6                ; *13 = z0
    pmullw      mm4, mm6                ; *13 = z1
    mova        mm5, mm1
    pmullw      mm1, [pw_row_coeffs+ 8] ; b1*17
    pmullw      mm5, [pw_row_coeffs+16] ; b1* 7
    mova        mm7, mm3
    pmullw      mm3, [pw_row_coeffs+ 8] ; b3*17
    pmullw      mm7, [pw_row_coeffs+16] ; b3* 7
    paddsw      mm1, mm7                ; z3 = b1*17 + b3* 7
    psubsw      mm5, mm3                ; z2 = b1* 7 - b3*17
    mova        mm7, mm0
    mova        mm6, mm4
    paddsw      mm0, mm1                ; z0 + z3
    psubsw      mm7, mm1                ; z0 - z3
    paddsw      mm4, mm5                ; z1 + z2
    psubsw      mm6, mm5                ; z1 - z2
    mova        mm5, [pd_512]           ; 0x200
%endmacro

; ff_rv34_idct_add_mmxext(uint8_t *dst, ptrdiff_t stride, int16_t *block);
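; Second pass, one call per destination row: the four words of %2 are split
; into even/odd pairs and multiply-accumulated against the column coefficients
; to get z0|z1 and z3|z2, which are combined, rounded with the 0x200 kept in
; mm5, shifted right by 10 and added to the four destination pixels with
; saturation.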
%macro COL_TRANSFORM 4
    pshufw      mm3, %2, 0xDD        ; col. 1,3,1,3
    pshufw      %2,  %2, 0x88        ; col. 0,2,0,2
    pmaddwd     %2,  %3              ; 13*c0+13*c2 | 13*c0-13*c2 = z0 | z1
    pmaddwd     mm3, %4              ; 17*c1+ 7*c3 |  7*c1-17*c3 = z3 | z2
    paddd       %2,  mm5
    pshufw      mm1, %2, 01001110b   ;    z1 | z0
    pshufw      mm2, mm3, 01001110b  ;    z2 | z3
    paddd       %2,  mm3             ; z0+z3 | z1+z2
    psubd       mm1, mm2             ; z1-z2 | z0-z3
    movd        mm3, %1
    psrad       %2,  10
    pxor        mm2, mm2
    psrad       mm1, 10
    punpcklbw   mm3, mm2
    packssdw    %2,  mm1
    paddw       %2,  mm3
    packuswb    %2,  %2
    movd         %1, %2
%endmacro
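; Full 4x4 IDCT and add: one ROW_TRANSFORM over the coefficient block, then
; four COL_TRANSFORM calls, one per destination row.  The column coefficients
; are cached in mm0/mm4 once those registers are free, so the last two rows
; read them from registers rather than memory.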
INIT_MMX mmxext
cglobal rv34_idct_add, 3,3,0, d, s, b
    ROW_TRANSFORM        bq
    COL_TRANSFORM      [dq], mm0, [pw_col_coeffs+ 0], [pw_col_coeffs+ 8]
    mova                mm0, [pw_col_coeffs+ 0]
    COL_TRANSFORM   [dq+sq], mm4, mm0, [pw_col_coeffs+ 8]
    mova                mm4, [pw_col_coeffs+ 8]
    lea                  dq, [dq + 2*sq]
    COL_TRANSFORM      [dq], mm6, mm0, mm4
    COL_TRANSFORM   [dq+sq], mm7, mm0, mm4
    ret

; ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);
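; Unlike the MMX version, the destination rows are handled in pairs: they are
; gathered into m2/m4, widened to words, the splatted DC in m0 is added, and
; the results are packed back to bytes with saturation and stored one row at
; a time via movd/pextrd.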
INIT_XMM sse4
cglobal rv34_idct_dc_add, 3, 3, 6
    ; calculate DC
    IDCT_DC_ROUND r2
    pxor         m1, m1

    ; add DC
    movd         m0, r2d
    lea          r2, [r0+r1*2]
    movd         m2, [r0]
    movd         m3, [r0+r1]
    pshuflw      m0, m0, 0
    movd         m4, [r2]
    movd         m5, [r2+r1]
    punpcklqdq   m0, m0
    punpckldq    m2, m3
    punpckldq    m4, m5
    punpcklbw    m2, m1
    punpcklbw    m4, m1
    paddw        m2, m0
    paddw        m4, m0
    packuswb     m2, m4
    movd       [r0], m2
    pextrd  [r0+r1], m2, 1
    pextrd     [r2], m2, 2
    pextrd  [r2+r1], m2, 3
    RET