;******************************************************************************
;*
;* Copyright (c) 2000-2001 Fabrice Bellard <fabrice@bellard.org>
;* Copyright (c) Nick Kurshev <nickols_k@mail.ru>
;* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2002 Zdenek Kabelac <kabi@informatics.muni.cz>
;* Copyright (c) 2013 Daniel Kang
;*
;* MMX optimized hpel functions
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
cextern pb_1

SECTION_TEXT

; put_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
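;
; Horizontal half-pel interpolation with rounding: each output byte is the
; average of a source pixel and its right-hand neighbour, where PAVGB computes
; (a + b + 1) >> 1. The loop is unrolled to four rows per iteration, so h is
; assumed to be a multiple of 4.
;
; Roughly equivalent C, as a reference sketch (not part of the original file):
;     for (i = 0; i < h; i++) {
;         for (j = 0; j < 8; j++)
;             block[j] = (pixels[j] + pixels[j + 1] + 1) >> 1;
;         pixels += line_size;
;         block  += line_size;
;     }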
%macro PUT_PIXELS8_X2 0
cglobal put_pixels8_x2, 4,5
    lea     r4, [r2*2]
.loop:
    mova    m0, [r1]
    mova    m1, [r1+r2]
    PAVGB   m0, [r1+1]
    PAVGB   m1, [r1+r2+1]
    mova    [r0], m0
    mova    [r0+r2], m1
    add     r1, r4
    add     r0, r4
    mova    m0, [r1]
    mova    m1, [r1+r2]
    PAVGB   m0, [r1+1]
    PAVGB   m1, [r1+r2+1]
    add     r1, r4
    mova    [r0], m0
    mova    [r0+r2], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_PIXELS8_X2
INIT_MMX 3dnow
PUT_PIXELS8_X2


; put_pixels16_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
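;
; Same rounded horizontal interpolation as put_pixels8_x2, but for a block 16
; pixels wide: each row is handled as two 8-byte MMX halves ([r1] and [r1+8]).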
%macro PUT_PIXELS_16 0
cglobal put_pixels16_x2, 4,5
    lea     r4, [r2*2]
.loop:
    mova    m0, [r1]
    mova    m1, [r1+r2]
    mova    m2, [r1+8]
    mova    m3, [r1+r2+8]
    PAVGB   m0, [r1+1]
    PAVGB   m1, [r1+r2+1]
    PAVGB   m2, [r1+9]
    PAVGB   m3, [r1+r2+9]
    mova    [r0], m0
    mova    [r0+r2], m1
    mova    [r0+8], m2
    mova    [r0+r2+8], m3
    add     r1, r4
    add     r0, r4
    mova    m0, [r1]
    mova    m1, [r1+r2]
    mova    m2, [r1+8]
    mova    m3, [r1+r2+8]
    PAVGB   m0, [r1+1]
    PAVGB   m1, [r1+r2+1]
    PAVGB   m2, [r1+9]
    PAVGB   m3, [r1+r2+9]
    add     r1, r4
    mova    [r0], m0
    mova    [r0+r2], m1
    mova    [r0+8], m2
    mova    [r0+r2+8], m3
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_PIXELS_16
INIT_MMX 3dnow
PUT_PIXELS_16


; put_no_rnd_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
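;
; "No rounding" horizontal interpolation, approximated: PAVGB always rounds up,
; so pb_1 is first subtracted (saturating) from one operand, turning
; (a + b + 1) >> 1 into (a + b) >> 1 for every case except a == 0, where the
; subtraction saturates and the result can be one too high. A bit-exact
; variant follows below.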
%macro PUT_NO_RND_PIXELS8_X2 0
cglobal put_no_rnd_pixels8_x2, 4,5
    mova    m6, [pb_1]
    lea     r4, [r2*2]
.loop:
    mova    m0, [r1]
    mova    m2, [r1+r2]
    mova    m1, [r1+1]
    mova    m3, [r1+r2+1]
    add     r1, r4
    psubusb m0, m6
    psubusb m2, m6
    PAVGB   m0, m1
    PAVGB   m2, m3
    mova    [r0], m0
    mova    [r0+r2], m2
    mova    m0, [r1]
    mova    m1, [r1+1]
    mova    m2, [r1+r2]
    mova    m3, [r1+r2+1]
    add     r0, r4
    add     r1, r4
    psubusb m0, m6
    psubusb m2, m6
    PAVGB   m0, m1
    PAVGB   m2, m3
    mova    [r0], m0
    mova    [r0+r2], m2
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_NO_RND_PIXELS8_X2
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_X2


; put_no_rnd_pixels8_x2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
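;
; Bit-exact "no rounding" horizontal interpolation. It relies on the identity
;     (a + b) >> 1 == ~((~a + ~b + 1) >> 1)
; for unsigned bytes: both inputs are complemented (pxor with the all-ones
; register m6), averaged with the rounding-up PAVGB, and complemented back.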
%macro PUT_NO_RND_PIXELS8_X2_EXACT 0
cglobal put_no_rnd_pixels8_x2_exact, 4,5
    lea     r4, [r2*3]
    pcmpeqb m6, m6
.loop:
    mova    m0, [r1]
    mova    m2, [r1+r2]
    mova    m1, [r1+1]
    mova    m3, [r1+r2+1]
    pxor    m0, m6
    pxor    m2, m6
    pxor    m1, m6
    pxor    m3, m6
    PAVGB   m0, m1
    PAVGB   m2, m3
    pxor    m0, m6
    pxor    m2, m6
    mova    [r0], m0
    mova    [r0+r2], m2
    mova    m0, [r1+r2*2]
    mova    m1, [r1+r2*2+1]
    mova    m2, [r1+r4]
    mova    m3, [r1+r4+1]
    pxor    m0, m6
    pxor    m1, m6
    pxor    m2, m6
    pxor    m3, m6
    PAVGB   m0, m1
    PAVGB   m2, m3
    pxor    m0, m6
    pxor    m2, m6
    mova    [r0+r2*2], m0
    mova    [r0+r4], m2
    lea     r1, [r1+r2*4]
    lea     r0, [r0+r2*4]
    sub     r3d, 4
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_NO_RND_PIXELS8_X2_EXACT
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_X2_EXACT


; put_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
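;
; Vertical half-pel interpolation with rounding: each output row is the PAVGB
; average of two vertically adjacent source rows. The previously loaded row is
; carried in a register across iterations, so each source row is read only once.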
%macro PUT_PIXELS8_Y2 0
cglobal put_pixels8_y2, 4,5
    lea     r4, [r2*2]
    mova    m0, [r1]
    sub     r0, r2
.loop:
    mova    m1, [r1+r2]
    mova    m2, [r1+r4]
    add     r1, r4
    PAVGB   m0, m1
    PAVGB   m1, m2
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    mova    m1, [r1+r2]
    mova    m0, [r1+r4]
    add     r0, r4
    add     r1, r4
    PAVGB   m2, m1
    PAVGB   m1, m0
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_PIXELS8_Y2
INIT_MMX 3dnow
PUT_PIXELS8_Y2


; put_no_rnd_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
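;
; Approximate "no rounding" vertical interpolation: the shared middle row is
; lowered by pb_1 (saturating) before averaging, the same trick as in
; put_no_rnd_pixels8_x2.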
%macro PUT_NO_RND_PIXELS8_Y2 0
cglobal put_no_rnd_pixels8_y2, 4,5
    mova    m6, [pb_1]
    lea     r4, [r2+r2]
    mova    m0, [r1]
    sub     r0, r2
.loop:
    mova    m1, [r1+r2]
    mova    m2, [r1+r4]
    add     r1, r4
    psubusb m1, m6
    PAVGB   m0, m1
    PAVGB   m1, m2
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    mova    m1, [r1+r2]
    mova    m0, [r1+r4]
    add     r0, r4
    add     r1, r4
    psubusb m1, m6
    PAVGB   m2, m1
    PAVGB   m1, m0
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_NO_RND_PIXELS8_Y2
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_Y2


; put_no_rnd_pixels8_y2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
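;
; Bit-exact "no rounding" vertical interpolation, using the same
; complement-average-complement identity as put_no_rnd_pixels8_x2_exact.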
%macro PUT_NO_RND_PIXELS8_Y2_EXACT 0
cglobal put_no_rnd_pixels8_y2_exact, 4,5
    lea     r4, [r2*3]
    mova    m0, [r1]
    pcmpeqb m6, m6
    add     r1, r2
    pxor    m0, m6
.loop:
    mova    m1, [r1]
    mova    m2, [r1+r2]
    pxor    m1, m6
    pxor    m2, m6
    PAVGB   m0, m1
    PAVGB   m1, m2
    pxor    m0, m6
    pxor    m1, m6
    mova    [r0], m0
    mova    [r0+r2], m1
    mova    m1, [r1+r2*2]
    mova    m0, [r1+r4]
    pxor    m1, m6
    pxor    m0, m6
    PAVGB   m2, m1
    PAVGB   m1, m0
    pxor    m2, m6
    pxor    m1, m6
    mova    [r0+r2*2], m2
    mova    [r0+r4], m1
    lea     r1, [r1+r2*4]
    lea     r0, [r0+r2*4]
    sub     r3d, 4
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PUT_NO_RND_PIXELS8_Y2_EXACT
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_Y2_EXACT


; avg_pixels8(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
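;
; No interpolation: the source block is averaged (with rounding) into the
; destination, i.e. block[j] = (block[j] + pixels[j] + 1) >> 1 for each byte.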
%macro AVG_PIXELS8 0
cglobal avg_pixels8, 4,5
    lea     r4, [r2*2]
.loop:
    mova    m0, [r0]
    mova    m1, [r0+r2]
    PAVGB   m0, [r1]
    PAVGB   m1, [r1+r2]
    mova    [r0], m0
    mova    [r0+r2], m1
    add     r1, r4
    add     r0, r4
    mova    m0, [r0]
    mova    m1, [r0+r2]
    PAVGB   m0, [r1]
    PAVGB   m1, [r1+r2]
    add     r1, r4
    mova    [r0], m0
    mova    [r0+r2], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX 3dnow
AVG_PIXELS8


; avg_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
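;
; Horizontally interpolated source averaged into the destination: the source is
; first averaged with its right neighbour, then averaged with the existing block.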
%macro AVG_PIXELS8_X2 0
cglobal avg_pixels8_x2, 4,5
    lea     r4, [r2*2]
.loop:
    mova    m0, [r1]
    mova    m2, [r1+r2]
    PAVGB   m0, [r1+1]
    PAVGB   m2, [r1+r2+1]
    PAVGB   m0, [r0]
    PAVGB   m2, [r0+r2]
    add     r1, r4
    mova    [r0], m0
    mova    [r0+r2], m2
    mova    m0, [r1]
    mova    m2, [r1+r2]
    PAVGB   m0, [r1+1]
    PAVGB   m2, [r1+r2+1]
    add     r0, r4
    add     r1, r4
    PAVGB   m0, [r0]
    PAVGB   m2, [r0+r2]
    mova    [r0], m0
    mova    [r0+r2], m2
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
AVG_PIXELS8_X2
INIT_MMX 3dnow
AVG_PIXELS8_X2


; avg_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
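;
; Vertically interpolated source averaged into the destination, carrying the
; previously loaded row in a register as in put_pixels8_y2.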
%macro AVG_PIXELS8_Y2 0
cglobal avg_pixels8_y2, 4,5
    lea     r4, [r2*2]
    mova    m0, [r1]
    sub     r0, r2
.loop:
    mova    m1, [r1+r2]
    mova    m2, [r1+r4]
    add     r1, r4
    PAVGB   m0, m1
    PAVGB   m1, m2
    mova    m3, [r0+r2]
    mova    m4, [r0+r4]
    PAVGB   m0, m3
    PAVGB   m1, m4
    mova    [r0+r2], m0
    mova    [r0+r4], m1
    mova    m1, [r1+r2]
    mova    m0, [r1+r4]
    PAVGB   m2, m1
    PAVGB   m1, m0
    add     r0, r4
    add     r1, r4
    mova    m3, [r0+r2]
    mova    m4, [r0+r4]
    PAVGB   m2, m3
    PAVGB   m1, m4
    mova    [r0+r2], m2
    mova    [r0+r4], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
AVG_PIXELS8_Y2
INIT_MMX 3dnow
AVG_PIXELS8_Y2


; avg_pixels8_xy2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
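;
; 2D (x and y) half-pel interpolation averaged into the destination. This is an
; approximation: rather than the exact (a + b + c + d + 2) >> 2 of the four
; neighbouring pixels, it chains byte averages (horizontal, then vertical, then
; with the destination), subtracting pb_1 from one row beforehand to offset
; part of the accumulated round-up bias, so results may differ slightly from a
; bit-exact implementation.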
%macro AVG_PIXELS8_XY2 0
cglobal avg_pixels8_xy2, 4,5
    mova    m6, [pb_1]
    lea     r4, [r2*2]
    mova    m0, [r1]
    pavgb   m0, [r1+1]
.loop:
    mova    m2, [r1+r4]
    mova    m1, [r1+r2]
    psubusb m2, m6
    pavgb   m1, [r1+r2+1]
    pavgb   m2, [r1+r4+1]
    add     r1, r4
    pavgb   m0, m1
    pavgb   m1, m2
    pavgb   m0, [r0]
    pavgb   m1, [r0+r2]
    mova    [r0], m0
    mova    [r0+r2], m1
    mova    m1, [r1+r2]
    mova    m0, [r1+r4]
    pavgb   m1, [r1+r2+1]
    pavgb   m0, [r1+r4+1]
    add     r0, r4
    add     r1, r4
    pavgb   m2, m1
    pavgb   m1, m0
    pavgb   m2, [r0]
    pavgb   m1, [r0+r2]
    mova    [r0], m2
    mova    [r0+r2], m1
    add     r0, r4
    sub     r3d, 4
    jne .loop
    REP_RET
%endmacro

INIT_MMX mmxext
AVG_PIXELS8_XY2
INIT_MMX 3dnow
AVG_PIXELS8_XY2