;******************************************************************************
;* Core video DSP functions
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

; void ff_emu_edge_core(uint8_t *buf, const uint8_t *src, x86_reg linesize,
;                       x86_reg start_y, x86_reg end_y, x86_reg block_h,
;                       x86_reg start_x, x86_reg end_x, x86_reg block_w);
;
; The function itself is below. It is essentially a wrapper around a very
; simple
;     w = end_x - start_x
;     if (w) {
;         if (w > 22) {
;             jump to the slow loop functions
;         } else {
;             jump to the fast loop functions
;         }
;     }
;
; ... and then the same for the left/right extend. See below for the loop
; function implementations. The fast versions are fixed-width, the slow
; versions are variable-width.
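;
; Editorial note: a worked example of the fast-path dispatch (a sketch derived
; from the code below, not part of the original comments). The 22 fast
; vertical functions are emitted back to back, each aligned to a 128-byte
; boundary, so consecutive entry points are exactly 128 bytes apart:
;
;     sal  w_reg, 7          ; offset = w * 128
;     lea  w_reg, [.emuedge_v_extend_1 - 128 + w_reg]
;     call w_reg             ; e.g. for w == 5 this calls .emuedge_v_extend_5
;
; i.e. the base address is .emuedge_v_extend_1 minus one 128-byte stride
; (written below symbolically as .emuedge_v_extend_1 -
; (.emuedge_v_extend_2 - .emuedge_v_extend_1)), so adding w*128 lands on
; .emuedge_v_extend_w.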

%macro EMU_EDGE_FUNC 0
%if ARCH_X86_64
%define w_reg r7
cglobal emu_edge_core, 6, 9, 1
    mov        r8, r5           ; save block_h
%else
%define w_reg r6
cglobal emu_edge_core, 2, 7, 0
    mov        r4, r4m          ; end_y
    mov        r5, r5m          ; block_h
%endif

    ; start with vertical extend (top/bottom) and body pixel copy
    mov     w_reg, r7m
    sub     w_reg, r6m          ; w = end_x - start_x
    sub        r5, r4           ; block_h -= end_y (rows of bottom extend)
%if ARCH_X86_64
    sub        r4, r3           ; end_y -= start_y (rows of body copy)
%else
    sub        r4, dword r3m
%endif
    cmp     w_reg, 22
    jg .slow_v_extend_loop
%if ARCH_X86_32
    mov        r2, r2m          ; linesize
%endif
    sal     w_reg, 7            ; w * 128
%ifdef PIC
    lea       rax, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)]
    add     w_reg, rax
%else
    lea     w_reg, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)+w_reg]
%endif
    call    w_reg               ; fast top extend, body copy and bottom extend
.v_extend_end:

    ; horizontal extend (left/right)
    mov     w_reg, r6m          ; start_x
    sub        r0, w_reg
%if ARCH_X86_64
    mov        r3, r0           ; backup of buf+block_h*linesize
    mov        r5, r8
%else
    mov       r0m, r0           ; backup of buf+block_h*linesize
    mov        r5, r5m
%endif
    test    w_reg, w_reg
    jz .right_extend
    cmp     w_reg, 22
    jg .slow_left_extend_loop
    mov        r1, w_reg
    dec     w_reg
    ; FIXME we can do an if size == 1 here if that makes any speed difference, test me
    sar     w_reg, 1
    sal     w_reg, 6
    ; r0=buf+block_h*linesize,r7(64)/r6(32)=start_x offset for funcs
    ; r6(rax)/r3(ebx)=val,r2=linesize,r1=start_x,r5=block_h
%ifdef PIC
    lea       rax, [.emuedge_extend_left_2]
    add     w_reg, rax
%else
    lea     w_reg, [.emuedge_extend_left_2+w_reg]
%endif
    call    w_reg

    ; now r3(64)/r0(32)=buf,r2=linesize,r8/r5=block_h,r6/r3=val, r7/r6=end_x, r1=block_w
.right_extend:
%if ARCH_X86_32
    mov        r0, r0m
    mov        r5, r5m
%endif
    mov     w_reg, r7m          ; end_x
    mov        r1, r8m          ; block_w
    mov        r4, r1
    sub        r1, w_reg
    jz .h_extend_end            ; if (end_x == block_w) goto h_extend_end
    cmp        r1, 22
    jg .slow_right_extend_loop
    dec        r1
    ; FIXME we can do an if size == 1 here if that makes any speed difference, test me
    sar        r1, 1
    sal        r1, 6
%ifdef PIC
    lea       rax, [.emuedge_extend_right_2]
    add        r1, rax
%else
    lea        r1, [.emuedge_extend_right_2+r1]
%endif
    call       r1
.h_extend_end:
    RET

%if ARCH_X86_64
%define vall  al
%define valh  ah
%define valw  ax
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif
%define vald eax
%else
%define vall  bl
%define valh  bh
%define valw  bx
%define valw2 r6w
%define valw3 valw2
%define valw4 valw3
%define vald ebx
%define stack_offset 0x14
%endif

%endmacro

; macro to read/write a horizontal number of pixels (%2) to/from registers
; on x86-64, - fills xmm0-15 for consecutive sets of 16 pixels
;            - if (%2 & 15 == 8) fills the last 8 bytes into rax
;            - else if (%2 & 8)  fills 8 bytes into mm0
;            - if (%2 & 7 == 4)  fills the last 4 bytes into eax
;            - else if (%2 & 4)  fills 4 bytes into mm0-1
;            - if (%2 & 3 == 3)  fills 2 bytes into r7/r3, and 1 into eax
;              (note that we're using r3 for body/bottom because it's a shorter
;               opcode, and then the loop fits in 128 bytes)
;            - else              fills remaining bytes into rax
; on x86-32, - fills mm0-7 for consecutive sets of 8 pixels
;            - if (%2 & 7 == 4)  fills 4 bytes into ebx
;            - else if (%2 & 4)  fills 4 bytes into mm0-7
;            - if (%2 & 3 == 3)  fills 2 bytes into r6, and 1 into ebx
;            - else              fills remaining bytes into ebx
; writing data out works the same way
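;
; Editorial note: a worked expansion (a sketch, not part of the original
; source). For the widest fast case, READ_NUM_BYTES body, 22 on x86-64 with
; SSE enabled expands to approximately:
;
;     movups xmm0, [r1]       ; bytes  0-15
;     movd    mm0, [r1+16]    ; bytes 16-19 (22-16 = 6, so the "& 4" case)
;     mov    valw, [r1+20]    ; bytes 20-21 (2 bytes left)
;
; i.e. one xmm load, one mmx dword load and one word load cover all 22 bytes;
; WRITE_NUM_BYTES mirrors this with stores to [r0+...].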
%macro READ_NUM_BYTES 2
%assign %%src_off 0     ; offset in source buffer
%assign %%smidx   0     ; mmx register idx
%assign %%sxidx   0     ; xmm register idx

%if cpuflag(sse)
%rep %2/16
    movups xmm %+ %%sxidx, [r1+%%src_off]
%assign %%src_off %%src_off+16
%assign %%sxidx   %%sxidx+1
%endrep ; %2/16
%endif

%if ARCH_X86_64
%if (%2-%%src_off) == 8
    mov           rax, [r1+%%src_off]
%assign %%src_off %%src_off+8
%endif ; (%2-%%src_off) == 8
%endif ; x86-64

%rep (%2-%%src_off)/8
    movq mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+8
%assign %%smidx   %%smidx+1
%endrep ; (%2-%%src_off)/8

%if (%2-%%src_off) == 4
    mov          vald, [r1+%%src_off]
%elif (%2-%%src_off) & 4
    movd mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+4
%endif ; (%2-%%src_off) ==/& 4

%if (%2-%%src_off) == 1
    mov          vall, [r1+%%src_off]
%elif (%2-%%src_off) == 2
    mov          valw, [r1+%%src_off]
%elif (%2-%%src_off) == 3
%ifidn %1, top
    mov         valw2, [r1+%%src_off]
%elifidn %1, body
    mov         valw3, [r1+%%src_off]
%elifidn %1, bottom
    mov         valw4, [r1+%%src_off]
%endif ; %1 ==/!= top
    mov          vall, [r1+%%src_off+2]
%endif ; (%2-%%src_off) == 1/2/3
%endmacro ; READ_NUM_BYTES

%macro WRITE_NUM_BYTES 2
%assign %%dst_off 0     ; offset in destination buffer
%assign %%dmidx   0     ; mmx register idx
%assign %%dxidx   0     ; xmm register idx

%if cpuflag(sse)
%rep %2/16
    movups [r0+%%dst_off], xmm %+ %%dxidx
%assign %%dst_off %%dst_off+16
%assign %%dxidx   %%dxidx+1
%endrep ; %2/16
%endif

%if ARCH_X86_64
%if (%2-%%dst_off) == 8
    mov [r0+%%dst_off], rax
%assign %%dst_off %%dst_off+8
%endif ; (%2-%%dst_off) == 8
%endif ; x86-64

%rep (%2-%%dst_off)/8
    movq [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+8
%assign %%dmidx   %%dmidx+1
%endrep ; (%2-%%dst_off)/8

%if (%2-%%dst_off) == 4
    mov [r0+%%dst_off], vald
%elif (%2-%%dst_off) & 4
    movd [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+4
%endif ; (%2-%%dst_off) ==/& 4

%if (%2-%%dst_off) == 1
    mov [r0+%%dst_off], vall
%elif (%2-%%dst_off) == 2
    mov [r0+%%dst_off], valw
%elif (%2-%%dst_off) == 3
%ifidn %1, top
    mov [r0+%%dst_off], valw2
%elifidn %1, body
    mov [r0+%%dst_off], valw3
%elifidn %1, bottom
    mov [r0+%%dst_off], valw4
%endif ; %1 ==/!= top
    mov [r0+%%dst_off+2], vall
%endif ; (%2-%%dst_off) == 1/2/3
%endmacro ; WRITE_NUM_BYTES

; vertical top/bottom extend and body copy fast loops
; these are the targets of the computed call above: fixed-width line copy
; functions that read a set number of pixels into registers, and write
; those out into the destination buffer
; r0=buf,r1=src,r2=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r6(eax/64)/r3(ebx/32)=val_reg
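;
; Editorial note: VERTICAL_EXTEND below emits 22 such functions,
; .emuedge_v_extend_1 through .emuedge_v_extend_22, each aligned to (and, per
; the opcode-size remark above, required to fit within) 128 bytes; this is
; what makes the base + w*128 computed call in EMU_EDGE_FUNC land on the
; right entry point.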
%macro VERTICAL_EXTEND 0
%assign %%n 1
%rep 22
ALIGN 128
.emuedge_v_extend_ %+ %%n:
    ; extend pixels above body
%if ARCH_X86_64
    test           r3 , r3                   ; if (!start_y)
    jz .emuedge_copy_body_ %+ %%n %+ _loop   ; goto body
%else ; ARCH_X86_32
    cmp      dword r3m, 0
    je .emuedge_copy_body_ %+ %%n %+ _loop
%endif ; ARCH_X86_64/32
    READ_NUM_BYTES  top, %%n                 ; read bytes
.emuedge_extend_top_ %+ %%n %+ _loop:        ; do {
    WRITE_NUM_BYTES top, %%n                 ; write bytes
    add            r0 , r2                   ; dst += linesize
%if ARCH_X86_64
    dec            r3d
%else ; ARCH_X86_32
    dec      dword r3m
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_top_ %+ %%n %+ _loop ; } while (--start_y)

    ; copy body pixels
.emuedge_copy_body_ %+ %%n %+ _loop:         ; do {
    READ_NUM_BYTES  body, %%n                ; read bytes
    WRITE_NUM_BYTES body, %%n                ; write bytes
    add            r0 , r2                   ; dst += linesize
    add            r1 , r2                   ; src += linesize
    dec            r4d
    jnz .emuedge_copy_body_ %+ %%n %+ _loop  ; } while (--end_y)

    ; copy bottom pixels
    test           r5 , r5                   ; if (!block_h)
    jz .emuedge_v_extend_end_ %+ %%n         ; goto end
    sub            r1 , r2                   ; src -= linesize
    READ_NUM_BYTES  bottom, %%n              ; read bytes
.emuedge_extend_bottom_ %+ %%n %+ _loop:     ; do {
    WRITE_NUM_BYTES bottom, %%n              ; write bytes
    add            r0 , r2                   ; dst += linesize
    dec            r5d
    jnz .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)

.emuedge_v_extend_end_ %+ %%n:
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+1
%endrep
%endmacro ; VERTICAL_EXTEND

; left/right (horizontal) fast extend functions
; these are essentially identical to the vertical extend ones above,
; just left/right separated because number of pixels to extend is
; obviously not the same on both sides.
; for reading, pixels are placed in eax (x86-64) or ebx (x86-32) in the
; lowest two bytes of the register (so val*0x0101), and are splatted
; into each byte of mm0 as well if n_pixels >= 8
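;
; Editorial note: a concrete illustration (not part of the original source).
; If the edge pixel is 0xAB, READ_V_PIXEL leaves valw = 0xABAB, and for
; n_pixels >= 8 the movd + pshufw (or punpcklwd/punpckldq on plain mmx)
; sequence below splats it so that mm0 = 0xABABABABABABABAB, i.e. the value
; replicated into all 8 bytes.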

%macro READ_V_PIXEL 2
    mov        vall, %2
    mov        valh, vall
%if %1 >= 8
    movd        mm0, vald
%if cpuflag(mmxext)
    pshufw      mm0, mm0, 0
%else ; mmx
    punpcklwd   mm0, mm0
    punpckldq   mm0, mm0
%endif ; mmxext/mmx
%endif ; %1 >= 8
%endmacro

%macro WRITE_V_PIXEL 2
%assign %%dst_off 0
%rep %1/8
    movq [%2+%%dst_off], mm0
%assign %%dst_off %%dst_off+8
%endrep
%if %1 & 4
%if %1 >= 8
    movd [%2+%%dst_off], mm0
%else ; %1 < 8
    mov  [%2+%%dst_off]  , valw
    mov  [%2+%%dst_off+2], valw
%endif ; %1 >=/< 8
%assign %%dst_off %%dst_off+4
%endif ; %1 & 4
%if %1&2
    mov  [%2+%%dst_off], valw
%endif ; %1 & 2
%endmacro
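
; Editorial note: a worked expansion (a sketch, not part of the original
; source). WRITE_V_PIXEL 6, r0 has no full 8-byte chunk, so it becomes three
; word stores of the splatted value:
;
;     mov [r0]  , valw        ; pixels 0-1 (the "& 4" case with n < 8)
;     mov [r0+2], valw        ; pixels 2-3
;     mov [r0+4], valw        ; pixels 4-5 (the "& 2" case)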

; r0=buf+block_h*linesize, r1=start_x, r2=linesize, r5=block_h, r6/r3=val
%macro LEFT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_left_ %+ %%n:           ; do {
    sub            r0, r2               ; dst -= linesize
    READ_V_PIXEL  %%n, [r0+r1]          ; read pixels
    WRITE_V_PIXEL %%n, r0               ; write pixels
    dec            r5
    jnz .emuedge_extend_left_ %+ %%n    ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%endmacro ; LEFT_EXTEND

; r3/r0=buf+block_h*linesize, r2=linesize, r8/r5=block_h, r7/r6=end_x, r6/r3=val
%macro RIGHT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_right_ %+ %%n:          ; do {
%if ARCH_X86_64
    sub            r3, r2               ; dst -= linesize
    READ_V_PIXEL  %%n, [r3+w_reg-1]     ; read pixels
    WRITE_V_PIXEL %%n, r3+r4-%%n        ; write pixels
    dec            r8
%else ; ARCH_X86_32
    sub            r0, r2               ; dst -= linesize
    READ_V_PIXEL  %%n, [r0+w_reg-1]     ; read pixels
    WRITE_V_PIXEL %%n, r0+r4-%%n        ; write pixels
    dec            r5
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_right_ %+ %%n   ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep

%if ARCH_X86_32
%define stack_offset 0x10
%endif
%endmacro ; RIGHT_EXTEND

; below follow the "slow" copy/extend functions. These act on a non-fixed
; width specified in a register, and run a loop to copy the full amount
; of bytes. They are optimized for copying large amounts of pixels per
; line, so they unconditionally splat data into mm registers to copy 8
; bytes per loop iteration. xmm registers could be used on x86-64 as well,
; but this hasn't been optimized as much (i.e. FIXME). A worked example of
; one row copy follows below.
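;
; Editorial note: a worked trace (a sketch, not part of the original source).
; For w = 27 bytes on x86-64 with SSE, one iteration of V_COPY_ROW's
; .body_copy_loop decomposes the row as:
;
;     movups xmm0, [r1]       ; 16-byte loop runs once (w 27 -> 11, cnt 16)
;     mov     rax, [r1+16]    ; test 11 & 8 -> copy 8 bytes  (cnt 24)
;                             ; test 11 & 4 -> zero, skipped
;     mov    valw, [r1+24]    ; test 11 & 2 -> copy 2 bytes  (cnt 26)
;     mov    vall, [r1+26]    ; test 11 & 1 -> copy 1 byte   (cnt 27)
;
; (each read is paired with the matching store to [r0+cnt_reg]); afterwards
; "mov w_reg, cnt_reg" restores w = 27 for the next row.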
%macro V_COPY_NPX 4-5
%if %0 == 4
    test     w_reg, %4
    jz .%1_skip_%4_px
%else ; %0 == 5
.%1_%4_px_loop:
%endif
    %3          %2, [r1+cnt_reg]
    %3 [r0+cnt_reg], %2
    add    cnt_reg, %4
%if %0 == 5
    sub      w_reg, %4
    test     w_reg, %5
    jnz .%1_%4_px_loop
%endif
.%1_skip_%4_px:
%endmacro

%macro V_COPY_ROW 2
%ifidn %1, bottom
    sub         r1, linesize
%endif
.%1_copy_loop:
    xor    cnt_reg, cnt_reg
%if notcpuflag(sse)
%define linesize r2m
    V_COPY_NPX %1,  mm0, movq,    8, 0xFFFFFFF8
%else ; sse
    V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
%if ARCH_X86_64
%define linesize r2
    V_COPY_NPX %1, rax , mov,     8
%else ; ARCH_X86_32
%define linesize r2m
    V_COPY_NPX %1,  mm0, movq,    8
%endif ; ARCH_X86_64/32
%endif ; sse
    V_COPY_NPX %1, vald, mov,     4
    V_COPY_NPX %1, valw, mov,     2
    V_COPY_NPX %1, vall, mov,     1
    mov      w_reg, cnt_reg
%ifidn %1, body
    add         r1, linesize
%endif
    add         r0, linesize
    dec         %2
    jnz .%1_copy_loop
%endmacro

%macro SLOW_V_EXTEND 0
.slow_v_extend_loop:
; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r8(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r7(64)/r6(32)=w=end_x-start_x
%if ARCH_X86_64
    push        r8              ; save old value of block_h
    test        r3, r3
%define cnt_reg r8
    jz .do_body_copy            ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, r3
%else
    cmp  dword r3m, 0
%define cnt_reg r2
    je .do_body_copy            ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, dword r3m
%endif

.do_body_copy:
    V_COPY_ROW body, r4

%if ARCH_X86_64
    pop         r8              ; restore old value of block_h
%define cnt_reg r3
%endif
    test        r5, r5
%if ARCH_X86_64
    jz .v_extend_end
%else
    jz .skip_bottom_extend
%endif
    V_COPY_ROW bottom, r5
%if ARCH_X86_32
.skip_bottom_extend:
    mov         r2, r2m
%endif
    jmp .v_extend_end
%endmacro

%macro SLOW_LEFT_EXTEND 0
.slow_left_extend_loop:
; r0=buf+block_h*linesize,r2=linesize,r6(64)/r3(32)=val,r5=block_h,r4=cntr,r7/r6=start_x
    mov         r4, 8
    sub         r0, linesize
    READ_V_PIXEL 8, [r0+w_reg]
.left_extend_8px_loop:
    movq [r0+r4-8], mm0
    add         r4, 8
    cmp         r4, w_reg
    jle .left_extend_8px_loop
    sub         r4, 8
    cmp         r4, w_reg
    jge .left_extend_loop_end
.left_extend_2px_loop:
    mov    [r0+r4], valw
    add         r4, 2
    cmp         r4, w_reg
    jl .left_extend_2px_loop
.left_extend_loop_end:
    dec         r5
    jnz .slow_left_extend_loop
%if ARCH_X86_32
    mov         r2, r2m
%endif
    jmp .right_extend
%endmacro

%macro SLOW_RIGHT_EXTEND 0
.slow_right_extend_loop:
; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r8(64)/r5(32)=block_h,
; r7(64)/r6(32)=end_x,r6/r3=val,r1=cntr
%if ARCH_X86_64
%define buf_reg r3
%define bh_reg r8
%else
%define buf_reg r0
%define bh_reg r5
%endif
    lea         r1, [r4-8]
    sub    buf_reg, linesize
    READ_V_PIXEL 8, [buf_reg+w_reg-1]
.right_extend_8px_loop:
    movq [buf_reg+r1], mm0
    sub         r1, 8
    cmp         r1, w_reg
    jge .right_extend_8px_loop
    add         r1, 8
    cmp         r1, w_reg
    je .right_extend_loop_end
.right_extend_2px_loop:
    sub         r1, 2
    mov [buf_reg+r1], valw
    cmp         r1, w_reg
    jg .right_extend_2px_loop
.right_extend_loop_end:
    dec     bh_reg
    jnz .slow_right_extend_loop
    jmp .h_extend_end
%endmacro

%macro emu_edge 1
INIT_XMM %1
EMU_EDGE_FUNC
VERTICAL_EXTEND
LEFT_EXTEND
RIGHT_EXTEND
SLOW_V_EXTEND
SLOW_LEFT_EXTEND
SLOW_RIGHT_EXTEND
%endmacro
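
; Editorial note: with x86inc's cglobal name mangling, the instantiations
; below should emit ff_emu_edge_core_sse and, on x86-32 only,
; ff_emu_edge_core_mmx (the cpuflag suffix comes from the INIT_XMM argument).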

emu_edge sse
%if ARCH_X86_32
emu_edge mmx
%endif

%macro PREFETCH_FN 1
cglobal prefetch, 3, 3, 0, buf, stride, h
.loop:
    %1 [bufq]
    add bufq, strideq
    dec hd
    jg .loop
    REP_RET
%endmacro
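
; Editorial note: %1 is the prefetch instruction to use, so the
; instantiations below should emit ff_prefetch_mmxext (using prefetcht0)
; and, on x86-32 only, ff_prefetch_3dnow (using AMD's prefetch).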

INIT_MMX mmxext
PREFETCH_FN prefetcht0
%if ARCH_X86_32
INIT_MMX 3dnow
PREFETCH_FN prefetch
%endif