;******************************************************************************
;* Core video DSP functions
;* Copyright (c) 2012 Ronald S. Bultje
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

; void ff_emu_edge_core(uint8_t *buf, const uint8_t *src, x86_reg linesize,
;                       x86_reg start_y, x86_reg end_y, x86_reg block_h,
;                       x86_reg start_x, x86_reg end_x, x86_reg block_w);
;
; The actual function itself is below. It basically wraps a very simple
; w = end_x - start_x
; if (w) {
;   if (w > 22) {
;     jump to the slow loop functions
;   } else {
;     jump to the fast loop functions
;   }
; }
;
; ... and then the same for left/right extend also. See below for loop
; function implementations. Fast are fixed-width, slow is variable-width.
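;
; In C terms, the observable effect of the whole function can be modelled
; by this hedged sketch (a simplified model, not the upstream C wrapper;
; av_clip() and the exact pointer bases are assumptions of the model --
; the real caller pre-offsets buf/src and pre-clamps the coordinates):
;
;   for (y = 0; y < block_h; y++)
;       for (x = 0; x < block_w; x++)
;           buf[y * linesize + x] =
;               src[av_clip(y, start_y, end_y - 1) * linesize +
;                   av_clip(x, start_x, end_x - 1)];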

%macro EMU_EDGE_FUNC 0
%if ARCH_X86_64
%define w_reg r7
cglobal emu_edge_core, 6, 9, 1
    mov        r8, r5          ; save block_h
%else
%define w_reg r6
cglobal emu_edge_core, 2, 7, 0
    mov        r4, r4m         ; end_y
    mov        r5, r5m         ; block_h
%endif

    ; start with vertical extend (top/bottom) and body pixel copy
    mov     w_reg, r7m
    sub     w_reg, r6m         ; w = end_x - start_x
    sub        r5, r4
%if ARCH_X86_64
    sub        r4, r3
%else
    sub        r4, dword r3m
%endif
    cmp     w_reg, 22
    jg .slow_v_extend_loop
%if ARCH_X86_32
    mov        r2, r2m         ; linesize
%endif
    sal     w_reg, 7           ; w * 128
%ifdef PIC
    lea       rax, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)]
    add     w_reg, rax
%else
    lea     w_reg, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)+w_reg]
%endif
    call    w_reg              ; fast top extend, body copy and bottom extend
.v_extend_end:

    ; horizontal extend (left/right)
    mov     w_reg, r6m         ; start_x
    sub        r0, w_reg
%if ARCH_X86_64
    mov        r3, r0          ; backup of buf+block_h*linesize
    mov        r5, r8
%else
    mov       r0m, r0          ; backup of buf+block_h*linesize
    mov        r5, r5m
%endif
    test    w_reg, w_reg
    jz .right_extend
    cmp     w_reg, 22
    jg .slow_left_extend_loop
    mov        r1, w_reg
    dec     w_reg
    ; FIXME we can do a if size == 1 here if that makes any speed difference, test me
    sar     w_reg, 1
    sal     w_reg, 6
    ; r0=buf+block_h*linesize,r7(64)/r6(32)=start_x offset for funcs
    ; r6(rax)/r3(ebx)=val,r2=linesize,r1=start_x,r5=block_h
%ifdef PIC
    lea       rax, [.emuedge_extend_left_2]
    add     w_reg, rax
%else
    lea     w_reg, [.emuedge_extend_left_2+w_reg]
%endif
    call    w_reg

    ; now r3(64)/r0(32)=buf,r2=linesize,r8/r5=block_h,r6/r3=val, r7/r6=end_x, r1=block_w
.right_extend:
%if ARCH_X86_32
    mov        r0, r0m
    mov        r5, r5m
%endif
    mov     w_reg, r7m         ; end_x
    mov        r1, r8m         ; block_w
    mov        r4, r1
    sub        r1, w_reg
    jz .h_extend_end           ; if (end_x == block_w) goto h_extend_end
    cmp        r1, 22
    jg .slow_right_extend_loop
    dec        r1
    ; FIXME we can do a if size == 1 here if that makes any speed difference, test me
    sar        r1, 1
    sal        r1, 6
%ifdef PIC
    lea       rax, [.emuedge_extend_right_2]
    add        r1, rax
%else
    lea        r1, [.emuedge_extend_right_2+r1]
%endif
    call       r1
.h_extend_end:
    RET

%if ARCH_X86_64
%define vall  al
%define valh  ah
%define valw  ax
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif
%define vald eax
%else
%define vall  bl
%define valh  bh
%define valw  bx
%define valw2 r6w
%define valw3 valw2
%define valw4 valw3
%define vald  ebx
%define stack_offset 0x14
%endif

%endmacro
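
; A worked example of the computed-call dispatch above: the fast vertical
; loops are emitted back to back on 128-byte boundaries (ALIGN 128), so
; .emuedge_v_extend_n lives at .emuedge_v_extend_1 + (n-1)*128. For w == 5,
; "sal w_reg, 7" gives 5*128, and adding the base address
; (.emuedge_v_extend_1 - 128) lands exactly on .emuedge_v_extend_5.
; The horizontal loops use 64-byte slots and even widths only: for
; start_x == 5, dec/sar/sal compute ((5-1)>>1)<<6 = 128, i.e.
; .emuedge_extend_left_2 + 128 = .emuedge_extend_left_6, so odd widths
; round up to the next even-width loop.
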
; macro to read/write a horizontal number of pixels (%2) to/from registers
; on x86-64, - fills xmm0-15 for consecutive sets of 16 pixels
;            - if (%2 & 15 == 8) fills the last 8 bytes into rax
;            - else if (%2 & 8)  fills 8 bytes into mm0
;            - if (%2 & 7 == 4)  fills the last 4 bytes into rax
;            - else if (%2 & 4)  fills 4 bytes into mm0-1
;            - if (%2 & 3 == 3)  fills 2 bytes into r7/r3, and 1 into eax
;              (note that we're using r3 for body/bottom because it's a shorter
;               opcode, and then the loop fits in 128 bytes)
;            - else              fills remaining bytes into rax
; on x86-32, - fills mm0-7 for consecutive sets of 8 pixels
;            - if (%2 & 7 == 4)  fills 4 bytes into ebx
;            - else if (%2 & 4)  fills 4 bytes into mm0-7
;            - if (%2 & 3 == 3)  fills 2 bytes into r6, and 1 into ebx
;            - else              fills remaining bytes into ebx
; writing data out is in the same way
%macro READ_NUM_BYTES 2
%assign %%src_off 0 ; offset in source buffer
%assign %%smidx   0 ; mmx register idx
%assign %%sxidx   0 ; xmm register idx

%if cpuflag(sse)
%rep %2/16
    movups xmm %+ %%sxidx, [r1+%%src_off]
%assign %%src_off %%src_off+16
%assign %%sxidx   %%sxidx+1
%endrep ; %2/16
%endif

%if ARCH_X86_64
%if (%2-%%src_off) == 8
    mov           rax, [r1+%%src_off]
%assign %%src_off %%src_off+8
%endif ; (%2-%%src_off) == 8
%endif ; x86-64

%rep (%2-%%src_off)/8
    movq mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+8
%assign %%smidx   %%smidx+1
%endrep ; (%2-%%src_off)/8

%if (%2-%%src_off) == 4
    mov          vald, [r1+%%src_off]
%elif (%2-%%src_off) & 4
    movd mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+4
%endif ; (%2-%%src_off) ==/& 4

%if (%2-%%src_off) == 1
    mov          vall, [r1+%%src_off]
%elif (%2-%%src_off) == 2
    mov          valw, [r1+%%src_off]
%elif (%2-%%src_off) == 3
%ifidn %1, top
    mov         valw2, [r1+%%src_off]
%elifidn %1, body
    mov         valw3, [r1+%%src_off]
%elifidn %1, bottom
    mov         valw4, [r1+%%src_off]
%endif ; %1 ==/!= top
    mov          vall, [r1+%%src_off+2]
%endif ; (%2-%%src_off) == 1/2/3
%endmacro ; READ_NUM_BYTES

%macro WRITE_NUM_BYTES 2
%assign %%dst_off 0 ; offset in destination buffer
%assign %%dmidx   0 ; mmx register idx
%assign %%dxidx   0 ; xmm register idx

%if cpuflag(sse)
%rep %2/16
    movups [r0+%%dst_off], xmm %+ %%dxidx
%assign %%dst_off %%dst_off+16
%assign %%dxidx   %%dxidx+1
%endrep ; %2/16
%endif

%if ARCH_X86_64
%if (%2-%%dst_off) == 8
    mov    [r0+%%dst_off], rax
%assign %%dst_off %%dst_off+8
%endif ; (%2-%%dst_off) == 8
%endif ; x86-64

%rep (%2-%%dst_off)/8
    movq   [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+8
%assign %%dmidx   %%dmidx+1
%endrep ; (%2-%%dst_off)/8

%if (%2-%%dst_off) == 4
    mov    [r0+%%dst_off], vald
%elif (%2-%%dst_off) & 4
    movd   [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+4
%endif ; (%2-%%dst_off) ==/& 4

%if (%2-%%dst_off) == 1
    mov    [r0+%%dst_off], vall
%elif (%2-%%dst_off) == 2
    mov    [r0+%%dst_off], valw
%elif (%2-%%dst_off) == 3
%ifidn %1, top
    mov    [r0+%%dst_off], valw2
%elifidn %1, body
    mov    [r0+%%dst_off], valw3
%elifidn %1, bottom
    mov    [r0+%%dst_off], valw4
%endif ; %1 ==/!= top
    mov  [r0+%%dst_off+2], vall
%endif ; (%2-%%dst_off) == 1/2/3
%endmacro ; WRITE_NUM_BYTES
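
; As a worked example of the decomposition above: on x86-64 with sse,
; READ_NUM_BYTES body, 22 expands to
;   movups xmm0, [r1]        ; 16 bytes
;   movd    mm0, [r1+16]     ;  4 bytes (22-16 == 6, so the "& 4" branch)
;   mov    valw, [r1+20]     ;  2 bytes
; and WRITE_NUM_BYTES body, 22 emits the mirrored stores, so a line of up
; to 22 pixels is moved entirely through registers, without an inner loop.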

; vertical top/bottom extend and body copy fast loops
; these are function pointers to set-width line copy functions, i.e.
; they read a fixed number of pixels into set registers, and write
; those out into the destination buffer
; r0=buf,r1=src,r2=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r6(eax/64)/r3(ebx/32)=val_reg
%macro VERTICAL_EXTEND 0
%assign %%n 1
%rep 22
ALIGN 128
.emuedge_v_extend_ %+ %%n:
    ; extend pixels above body
%if ARCH_X86_64
    test           r3 , r3                   ; if (!start_y)
    jz .emuedge_copy_body_ %+ %%n %+ _loop   ; goto body
%else ; ARCH_X86_32
    cmp      dword r3m, 0
    je .emuedge_copy_body_ %+ %%n %+ _loop
%endif ; ARCH_X86_64/32
    READ_NUM_BYTES  top, %%n                 ; read bytes
.emuedge_extend_top_ %+ %%n %+ _loop:        ; do {
    WRITE_NUM_BYTES top, %%n                 ;   write bytes
    add            r0 , r2                   ;   dst += linesize
%if ARCH_X86_64
    dec            r3d
%else ; ARCH_X86_32
    dec      dword r3m
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_top_ %+ %%n %+ _loop ; } while (--start_y)

    ; copy body pixels
.emuedge_copy_body_ %+ %%n %+ _loop:         ; do {
    READ_NUM_BYTES  body, %%n                ;   read bytes
    WRITE_NUM_BYTES body, %%n                ;   write bytes
    add            r0 , r2                   ;   dst += linesize
    add            r1 , r2                   ;   src += linesize
    dec            r4d
    jnz .emuedge_copy_body_ %+ %%n %+ _loop  ; } while (--end_y)

    ; copy bottom pixels
    test           r5 , r5                   ; if (!block_h)
    jz .emuedge_v_extend_end_ %+ %%n         ; goto end
    sub            r1 , r2                   ; src -= linesize
    READ_NUM_BYTES  bottom, %%n              ; read bytes
.emuedge_extend_bottom_ %+ %%n %+ _loop:     ; do {
    WRITE_NUM_BYTES bottom, %%n              ;   write bytes
    add            r0 , r2                   ;   dst += linesize
    dec            r5d
    jnz .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)

.emuedge_v_extend_end_ %+ %%n:
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+1
%endrep
%endmacro ; VERTICAL_EXTEND
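
; In C terms, each .emuedge_v_extend_N generated above behaves roughly like
; the following hedged sketch, where read_N()/write_N() stand for the
; fixed-width register loads/stores of READ_NUM_BYTES/WRITE_NUM_BYTES, and
; top_rows/body_rows/bottom_rows are the already-adjusted r3/r4/r5 counts
; computed in EMU_EDGE_FUNC (start_y, end_y-start_y, block_h-end_y):
;
;   if (top_rows) {
;       pixels = read_N(src);                 // first source row
;       do { write_N(buf, pixels); buf += linesize; } while (--top_rows);
;   }
;   do {                                      // body copy
;       write_N(buf, read_N(src));
;       buf += linesize; src += linesize;
;   } while (--body_rows);
;   if (bottom_rows) {
;       pixels = read_N(src - linesize);      // last body row
;       do { write_N(buf, pixels); buf += linesize; } while (--bottom_rows);
;   }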

; left/right (horizontal) fast extend functions
; these are essentially identical to the vertical extend ones above,
; just left/right separated because number of pixels to extend is
; obviously not the same on both sides.
; for reading, pixels are placed in eax (x86-64) or ebx (x86-32) in the
; lowest two bytes of the register (so val*0x0101), and are splatted
; into each byte of mm0 as well if n_pixels >= 8

%macro READ_V_PIXEL 2
    mov        vall, %2
    mov        valh, vall
%if %1 >= 8
    movd        mm0, vald
%if cpuflag(mmxext)
    pshufw      mm0, mm0, 0
%else ; mmx
    punpcklwd   mm0, mm0
    punpckldq   mm0, mm0
%endif ; mmxext/mmx
%endif ; %1 >= 8
%endmacro

%macro WRITE_V_PIXEL 2
%assign %%dst_off 0
%rep %1/8
    movq [%2+%%dst_off], mm0
%assign %%dst_off %%dst_off+8
%endrep
%if %1 & 4
%if %1 >= 8
    movd [%2+%%dst_off], mm0
%else ; %1 < 8
    mov  [%2+%%dst_off]  , valw
    mov  [%2+%%dst_off+2], valw
%endif ; %1 >=/< 8
%assign %%dst_off %%dst_off+4
%endif ; %1 & 4
%if %1 & 2
    mov  [%2+%%dst_off], valw
%endif ; %1 & 2
%endmacro

; r0=buf+block_h*linesize, r1=start_x, r2=linesize, r5=block_h, r6/r3=val
%macro LEFT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_left_ %+ %%n:          ; do {
    sub         r0, r2                 ;   dst -= linesize
    READ_V_PIXEL %%n, [r0+r1]          ;   read pixels
    WRITE_V_PIXEL %%n, r0              ;   write pixels
    dec         r5
    jnz .emuedge_extend_left_ %+ %%n   ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%endmacro ; LEFT_EXTEND

; r3/r0=buf+block_h*linesize, r2=linesize, r8/r5=block_h, r0/r6=end_x, r6/r3=val
%macro RIGHT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_right_ %+ %%n:          ; do {
%if ARCH_X86_64
    sub         r3, r2                  ;   dst -= linesize
    READ_V_PIXEL %%n, [r3+w_reg-1]      ;   read pixels
    WRITE_V_PIXEL %%n, r3+r4-%%n        ;   write pixels
    dec         r8
%else ; ARCH_X86_32
    sub         r0, r2                  ;   dst -= linesize
    READ_V_PIXEL %%n, [r0+w_reg-1]      ;   read pixels
    WRITE_V_PIXEL %%n, r0+r4-%%n        ;   write pixels
    dec         r5
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_right_ %+ %%n   ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep

%if ARCH_X86_32
%define stack_offset 0x10
%endif
%endmacro ; RIGHT_EXTEND
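
; The READ_V_PIXEL splat, as a worked example: for an edge pixel of value
; 0x80, "mov vall, ...; mov valh, vall" leaves 0x8080 in valw (val*0x0101),
; and pshufw (or punpcklwd+punpckldq on plain mmx) then broadcasts that
; word, so mm0 holds 0x8080808080808080. WRITE_V_PIXEL can therefore fill
; the margin 8, 4 or 2 bytes per store instead of byte by byte.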

; below follow the "slow" copy/extend functions, these act on a non-fixed
; width specified in a register, and run a loop to copy the full amount
; of bytes. They are optimized for copying of large amounts of pixels per
; line, so they unconditionally splat data into mm registers to copy 8
; bytes per loop iteration. It could be considered to use xmm for x86-64
; also, but I haven't optimized this as much (i.e. FIXME)
%macro V_COPY_NPX 4-5
%if %0 == 4
    test     w_reg, %4
    jz .%1_skip_%4_px
%else ; %0 == 5
.%1_%4_px_loop:
%endif
    %3          %2, [r1+cnt_reg]
    %3 [r0+cnt_reg], %2
    add    cnt_reg, %4
%if %0 == 5
    sub      w_reg, %4
    test     w_reg, %5
    jnz .%1_%4_px_loop
%endif
.%1_skip_%4_px:
%endmacro

%macro V_COPY_ROW 2
%ifidn %1, bottom
    sub         r1, linesize
%endif
.%1_copy_loop:
    xor    cnt_reg, cnt_reg
%if notcpuflag(sse)
%define linesize r2m
    V_COPY_NPX %1,  mm0, movq,    8, 0xFFFFFFF8
%else ; sse
    V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
%if ARCH_X86_64
%define linesize r2
    V_COPY_NPX %1, rax , mov,     8
%else ; ARCH_X86_32
%define linesize r2m
    V_COPY_NPX %1,  mm0, movq,    8
%endif ; ARCH_X86_64/32
%endif ; sse
    V_COPY_NPX %1, vald, mov,     4
    V_COPY_NPX %1, valw, mov,     2
    V_COPY_NPX %1, vall, mov,     1
    mov      w_reg, cnt_reg
%ifidn %1, body
    add         r1, linesize
%endif
    add         r0, linesize
    dec         %2
    jnz .%1_copy_loop
%endmacro

%macro SLOW_V_EXTEND 0
.slow_v_extend_loop:
; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r8(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r7(64)/r6(32)=w=end_x-start_x
%if ARCH_X86_64
    push        r8              ; save old value of block_h
    test        r3, r3
%define cnt_reg r8
    jz .do_body_copy            ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, r3
%else
    cmp  dword r3m, 0
%define cnt_reg r2
    je .do_body_copy            ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, dword r3m
%endif

.do_body_copy:
    V_COPY_ROW body, r4

%if ARCH_X86_64
    pop         r8              ; restore old value of block_h
%define cnt_reg r3
%endif
    test        r5, r5
%if ARCH_X86_64
    jz .v_extend_end
%else
    jz .skip_bottom_extend
%endif
    V_COPY_ROW bottom, r5
%if ARCH_X86_32
.skip_bottom_extend:
    mov         r2, r2m
%endif
    jmp .v_extend_end
%endmacro

%macro SLOW_LEFT_EXTEND 0
.slow_left_extend_loop:
; r0=buf+block_h*linesize,r2=linesize,r6(64)/r3(32)=val,r5=block_h,r4=cntr,r7/r6=start_x
    mov         r4, 8
    sub         r0, linesize
    READ_V_PIXEL 8, [r0+w_reg]
.left_extend_8px_loop:
    movq [r0+r4-8], mm0
    add         r4, 8
    cmp         r4, w_reg
    jle .left_extend_8px_loop
    sub         r4, 8
    cmp         r4, w_reg
    jge .left_extend_loop_end
.left_extend_2px_loop:
    mov    [r0+r4], valw
    add         r4, 2
    cmp         r4, w_reg
    jl .left_extend_2px_loop
.left_extend_loop_end:
    dec         r5
    jnz .slow_left_extend_loop
%if ARCH_X86_32
    mov         r2, r2m
%endif
    jmp .right_extend
%endmacro
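
; C model of .slow_left_extend_loop above (hedged sketch; splat8() stands
; for the READ_V_PIXEL 8 broadcast, and the stores are allowed to overlap:
; the final 2-byte store may rewrite buf[start_x] with its own value,
; which is harmless since the splat was read from that very byte):
;
;   do {
;       buf -= linesize;
;       uint64_t v = splat8(buf[start_x]);
;       int i = 8;
;       do { AV_WN64(buf + i - 8, v); i += 8; } while (i <= start_x);
;       for (i -= 8; i < start_x; i += 2)
;           AV_WN16(buf + i, v & 0xffff);
;   } while (--block_h);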

%macro SLOW_RIGHT_EXTEND 0
.slow_right_extend_loop:
; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r8(64)/r5(32)=block_h,
; r7(64)/r6(32)=end_x,r6/r3=val,r1=cntr
%if ARCH_X86_64
%define buf_reg r3
%define bh_reg r8
%else
%define buf_reg r0
%define bh_reg r5
%endif
    lea         r1, [r4-8]
    sub    buf_reg, linesize
    READ_V_PIXEL 8, [buf_reg+w_reg-1]
.right_extend_8px_loop:
    movq [buf_reg+r1], mm0
    sub         r1, 8
    cmp         r1, w_reg
    jge .right_extend_8px_loop
    add         r1, 8
    cmp         r1, w_reg
    je .right_extend_loop_end
.right_extend_2px_loop:
    sub         r1, 2
    mov [buf_reg+r1], valw
    cmp         r1, w_reg
    jg .right_extend_2px_loop
.right_extend_loop_end:
    dec     bh_reg
    jnz .slow_right_extend_loop
    jmp .h_extend_end
%endmacro

%macro emu_edge 1
INIT_XMM %1
EMU_EDGE_FUNC
VERTICAL_EXTEND
LEFT_EXTEND
RIGHT_EXTEND
SLOW_V_EXTEND
SLOW_LEFT_EXTEND
SLOW_RIGHT_EXTEND
%endmacro

emu_edge sse
%if ARCH_X86_32
emu_edge mmx
%endif

%macro PREFETCH_FN 1
cglobal prefetch, 3, 3, 0, buf, stride, h
.loop:
    %1      [bufq]
    add      bufq, strideq
    dec        hd
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PREFETCH_FN prefetcht0
%if ARCH_X86_32
INIT_MMX 3dnow
PREFETCH_FN prefetch
%endif
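
; For reference, the prefetch kernels instantiated above correspond to
; roughly this C model (hedged sketch; the prototype name is illustrative):
;
;   void ff_prefetch(uint8_t *buf, ptrdiff_t stride, int h)
;   {
;       do {
;           __builtin_prefetch(buf);
;           buf += stride;
;       } while (--h > 0);
;   }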