;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
pb_f:                times 16 db 15
pb_zzzzzzzz77777777: times 8  db -1
pb_7:                times 8  db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_revwords:         SHUFFLE_MASK_W 7, 6, 5, 4, 3, 2, 1, 0
pd_16384:            times 4 dd 16384
pb_bswap32:          db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

SECTION_TEXT

%macro SCALARPRODUCT 0
; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order)
cglobal scalarproduct_int16, 3,3,3, v1, v2, order
    shl orderq, 1
    add v1q, orderq
    add v2q, orderq
    neg orderq
    pxor    m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    pshuflw m0, m2, 0x4e
%else
    pshufw  m0, m2, 0x4e
%endif
    paddd   m2, m0
    movd    eax, m2
    RET
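; A rough C model of what the routine above computes (a reference sketch with
; a hypothetical _model name, not FFmpeg's actual C fallback; like the asm,
; it assumes order is a multiple of the unrolled SIMD width):
;
;   int32_t scalarproduct_int16_model(const int16_t *v1, const int16_t *v2,
;                                     int order)
;   {
;       int32_t sum = 0;
;       while (order--)
;           sum += *v1++ * *v2++;
;       return sum;
;   }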
; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
%if mmsize == 16
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
%else
    pshufw  m7, m7, 0
%endif
    pxor    m6, m6
    add v1q, orderq
    add v2q, orderq
    add v3q, orderq
    neg orderq
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    mova    m4, [v1q + orderq]
    mova    m5, [v1q + orderq + mmsize]
    movu    m2, [v3q + orderq]
    movu    m3, [v3q + orderq + mmsize]
    pmaddwd m0, m4
    pmaddwd m1, m5
    pmullw  m2, m7
    pmullw  m3, m7
    paddd   m6, m0
    paddd   m6, m1
    paddw   m2, m4
    paddw   m3, m5
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
%else
    pshufw  m0, m6, 0x4e
%endif
    paddd   m6, m0
    movd    eax, m6
    RET
%endmacro

INIT_MMX mmxext
SCALARPRODUCT
INIT_XMM sse2
SCALARPRODUCT
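; A rough C model of scalarproduct_and_madd_int16 (a sketch with a
; hypothetical _model name). Note the dot product reads the old v1 values,
; before the in-place madd update, matching the load order in the asm above:
;
;   int32_t scalarproduct_and_madd_int16_model(int16_t *v1, const int16_t *v2,
;                                              const int16_t *v3, int order,
;                                              int mul)
;   {
;       int32_t sum = 0;
;       while (order--) {
;           sum   += *v1 * *v2++;
;           *v1++ += mul * *v3++;
;       }
;       return sum;
;   }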
%macro SCALARPRODUCT_LOOP 1
align 16
.loop%1:
    sub     orderq, mmsize*2
%if %1
    mova    m1, m4
    mova    m4, [v2q + orderq]
    mova    m0, [v2q + orderq + mmsize]
    palignr m1, m0, %1
    palignr m0, m4, %1
    mova    m3, m5
    mova    m5, [v3q + orderq]
    mova    m2, [v3q + orderq + mmsize]
    palignr m3, m2, %1
    palignr m2, m5, %1
%else
    mova    m0, [v2q + orderq]
    mova    m1, [v2q + orderq + mmsize]
    mova    m2, [v3q + orderq]
    mova    m3, [v3q + orderq + mmsize]
%endif
    %define t0 [v1q + orderq]
    %define t1 [v1q + orderq + mmsize]
%if ARCH_X86_64
    mova    m8, t0
    mova    m9, t1
    %define t0 m8
    %define t1 m9
%endif
    pmaddwd m0, t0
    pmaddwd m1, t1
    pmullw  m2, m7
    pmullw  m3, m7
    paddw   m2, t0
    paddw   m3, t1
    paddd   m6, m0
    paddd   m6, m1
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    jg .loop%1
%if %1
    jmp .end
%endif
%endmacro

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
INIT_XMM ssse3
cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
    pxor    m6, m6
    mov    r4d, v2d
    and    r4d, 15
    and    v2q, ~15
    and    v3q, ~15
    mova    m4, [v2q + orderq]
    mova    m5, [v3q + orderq]
; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
    cmp r4d, 0
    je .loop0
    cmp r4d, 2
    je .loop2
    cmp r4d, 4
    je .loop4
    cmp r4d, 6
    je .loop6
    cmp r4d, 8
    je .loop8
    cmp r4d, 10
    je .loop10
    cmp r4d, 12
    je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
    paddd   m6, m0
    movd    eax, m6
    RET


;-----------------------------------------------------------------------------
; void ff_apply_window_int16(int16_t *output, const int16_t *input,
;                            const int16_t *window, unsigned int len)
;-----------------------------------------------------------------------------

%macro REVERSE_WORDS 1-2
%if cpuflag(ssse3) && notcpuflag(atom)
    pshufb  %1, %2
%elif cpuflag(sse2)
    pshuflw %1, %1, 0x1B
    pshufhw %1, %1, 0x1B
    pshufd  %1, %1, 0x4E
%elif cpuflag(mmxext)
    pshufw  %1, %1, 0x1B
%endif
%endmacro

%macro MUL16FIXED 3
%if cpuflag(ssse3) ; dst, src, unused
; dst = ((dst * src) + (1<<14)) >> 15
    pmulhrsw   %1, %2
%elif cpuflag(mmxext) ; dst, src, temp
; dst = (dst * src) >> 15
; pmulhw cuts off the bottom bit, so we have to lshift by 1 and add it back
; in from the pmullw result.
    mova    %3, %1
    pmulhw  %1, %2
    pmullw  %3, %2
    psrlw   %3, 15
    psllw   %1, 1
    por     %1, %3
%endif
%endmacro
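; Scalar sketches of the two MUL16FIXED variants above (hypothetical helper
; names, assumed semantics): the ssse3 pmulhrsw form rounds, the mmxext form
; truncates, which is why the latter is not bit-identical to the C reference.
;
;   int16_t mul16_round(int16_t a, int16_t b) /* pmulhrsw       */
;   {
;       return (int16_t)((a * b + (1 << 14)) >> 15);
;   }
;   int16_t mul16_trunc(int16_t a, int16_t b) /* pmulhw + pmullw */
;   {
;       return (int16_t)((a * b) >> 15);
;   }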
%macro APPLY_WINDOW_INT16 1 ; %1 bitexact version
%if %1
cglobal apply_window_int16, 4,5,6, output, input, window, offset, offset2
%else
cglobal apply_window_int16_round, 4,5,6, output, input, window, offset, offset2
%endif
    lea     offset2q, [offsetq-mmsize]
%if cpuflag(ssse3) && notcpuflag(atom)
    mova          m5, [pb_revwords]
    ALIGN 16
%elif %1
    mova          m5, [pd_16384]
%endif
.loop:
%if cpuflag(ssse3)
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The ssse3 version is bit-identical.
    mova          m0, [windowq+offset2q]
    mova          m1, [ inputq+offset2q]
    pmulhrsw      m1, m0
    REVERSE_WORDS m0, m5
    pmulhrsw      m0, [ inputq+offsetq ]
    mova  [outputq+offset2q], m1
    mova  [outputq+offsetq ], m0
%elif %1
    ; This version expands 16-bit to 32-bit, multiplies by the window,
    ; adds 16384 for rounding, right shifts 15, then repacks back to words to
    ; save to the output. The window is reversed for the second half.
    mova          m3, [windowq+offset2q]
    mova          m4, [ inputq+offset2q]
    pxor          m0, m0
    punpcklwd     m0, m3
    punpcklwd     m1, m4
    pmaddwd       m0, m1
    paddd         m0, m5
    psrad         m0, 15
    pxor          m2, m2
    punpckhwd     m2, m3
    punpckhwd     m1, m4
    pmaddwd       m2, m1
    paddd         m2, m5
    psrad         m2, 15
    packssdw      m0, m2
    mova  [outputq+offset2q], m0
    REVERSE_WORDS m3
    mova          m4, [ inputq+offsetq]
    pxor          m0, m0
    punpcklwd     m0, m3
    punpcklwd     m1, m4
    pmaddwd       m0, m1
    paddd         m0, m5
    psrad         m0, 15
    pxor          m2, m2
    punpckhwd     m2, m3
    punpckhwd     m1, m4
    pmaddwd       m2, m1
    paddd         m2, m5
    psrad         m2, 15
    packssdw      m0, m2
    mova  [outputq+offsetq], m0
%else
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The mmxext and sse2 versions do not use rounding, and
    ; therefore are not bit-identical to the C version.
    mova          m0, [windowq+offset2q]
    mova          m1, [ inputq+offset2q]
    mova          m2, [ inputq+offsetq ]
    MUL16FIXED    m1, m0, m3
    REVERSE_WORDS m0
    MUL16FIXED    m2, m0, m3
    mova  [outputq+offset2q], m1
    mova  [outputq+offsetq ], m2
%endif
    add      offsetd, mmsize
    sub     offset2d, mmsize
    jae .loop
    REP_RET
%endmacro

INIT_MMX mmxext
APPLY_WINDOW_INT16 0
INIT_XMM sse2
APPLY_WINDOW_INT16 0

INIT_MMX mmxext
APPLY_WINDOW_INT16 1
INIT_XMM sse2
APPLY_WINDOW_INT16 1
INIT_XMM ssse3
APPLY_WINDOW_INT16 1
INIT_XMM ssse3, atom
APPLY_WINDOW_INT16 1
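; A rough C model of the bitexact window application above (a sketch with a
; hypothetical _model name, assuming a symmetric window, which is why the asm
; only reads the window's first half and reverses it for the mirrored half):
;
;   void apply_window_int16_model(int16_t *output, const int16_t *input,
;                                 const int16_t *window, unsigned int len)
;   {
;       unsigned int i;
;       for (i = 0; i < len / 2; i++) {
;           int w = window[i];
;           output[i]           = (input[i]           * w + (1 << 14)) >> 15;
;           output[len - 1 - i] = (input[len - 1 - i] * w + (1 << 14)) >> 15;
;       }
;   }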
; void add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
INIT_MMX mmxext
cglobal add_hfyu_median_prediction, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add    dstq, wq
    add    topq, wq
    add   diffq, wq
    neg      wq
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
%assign i 0
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq [dstq+wq], mm7
    add      wq, 8
    jl .loop
    movzx   r2d, byte [dstq-1]
    mov [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov [left_topq], r2d
    RET
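; A rough C model of the median predictor above (a sketch with a hypothetical
; _model name; it mirrors the pmaxub/pminub chain, including the mod-256 byte
; arithmetic of psubb/paddb):
;
;   void add_hfyu_median_prediction_model(uint8_t *dst, const uint8_t *top,
;                                         const uint8_t *diff, int w,
;                                         int *left, int *left_top)
;   {
;       uint8_t l = *left, tl = *left_top;
;       int i;
;       for (i = 0; i < w; i++) {
;           uint8_t t    = top[i];
;           uint8_t g    = (uint8_t)(t - tl + l);            /* t-tl+l    */
;           uint8_t lo   = l < t ? l : t;
;           uint8_t hi   = l > t ? l : t;
;           uint8_t pred = g < lo ? lo : (g > hi ? hi : g);  /* median    */
;           l  = dst[i]  = (uint8_t)(pred + diff[i]);        /* +residual */
;           tl = t;
;       }
;       *left     = l;
;       *left_top = tl;
;   }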
%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
    add     srcq, wq
    add     dstq, wq
    neg     wq
%%.loop:
%if %2
    mova    m1, [srcq+wq]
%else
    movu    m1, [srcq+wq]
%endif
    mova    m2, m1
    psllw   m1, 8
    paddb   m1, m2
    mova    m2, m1
    pshufb  m1, m3
    paddb   m1, m2
    pshufb  m0, m5
    mova    m2, m1
    pshufb  m1, m4
    paddb   m1, m2
%if mmsize == 16
    mova    m2, m1
    pshufb  m1, m6
    paddb   m1, m2
%endif
    paddb   m0, m1
%if %1
    mova    [dstq+wq], m0
%else
    movq    [dstq+wq], m0
    movhps  [dstq+wq+8], m0
%endif
    add     wq, mmsize
    jl %%.loop
    mov     eax, mmsize-1
    sub     eax, wd
    movd    m1, eax
    pshufb  m0, m1
    movd    eax, m0
    RET
%endmacro

; int add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
INIT_MMX ssse3
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
.skip_prologue:
    mova    m5, [pb_7]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    psllq   m0, 56
    ADD_HFYU_LEFT_LOOP 1, 1

INIT_XMM sse4
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
    mova    m5, [pb_f]
    mova    m6, [pb_zzzzzzzz77777777]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    pslldq  m0, 15
    test    srcq, 15
    jnz .src_unaligned
    test    dstq, 15
    jnz .dst_unaligned
    ADD_HFYU_LEFT_LOOP 1, 1
.dst_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 1
.src_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 0
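; A rough C model of left prediction (a sketch with a hypothetical _model
; name; the asm computes this running byte prefix sum with log2-many
; shift+add steps per vector instead of serially, and returns the last
; reconstructed byte in eax):
;
;   int add_hfyu_left_prediction_model(uint8_t *dst, const uint8_t *src,
;                                      int w, int left)
;   {
;       int i;
;       for (i = 0; i < w; i++)
;           left = dst[i] = (uint8_t)(src[i] + left);
;       return left;
;   }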
;-----------------------------------------------------------------------------
; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min,
;                           int32_t max, unsigned int len)
;-----------------------------------------------------------------------------

; %1 = number of xmm registers used
; %2 = number of inline load/process/store loops per asm loop
; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
; %4 = CLIPD function takes min/max as float instead of int (CLIPD_SSE2)
; %5 = suffix
%macro VECTOR_CLIP_INT32 4-5
cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
%if %4
    cvtsi2ss  m4, minm
    cvtsi2ss  m5, maxm
%else
    movd      m4, minm
    movd      m5, maxm
%endif
    SPLATD    m4
    SPLATD    m5
.loop:
%assign %%i 1
%rep %2
    mova      m0,  [srcq+mmsize*0*%%i]
    mova      m1,  [srcq+mmsize*1*%%i]
    mova      m2,  [srcq+mmsize*2*%%i]
    mova      m3,  [srcq+mmsize*3*%%i]
%if %3
    mova      m7,  [srcq+mmsize*4*%%i]
    mova      m8,  [srcq+mmsize*5*%%i]
    mova      m9,  [srcq+mmsize*6*%%i]
    mova      m10, [srcq+mmsize*7*%%i]
%endif
    CLIPD  m0,  m4, m5, m6
    CLIPD  m1,  m4, m5, m6
    CLIPD  m2,  m4, m5, m6
    CLIPD  m3,  m4, m5, m6
%if %3
    CLIPD  m7,  m4, m5, m6
    CLIPD  m8,  m4, m5, m6
    CLIPD  m9,  m4, m5, m6
    CLIPD  m10, m4, m5, m6
%endif
    mova  [dstq+mmsize*0*%%i], m0
    mova  [dstq+mmsize*1*%%i], m1
    mova  [dstq+mmsize*2*%%i], m2
    mova  [dstq+mmsize*3*%%i], m3
%if %3
    mova  [dstq+mmsize*4*%%i], m7
    mova  [dstq+mmsize*5*%%i], m8
    mova  [dstq+mmsize*6*%%i], m9
    mova  [dstq+mmsize*7*%%i], m10
%endif
%assign %%i %%i+1
%endrep
    add     srcq, mmsize*4*(%2+%3)
    add     dstq, mmsize*4*(%2+%3)
    sub     lend, mmsize*(%2+%3)
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmx
%define CLIPD CLIPD_MMX
VECTOR_CLIP_INT32 0, 1, 0, 0
INIT_XMM sse2
VECTOR_CLIP_INT32 6, 1, 0, 0, _int
%define CLIPD CLIPD_SSE2
VECTOR_CLIP_INT32 6, 2, 0, 1
INIT_XMM sse4
%define CLIPD CLIPD_SSE41
%ifdef m8
VECTOR_CLIP_INT32 11, 1, 1, 0
%else
VECTOR_CLIP_INT32 6, 1, 0, 0
%endif

; %1 = aligned/unaligned
%macro BSWAP_LOOPS 1
    mov      r3, r2
    sar      r2, 3
    jz       .left4_%1
.loop8_%1:
    mov%1    m0, [r1 +  0]
    mov%1    m1, [r1 + 16]
%if cpuflag(ssse3)
    pshufb   m0, m2
    pshufb   m1, m2
    mov%1    [r0 +  0], m0
    mov%1    [r0 + 16], m1
%else
    pshuflw  m0, m0, 10110001b
    pshuflw  m1, m1, 10110001b
    pshufhw  m0, m0, 10110001b
    pshufhw  m1, m1, 10110001b
    mova     m2, m0
    mova     m3, m1
    psllw    m0, 8
    psllw    m1, 8
    psrlw    m2, 8
    psrlw    m3, 8
    por      m2, m0
    por      m3, m1
    mov%1    [r0 +  0], m2
    mov%1    [r0 + 16], m3
%endif
    add      r0, 32
    add      r1, 32
    dec      r2
    jnz      .loop8_%1
.left4_%1:
    mov      r2, r3
    and      r3, 4
    jz       .left
    mov%1    m0, [r1]
%if cpuflag(ssse3)
    pshufb   m0, m2
    mov%1    [r0], m0
%else
    pshuflw  m0, m0, 10110001b
    pshufhw  m0, m0, 10110001b
    mova     m2, m0
    psllw    m0, 8
    psrlw    m2, 8
    por      m2, m0
    mov%1    [r0], m2
%endif
    add      r1, 16
    add      r0, 16
%endmacro

; void bswap32_buf(uint32_t *dst, const uint32_t *src, int w);
%macro BSWAP32_BUF 0
%if cpuflag(ssse3)
cglobal bswap32_buf, 3,4,3
    mov      r3, r1
    mova     m2, [pb_bswap32]
%else
cglobal bswap32_buf, 3,4,5
    mov      r3, r1
%endif
    or       r3, r0
    and      r3, 15
    jz       .start_align
    BSWAP_LOOPS  u
    jmp      .left
.start_align:
    BSWAP_LOOPS  a
.left:
%if cpuflag(ssse3)
    mov      r3, r2
    and      r2, 2
    jz       .left1
    movq     m0, [r1]
    pshufb   m0, m2
    movq     [r0], m0
    add      r1, 8
    add      r0, 8
.left1:
    and      r3, 1
    jz       .end
    mov      r2d, [r1]
    bswap    r2d
    mov      [r0], r2d
%else
    and      r2, 3
    jz       .end
.loop2:
    mov      r3d, [r1]
    bswap    r3d
    mov      [r0], r3d
    add      r1, 4
    add      r0, 4
    dec      r2
    jnz      .loop2
%endif
.end:
    RET
%endmacro

INIT_XMM sse2
BSWAP32_BUF

INIT_XMM ssse3
BSWAP32_BUF
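; Rough C models of the two routines above (sketches with hypothetical
; _model names, not FFmpeg's actual C fallbacks):
;
;   void vector_clip_int32_model(int32_t *dst, const int32_t *src,
;                                int32_t min, int32_t max, unsigned int len)
;   {
;       unsigned int i;
;       for (i = 0; i < len; i++)
;           dst[i] = src[i] < min ? min : (src[i] > max ? max : src[i]);
;   }
;
;   void bswap32_buf_model(uint32_t *dst, const uint32_t *src, int w)
;   {
;       int i;
;       for (i = 0; i < w; i++)
;           dst[i] = (src[i] >> 24)               | ((src[i] >> 8) & 0xFF00) |
;                    ((src[i] << 8) & 0xFF0000)   |  (src[i] << 24);
;   }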