yading@10: ;****************************************************************************** yading@10: ;* VP8 MMXEXT optimizations yading@10: ;* Copyright (c) 2010 Ronald S. Bultje yading@10: ;* Copyright (c) 2010 Jason Garrett-Glaser yading@10: ;* yading@10: ;* This file is part of FFmpeg. yading@10: ;* yading@10: ;* FFmpeg is free software; you can redistribute it and/or yading@10: ;* modify it under the terms of the GNU Lesser General Public yading@10: ;* License as published by the Free Software Foundation; either yading@10: ;* version 2.1 of the License, or (at your option) any later version. yading@10: ;* yading@10: ;* FFmpeg is distributed in the hope that it will be useful, yading@10: ;* but WITHOUT ANY WARRANTY; without even the implied warranty of yading@10: ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU yading@10: ;* Lesser General Public License for more details. yading@10: ;* yading@10: ;* You should have received a copy of the GNU Lesser General Public yading@10: ;* License along with FFmpeg; if not, write to the Free Software yading@10: ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA yading@10: ;****************************************************************************** yading@10: yading@10: %include "libavutil/x86/x86util.asm" yading@10: yading@10: SECTION_RODATA yading@10: yading@10: fourtap_filter_hw_m: times 4 dw -6, 123 yading@10: times 4 dw 12, -1 yading@10: times 4 dw -9, 93 yading@10: times 4 dw 50, -6 yading@10: times 4 dw -6, 50 yading@10: times 4 dw 93, -9 yading@10: times 4 dw -1, 12 yading@10: times 4 dw 123, -6 yading@10: yading@10: sixtap_filter_hw_m: times 4 dw 2, -11 yading@10: times 4 dw 108, 36 yading@10: times 4 dw -8, 1 yading@10: times 4 dw 3, -16 yading@10: times 4 dw 77, 77 yading@10: times 4 dw -16, 3 yading@10: times 4 dw 1, -8 yading@10: times 4 dw 36, 108 yading@10: times 4 dw -11, 2 yading@10: yading@10: fourtap_filter_hb_m: times 8 db -6, 123 yading@10: times 8 db 12, -1 yading@10: times 8 db -9, 93 yading@10: times 8 db 50, -6 yading@10: times 8 db -6, 50 yading@10: times 8 db 93, -9 yading@10: times 8 db -1, 12 yading@10: times 8 db 123, -6 yading@10: yading@10: sixtap_filter_hb_m: times 8 db 2, 1 yading@10: times 8 db -11, 108 yading@10: times 8 db 36, -8 yading@10: times 8 db 3, 3 yading@10: times 8 db -16, 77 yading@10: times 8 db 77, -16 yading@10: times 8 db 1, 2 yading@10: times 8 db -8, 36 yading@10: times 8 db 108, -11 yading@10: yading@10: fourtap_filter_v_m: times 8 dw -6 yading@10: times 8 dw 123 yading@10: times 8 dw 12 yading@10: times 8 dw -1 yading@10: times 8 dw -9 yading@10: times 8 dw 93 yading@10: times 8 dw 50 yading@10: times 8 dw -6 yading@10: times 8 dw -6 yading@10: times 8 dw 50 yading@10: times 8 dw 93 yading@10: times 8 dw -9 yading@10: times 8 dw -1 yading@10: times 8 dw 12 yading@10: times 8 dw 123 yading@10: times 8 dw -6 yading@10: yading@10: sixtap_filter_v_m: times 8 dw 2 yading@10: times 8 dw -11 yading@10: times 8 dw 108 yading@10: times 8 dw 36 yading@10: times 8 dw -8 yading@10: times 8 dw 1 yading@10: times 8 dw 3 yading@10: times 8 dw -16 yading@10: times 8 dw 77 yading@10: times 8 dw 77 yading@10: times 8 dw -16 yading@10: times 8 dw 3 yading@10: times 8 dw 1 yading@10: times 8 dw -8 yading@10: times 8 dw 36 yading@10: times 8 dw 108 yading@10: times 8 dw -11 yading@10: times 8 dw 2 yading@10: yading@10: bilinear_filter_vw_m: times 8 dw 1 yading@10: times 8 dw 2 yading@10: times 8 dw 3 yading@10: times 8 dw 4 yading@10: times 8 dw 5 yading@10: times 
bilinear_filter_vw_m: times 8 dw 1
                      times 8 dw 2
                      times 8 dw 3
                      times 8 dw 4
                      times 8 dw 5
                      times 8 dw 6
                      times 8 dw 7

bilinear_filter_vb_m: times 8 db 7, 1
                      times 8 db 6, 2
                      times 8 db 5, 3
                      times 8 db 4, 4
                      times 8 db 3, 5
                      times 8 db 2, 6
                      times 8 db 1, 7
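; Bilinear weights for fractional position n in eighths: the vb table packs
; the byte pair (8-n, n) for pmaddubsw, while the vw table stores only n as
; words; the matching 8-n entry is found by negating the index and reading
; the table from the other end (see the bilinear functions below).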
%ifdef PIC
%define fourtap_filter_hw  picregq
%define sixtap_filter_hw   picregq
%define fourtap_filter_hb  picregq
%define sixtap_filter_hb   picregq
%define fourtap_filter_v   picregq
%define sixtap_filter_v    picregq
%define bilinear_filter_vw picregq
%define bilinear_filter_vb picregq
%define npicregs 1
%else
%define fourtap_filter_hw  fourtap_filter_hw_m
%define sixtap_filter_hw   sixtap_filter_hw_m
%define fourtap_filter_hb  fourtap_filter_hb_m
%define sixtap_filter_hb   sixtap_filter_hb_m
%define fourtap_filter_v   fourtap_filter_v_m
%define sixtap_filter_v    sixtap_filter_v_m
%define bilinear_filter_vw bilinear_filter_vw_m
%define bilinear_filter_vb bilinear_filter_vb_m
%define npicregs 0
%endif

filter_h2_shuf:  db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
filter_h4_shuf:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10

filter_h6_shuf1: db 0, 5, 1, 6, 2, 7, 3, 8, 4, 9, 5, 10, 6, 11, 7, 12
filter_h6_shuf2: db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
filter_h6_shuf3: db 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11

pw_27:    times 8 dw 27
pw_63:    times 8 dw 63
pw_256:   times 8 dw 256
pw_20091: times 4 dw 20091
pw_17734: times 4 dw 17734

pb_4:     times 16 db 4
pb_F8:    times 16 db 0xF8
pb_FE:    times 16 db 0xFE
pb_27_63: times 8 db 27, 63
pb_18_63: times 8 db 18, 63
pb_9_63:  times 8 db  9, 63

cextern pb_1
cextern pw_3
cextern pb_3
cextern pw_4
cextern pw_9
cextern pw_18
cextern pw_64
cextern pb_80

SECTION .text

;-----------------------------------------------------------------------------
; subpel MC functions:
;
; void put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, int deststride,
;                                              uint8_t *src, int srcstride,
;                                              int height, int mx, int my);
;-----------------------------------------------------------------------------

%macro FILTER_SSSE3 1
cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, height, mx, picreg
    lea      mxd, [mxq*3]
    mova      m3, [filter_h6_shuf2]
    mova      m4, [filter_h6_shuf3]
%ifdef PIC
    lea  picregq, [sixtap_filter_hb_m]
%endif
    mova      m5, [sixtap_filter_hb+mxq*8-48] ; set up 6tap filter in bytes
    mova      m6, [sixtap_filter_hb+mxq*8-32]
    mova      m7, [sixtap_filter_hb+mxq*8-16]

.nextrow:
    movu      m0, [srcq-2]
    mova      m1, m0
    mova      m2, m0
%if mmsize == 8
; For epel4, we need 9 bytes, but only 8 get loaded; to compensate, do the
; shuffle with a memory operand
    punpcklbw m0, [srcq+3]
%else
    pshufb    m0, [filter_h6_shuf1]
%endif
    pshufb    m1, m3
    pshufb    m2, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    pmaddubsw m2, m7
    paddsw    m0, m1
    paddsw    m0, m2
    pmulhrsw  m0, [pw_256]
    packuswb  m0, m0
    movh  [dstq], m0             ; store

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    shl      mxd, 4
    mova      m2, [pw_256]
    mova      m3, [filter_h2_shuf]
    mova      m4, [filter_h4_shuf]
%ifdef PIC
    lea  picregq, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+mxq-16] ; set up 4tap filter in bytes
    mova      m6, [fourtap_filter_hb+mxq]

.nextrow:
    movu      m0, [srcq-1]
    mova      m1, m0
    pshufb    m0, m3
    pshufb    m1, m4
    pmaddubsw m0, m5
    pmaddubsw m1, m6
    paddsw    m0, m1
    pmulhrsw  m0, m2
    packuswb  m0, m0
    movh  [dstq], m0             ; store

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    shl      myd, 4
%ifdef PIC
    lea  picregq, [fourtap_filter_hb_m]
%endif
    mova      m5, [fourtap_filter_hb+myq-16]
    mova      m6, [fourtap_filter_hb+myq]
    mova      m7, [pw_256]

    ; read 3 lines
    sub     srcq, srcstrideq
    movh      m0, [srcq]
    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]
    add     srcq, srcstrideq

.nextrow:
    movh      m3, [srcq+2*srcstrideq] ; read new row
    mova      m4, m0
    mova      m0, m1
    punpcklbw m4, m1
    mova      m1, m2
    punpcklbw m2, m3
    pmaddubsw m4, m5
    pmaddubsw m2, m6
    paddsw    m4, m2
    mova      m2, m3
    pmulhrsw  m4, m7
    packuswb  m4, m4
    movh  [dstq], m4

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET

cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    lea      myd, [myq*3]
%ifdef PIC
    lea  picregq, [sixtap_filter_hb_m]
%endif
    lea      myq, [sixtap_filter_hb+myq*8]

    ; read 5 lines
    sub     srcq, srcstrideq
    sub     srcq, srcstrideq
    movh      m0, [srcq]
    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    add     srcq, srcstrideq
    movh      m3, [srcq]
    movh      m4, [srcq+srcstrideq]

.nextrow:
    movh      m5, [srcq+2*srcstrideq] ; read new row
    mova      m6, m0
    punpcklbw m6, m5
    mova      m0, m1
    punpcklbw m1, m2
    mova      m7, m3
    punpcklbw m7, m4
    pmaddubsw m6, [myq-48]
    pmaddubsw m1, [myq-32]
    pmaddubsw m7, [myq-16]
    paddsw    m6, m1
    paddsw    m6, m7
    mova      m1, m2
    mova      m2, m3
    pmulhrsw  m6, [pw_256]
    mova      m3, m4
    packuswb  m6, m6
    mova      m4, m5
    movh  [dstq], m6

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX ssse3
FILTER_SSSE3 4
INIT_XMM ssse3
FILTER_SSSE3 8
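; Rounding note for the SSSE3 filters above: after the pmaddubsw/paddsw tap
; sums, pmulhrsw with pw_256 computes (x*256 + 0x4000) >> 15 = (x + 64) >> 7,
; i.e. the correctly rounded result for coefficients that sum to 128.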
; 4x4 block, H-only 4-tap filter
INIT_MMX mmxext
cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    shl       mxd, 4
%ifdef PIC
    lea   picregq, [fourtap_filter_hw_m]
%endif
    movq      mm4, [fourtap_filter_hw+mxq-16] ; set up 4tap filter in words
    movq      mm5, [fourtap_filter_hw+mxq]
    movq      mm7, [pw_64]
    pxor      mm6, mm6

.nextrow:
    movq      mm1, [srcq-1]      ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1           ; byte ABCD..
    punpcklbw mm1, mm6           ; byte->word ABCD
    pshufw    mm0, mm2, 9        ; byte CDEF..
    punpcklbw mm0, mm6           ; byte->word CDEF
    pshufw    mm3, mm1, 0x94     ; word ABBC
    pshufw    mm1, mm0, 0x94     ; word CDDE
    pmaddwd   mm3, mm4           ; multiply 2px with F0/F1
    movq      mm0, mm1           ; backup for second set of pixels
    pmaddwd   mm1, mm5           ; multiply 2px with F2/F3
    paddd     mm3, mm1           ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    punpckhbw mm2, mm6           ; byte->word EFGH
    pmaddwd   mm0, mm4           ; multiply backed up 2px with F0/F1
    pshufw    mm1, mm2, 0x94     ; word EFFG
    pmaddwd   mm1, mm5           ; multiply 2px with F2/F3
    paddd     mm0, mm1           ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm3, mm0           ; merge dword->word (4px)
    paddsw    mm3, mm7           ; rounding
    psraw     mm3, 7
    packuswb  mm3, mm6           ; clip and word->bytes
    movd   [dstq], mm3           ; store

    ; go to next line
    add      dstq, dststrideq
    add      srcq, srcstrideq
    dec   heightd                ; next row
    jg .nextrow
    REP_RET

; 4x4 block, H-only 6-tap filter
INIT_MMX mmxext
cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, height, mx, picreg
    lea       mxd, [mxq*3]
%ifdef PIC
    lea   picregq, [sixtap_filter_hw_m]
%endif
    movq      mm4, [sixtap_filter_hw+mxq*8-48] ; set up 6tap filter in words
    movq      mm5, [sixtap_filter_hw+mxq*8-32]
    movq      mm6, [sixtap_filter_hw+mxq*8-16]
    movq      mm7, [pw_64]
    pxor      mm3, mm3

.nextrow:
    movq      mm1, [srcq-2]      ; (ABCDEFGH) load 8 horizontal pixels

    ; first set of 2 pixels
    movq      mm2, mm1           ; byte ABCD..
    punpcklbw mm1, mm3           ; byte->word ABCD
    pshufw    mm0, mm2, 0x9      ; byte CDEF..
    punpckhbw mm2, mm3           ; byte->word EFGH
    punpcklbw mm0, mm3           ; byte->word CDEF
    pshufw    mm1, mm1, 0x94     ; word ABBC
    pshufw    mm2, mm2, 0x94     ; word EFFG
    pmaddwd   mm1, mm4           ; multiply 2px with F0/F1
    pshufw    mm3, mm0, 0x94     ; word CDDE
    movq      mm0, mm3           ; backup for second set of pixels
    pmaddwd   mm3, mm5           ; multiply 2px with F2/F3
    paddd     mm1, mm3           ; add to 1st 2px cache
    movq      mm3, mm2           ; backup for second set of pixels
    pmaddwd   mm2, mm6           ; multiply 2px with F4/F5
    paddd     mm1, mm2           ; finish 1st 2px

    ; second set of 2 pixels, use backup of above
    movd      mm2, [srcq+3]      ; byte FGHI (prevent overreads)
    pmaddwd   mm0, mm4           ; multiply 1st backed up 2px with F0/F1
    pmaddwd   mm3, mm5           ; multiply 2nd backed up 2px with F2/F3
    paddd     mm0, mm3           ; add to 2nd 2px cache
    pxor      mm3, mm3
    punpcklbw mm2, mm3           ; byte->word FGHI
    pshufw    mm2, mm2, 0xE9     ; word GHHI
    pmaddwd   mm2, mm6           ; multiply 2px with F4/F5
    paddd     mm0, mm2           ; finish 2nd 2px

    ; merge two sets of 2 pixels into one set of 4, round/clip/store
    packssdw  mm1, mm0           ; merge dword->word (4px)
    paddsw    mm1, mm7           ; rounding
    psraw     mm1, 7
    packuswb  mm1, mm3           ; clip and word->bytes
    movd   [dstq], mm1           ; store

    ; go to next line
    add      dstq, dststrideq
    add      srcq, srcstrideq
    dec   heightd                ; next row
    jg .nextrow
    REP_RET
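; The two MMXEXT 4-px functions above work on words instead of bytes: pixels
; are zero-extended, pshufw builds overlapping pairs (A B B C etc.), and each
; pmaddwd produces two filtered pixels against a word coefficient pair;
; pw_64 + psraw 7 then round and rescale the 128-weighted sum.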
INIT_XMM sse2
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
    shl      mxd, 5
%ifdef PIC
    lea  picregq, [fourtap_filter_v_m]
%endif
    lea      mxq, [fourtap_filter_v+mxq-32]
    pxor      m7, m7
    mova      m4, [pw_64]
    mova      m5, [mxq+ 0]
    mova      m6, [mxq+16]
%ifdef m8
    mova      m8, [mxq+32]
    mova      m9, [mxq+48]
%endif
.nextrow:
    movq      m0, [srcq-1]
    movq      m1, [srcq-0]
    movq      m2, [srcq+1]
    movq      m3, [srcq+2]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    pmullw    m0, m5
    pmullw    m1, m6
%ifdef m8
    pmullw    m2, m8
    pmullw    m3, m9
%else
    pmullw    m2, [mxq+32]
    pmullw    m3, [mxq+48]
%endif
    paddsw    m0, m1
    paddsw    m2, m3
    paddsw    m0, m2
    paddsw    m0, m4
    psraw     m0, 7
    packuswb  m0, m7
    movh  [dstq], m0             ; store

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET

INIT_XMM sse2
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
    lea      mxd, [mxq*3]
    shl      mxd, 4
%ifdef PIC
    lea  picregq, [sixtap_filter_v_m]
%endif
    lea      mxq, [sixtap_filter_v+mxq-96]
    pxor      m7, m7
    mova      m6, [pw_64]
%ifdef m8
    mova      m8, [mxq+ 0]
    mova      m9, [mxq+16]
    mova     m10, [mxq+32]
    mova     m11, [mxq+48]
    mova     m12, [mxq+64]
    mova     m13, [mxq+80]
%endif
.nextrow:
    movq      m0, [srcq-2]
    movq      m1, [srcq-1]
    movq      m2, [srcq-0]
    movq      m3, [srcq+1]
    movq      m4, [srcq+2]
    movq      m5, [srcq+3]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7
    punpcklbw m5, m7
%ifdef m8
    pmullw    m0, m8
    pmullw    m1, m9
    pmullw    m2, m10
    pmullw    m3, m11
    pmullw    m4, m12
    pmullw    m5, m13
%else
    pmullw    m0, [mxq+ 0]
    pmullw    m1, [mxq+16]
    pmullw    m2, [mxq+32]
    pmullw    m3, [mxq+48]
    pmullw    m4, [mxq+64]
    pmullw    m5, [mxq+80]
%endif
    paddsw    m1, m4
    paddsw    m0, m5
    paddsw    m1, m2
    paddsw    m0, m3
    paddsw    m0, m1
    paddsw    m0, m6
    psraw     m0, 7
    packuswb  m0, m7
    movh  [dstq], m0             ; store

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET
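; The %ifdef m8 blocks in the two SSE2 functions above take advantage of
; x86-64, where xmm8-xmm13 exist and all six filter taps can be kept in
; registers; on x86-32 the upper taps are re-read from memory every row.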
%macro FILTER_V 1
; 4x4 block, V-only 4-tap filter
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    shl      myd, 5
%ifdef PIC
    lea  picregq, [fourtap_filter_v_m]
%endif
    lea      myq, [fourtap_filter_v+myq-32]
    mova      m6, [pw_64]
    pxor      m7, m7
    mova      m5, [myq+48]

    ; read 3 lines
    sub     srcq, srcstrideq
    movh      m0, [srcq]
    movh      m1, [srcq+  srcstrideq]
    movh      m2, [srcq+2*srcstrideq]
    add     srcq, srcstrideq
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7

.nextrow:
    ; first calculate negative taps (to prevent losing positive overflows)
    movh      m4, [srcq+2*srcstrideq] ; read new row
    punpcklbw m4, m7
    mova      m3, m4
    pmullw    m0, [myq+0]
    pmullw    m4, m5
    paddsw    m4, m0

    ; then calculate positive taps
    mova      m0, m1
    pmullw    m1, [myq+16]
    paddsw    m4, m1
    mova      m1, m2
    pmullw    m2, [myq+32]
    paddsw    m4, m2
    mova      m2, m3

    ; round/clip/store
    paddsw    m4, m6
    psraw     m4, 7
    packuswb  m4, m7
    movh  [dstq], m4

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET


; 4x4 block, V-only 6-tap filter
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
    shl      myd, 4
    lea      myq, [myq*3]
%ifdef PIC
    lea  picregq, [sixtap_filter_v_m]
%endif
    lea      myq, [sixtap_filter_v+myq-96]
    pxor      m7, m7

    ; read 5 lines
    sub     srcq, srcstrideq
    sub     srcq, srcstrideq
    movh      m0, [srcq]
    movh      m1, [srcq+srcstrideq]
    movh      m2, [srcq+srcstrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    add     srcq, srcstrideq
    movh      m3, [srcq]
    movh      m4, [srcq+srcstrideq]
    punpcklbw m0, m7
    punpcklbw m1, m7
    punpcklbw m2, m7
    punpcklbw m3, m7
    punpcklbw m4, m7

.nextrow:
    ; first calculate negative taps (to prevent losing positive overflows)
    mova      m5, m1
    pmullw    m5, [myq+16]
    mova      m6, m4
    pmullw    m6, [myq+64]
    paddsw    m6, m5

    ; then calculate positive taps
    movh      m5, [srcq+2*srcstrideq] ; read new row
    punpcklbw m5, m7
    pmullw    m0, [myq+0]
    paddsw    m6, m0
    mova      m0, m1
    mova      m1, m2
    pmullw    m2, [myq+32]
    paddsw    m6, m2
    mova      m2, m3
    pmullw    m3, [myq+48]
    paddsw    m6, m3
    mova      m3, m4
    mova      m4, m5
    pmullw    m5, [myq+80]
    paddsw    m6, m5

    ; round/clip/store
    paddsw    m6, [pw_64]
    psraw     m6, 7
    packuswb  m6, m7
    movh  [dstq], m6

    ; go to next line
    add     dstq, dststrideq
    add     srcq, srcstrideq
    dec  heightd                 ; next row
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX mmxext
FILTER_V 4
INIT_XMM sse2
FILTER_V 8
%macro FILTER_BILINEAR 1
cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, picreg, my
    shl      myd, 4
%ifdef PIC
    lea  picregq, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6
    mova      m5, [bilinear_filter_vw+myq-1*16]
    neg      myq
    mova      m4, [bilinear_filter_vw+myq+7*16]
.nextrow:
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m3, [srcq+srcstrideq*2]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m3, m6
    mova      m2, m1
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6
    pavgw     m2, m6
%if mmsize == 8
    packuswb  m0, m0
    packuswb  m2, m2
    movh [dstq+dststrideq*0], m0
    movh [dstq+dststrideq*1], m2
%else
    packuswb  m0, m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
    shl      mxd, 4
%ifdef PIC
    lea  picregq, [bilinear_filter_vw_m]
%endif
    pxor      m6, m6
    mova      m5, [bilinear_filter_vw+mxq-1*16]
    neg      mxq
    mova      m4, [bilinear_filter_vw+mxq+7*16]
.nextrow:
    movh      m0, [srcq+srcstrideq*0+0]
    movh      m1, [srcq+srcstrideq*0+1]
    movh      m2, [srcq+srcstrideq*1+0]
    movh      m3, [srcq+srcstrideq*1+1]
    punpcklbw m0, m6
    punpcklbw m1, m6
    punpcklbw m2, m6
    punpcklbw m3, m6
    pmullw    m0, m4
    pmullw    m1, m5
    pmullw    m2, m4
    pmullw    m3, m5
    paddsw    m0, m1
    paddsw    m2, m3
    psraw     m0, 2
    psraw     m2, 2
    pavgw     m0, m6
    pavgw     m2, m6
%if mmsize == 8
    packuswb  m0, m0
    packuswb  m2, m2
    movh [dstq+dststrideq*0], m0
    movh [dstq+dststrideq*1], m2
%else
    packuswb  m0, m2
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX mmxext
FILTER_BILINEAR 4
INIT_XMM sse2
FILTER_BILINEAR 8
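; Rounding in FILTER_BILINEAR: the target is (a*(8-n) + b*n + 4) >> 3.
; psraw by 2 followed by pavgw against the zero register computes
; ((x >> 2) + 1) >> 1, which for the non-negative sums here equals exactly
; (x + 4) >> 3, avoiding a separate paddw with a rounding constant.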
%macro FILTER_BILINEAR_SSSE3 1
cglobal put_vp8_bilinear%1_v, 7, 7, 5, dst, dststride, src, srcstride, height, picreg, my
    shl      myd, 4
%ifdef PIC
    lea  picregq, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4
    mova      m3, [bilinear_filter_vb+myq-16]
.nextrow:
    movh      m0, [srcq+srcstrideq*0]
    movh      m1, [srcq+srcstrideq*1]
    movh      m2, [srcq+srcstrideq*2]
    punpcklbw m0, m1
    punpcklbw m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4
    pavgw     m1, m4
%if mmsize==8
    packuswb  m0, m0
    packuswb  m1, m1
    movh [dstq+dststrideq*0], m0
    movh [dstq+dststrideq*1], m1
%else
    packuswb  m0, m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET

cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
    shl      mxd, 4
%ifdef PIC
    lea  picregq, [bilinear_filter_vb_m]
%endif
    pxor      m4, m4
    mova      m2, [filter_h2_shuf]
    mova      m3, [bilinear_filter_vb+mxq-16]
.nextrow:
    movu      m0, [srcq+srcstrideq*0]
    movu      m1, [srcq+srcstrideq*1]
    pshufb    m0, m2
    pshufb    m1, m2
    pmaddubsw m0, m3
    pmaddubsw m1, m3
    psraw     m0, 2
    psraw     m1, 2
    pavgw     m0, m4
    pavgw     m1, m4
%if mmsize==8
    packuswb  m0, m0
    packuswb  m1, m1
    movh [dstq+dststrideq*0], m0
    movh [dstq+dststrideq*1], m1
%else
    packuswb  m0, m1
    movh   [dstq+dststrideq*0], m0
    movhps [dstq+dststrideq*1], m0
%endif

    lea     dstq, [dstq+dststrideq*2]
    lea     srcq, [srcq+srcstrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET
%endmacro

INIT_MMX ssse3
FILTER_BILINEAR_SSSE3 4
INIT_XMM ssse3
FILTER_BILINEAR_SSSE3 8

INIT_MMX mmx
cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
.nextrow:
    movq     mm0, [srcq+srcstrideq*0]
    movq     mm1, [srcq+srcstrideq*1]
    lea     srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0], mm0
    movq [dstq+dststrideq*1], mm1
    lea     dstq, [dstq+dststrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET

%if ARCH_X86_32
INIT_MMX mmx
cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
.nextrow:
    movq     mm0, [srcq+srcstrideq*0+0]
    movq     mm1, [srcq+srcstrideq*0+8]
    movq     mm2, [srcq+srcstrideq*1+0]
    movq     mm3, [srcq+srcstrideq*1+8]
    lea     srcq, [srcq+srcstrideq*2]
    movq [dstq+dststrideq*0+0], mm0
    movq [dstq+dststrideq*0+8], mm1
    movq [dstq+dststrideq*1+0], mm2
    movq [dstq+dststrideq*1+8], mm3
    lea     dstq, [dstq+dststrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET
%endif

INIT_XMM sse
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
.nextrow:
    movups  xmm0, [srcq+srcstrideq*0]
    movups  xmm1, [srcq+srcstrideq*1]
    lea     srcq, [srcq+srcstrideq*2]
    movaps [dstq+dststrideq*0], xmm0
    movaps [dstq+dststrideq*1], xmm1
    lea     dstq, [dstq+dststrideq*2]
    sub  heightd, 2
    jg .nextrow
    REP_RET
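; put_vp8_pixels16 (SSE) loads with movups, since the motion-compensated
; source can have any alignment, but stores with movaps, which requires the
; destination rows to be 16-byte aligned.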
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], int stride);
;-----------------------------------------------------------------------------

%macro ADD_DC 4
    %4        m2, [dst1q+%3]
    %4        m3, [dst1q+strideq+%3]
    %4        m4, [dst2q+%3]
    %4        m5, [dst2q+strideq+%3]
    paddusb   m2, %1
    paddusb   m3, %1
    paddusb   m4, %1
    paddusb   m5, %1
    psubusb   m2, %2
    psubusb   m3, %2
    psubusb   m4, %2
    psubusb   m5, %2
    %4 [dst1q+%3], m2
    %4 [dst1q+strideq+%3], m3
    %4 [dst2q+%3], m4
    %4 [dst2q+strideq+%3], m5
%endmacro

INIT_MMX mmx
cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
    ; load data
    movd       m0, [blockq]

    ; calculate DC
    paddw      m0, [pw_4]
    pxor       m1, m1
    psraw      m0, 3
    movd [blockq], m1
    psubw      m1, m0
    packuswb   m0, m0
    packuswb   m1, m1
    punpcklbw  m0, m0
    punpcklbw  m1, m1
    punpcklwd  m0, m0
    punpcklwd  m1, m1

    ; add DC
    DEFINE_ARGS dst1, dst2, stride
    lea     dst2q, [dst1q+strideq*2]
    ADD_DC m0, m1, 0, movh
    RET

INIT_XMM sse4
cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
    ; load data
    movd       m0, [blockq]
    pxor       m1, m1

    ; calculate DC
    paddw      m0, [pw_4]
    movd [blockq], m1
    DEFINE_ARGS dst1, dst2, stride
    lea     dst2q, [dst1q+strideq*2]
    movd       m2, [dst1q]
    movd       m3, [dst1q+strideq]
    movd       m4, [dst2q]
    movd       m5, [dst2q+strideq]
    psraw      m0, 3
    pshuflw    m0, m0, 0
    punpcklqdq m0, m0
    punpckldq  m2, m3
    punpckldq  m4, m5
    punpcklbw  m2, m1
    punpcklbw  m4, m1
    paddw      m2, m0
    paddw      m4, m0
    packuswb   m2, m4
    movd   [dst1q], m2
    pextrd [dst1q+strideq], m2, 1
    pextrd [dst2q], m2, 2
    pextrd [dst2q+strideq], m2, 3
    RET
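; The DC value is (block[0] + 4) >> 3.  The MMX version splits it into two
; unsigned byte splats, max(dc, 0) in the first ADD_DC argument and
; max(-dc, 0) in the second, so ADD_DC can apply the signed delta with a
; saturating paddusb/psubusb pair, clamping to [0,255] without ever widening
; the pixels to words.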
;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], int stride);
;-----------------------------------------------------------------------------

%if ARCH_X86_32
INIT_MMX mmx
cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
    ; load data
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    pxor      m6, m6

    ; calculate DC
    paddw     m0, [pw_4]
    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6
    psraw     m0, 3
    psubw     m6, m0
    packuswb  m0, m0
    packuswb  m6, m6
    punpcklbw m0, m0            ; AABBCCDD
    punpcklbw m6, m6            ; AABBCCDD
    movq      m1, m0
    movq      m7, m6
    punpcklbw m0, m0            ; AAAABBBB
    punpckhbw m1, m1            ; CCCCDDDD
    punpcklbw m6, m6            ; AAAABBBB
    punpckhbw m7, m7            ; CCCCDDDD

    ; add DC
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC m0, m6, 0, mova
    ADD_DC m1, m7, 8, mova
    RET
%endif

INIT_XMM sse2
cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
    ; load data
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    pxor      m1, m1

    ; calculate DC
    paddw     m0, [pw_4]
    movd [blockq+32*0], m1
    movd [blockq+32*1], m1
    movd [blockq+32*2], m1
    movd [blockq+32*3], m1
    psraw     m0, 3
    psubw     m1, m0
    packuswb  m0, m0
    packuswb  m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1
    punpcklbw m0, m0
    punpcklbw m1, m1

    ; add DC
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC m0, m1, 0, mova
    RET

;-----------------------------------------------------------------------------
; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, int16_t block[4][16], int stride);
;-----------------------------------------------------------------------------

INIT_MMX mmx
cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
    ; load data
    movd      m0, [blockq+32*0] ; A
    movd      m1, [blockq+32*2] ; C
    punpcklwd m0, [blockq+32*1] ; A B
    punpcklwd m1, [blockq+32*3] ; C D
    punpckldq m0, m1            ; A B C D
    pxor      m6, m6

    ; calculate DC
    paddw     m0, [pw_4]
    movd [blockq+32*0], m6
    movd [blockq+32*1], m6
    movd [blockq+32*2], m6
    movd [blockq+32*3], m6
    psraw     m0, 3
    psubw     m6, m0
    packuswb  m0, m0
    packuswb  m6, m6
    punpcklbw m0, m0            ; AABBCCDD
    punpcklbw m6, m6            ; AABBCCDD
    movq      m1, m0
    movq      m7, m6
    punpcklbw m0, m0            ; AAAABBBB
    punpckhbw m1, m1            ; CCCCDDDD
    punpcklbw m6, m6            ; AAAABBBB
    punpckhbw m7, m7            ; CCCCDDDD

    ; add DC
    DEFINE_ARGS dst1, dst2, stride
    lea    dst2q, [dst1q+strideq*2]
    ADD_DC m0, m6, 0, mova
    lea    dst1q, [dst1q+strideq*4]
    lea    dst2q, [dst2q+strideq*4]
    ADD_DC m1, m7, 0, mova
    RET
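; vp8_idct_dc_add4y handles four 4x4 luma blocks sitting side by side (A-D
; across 16 pixels), so one pass over 16 bytes per row covers all of them
; (split into two 8-byte ADD_DCs in the MMX version); vp8_idct_dc_add4uv has
; the blocks in a 2x2 layout, hence the second ADD_DC after advancing both
; pointers by 4 rows.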
;-----------------------------------------------------------------------------
; void vp8_idct_add_<opt>(uint8_t *dst, int16_t block[16], int stride);
;-----------------------------------------------------------------------------

; calculate %1=mul_35468(%1)-mul_20091(%2); %2=mul_20091(%1)+mul_35468(%2)
; this macro assumes that m6/m7 have words for 20091/17734 loaded
%macro VP8_MULTIPLY_SUMSUB 4
    mova       %3, %1
    mova       %4, %2
    pmulhw     %3, m6            ;20091(1)
    pmulhw     %4, m6            ;20091(2)
    paddw      %3, %1
    paddw      %4, %2
    paddw      %1, %1
    paddw      %2, %2
    pmulhw     %1, m7            ;35468(1)
    pmulhw     %2, m7            ;35468(2)
    psubw      %1, %4
    paddw      %2, %3
%endmacro

; calculate x0=%1+%3; x1=%1-%3
;           x2=mul_35468(%2)-mul_20091(%4); x3=mul_20091(%2)+mul_35468(%4)
;           %1=x0+x3 (tmp0); %2=x1+x2 (tmp1); %3=x1-x2 (tmp2); %4=x0-x3 (tmp3)
; %5/%6 are temporary registers
; we assume m6/m7 have constant words 20091/17734 loaded in them
%macro VP8_IDCT_TRANSFORM4x4_1D 6
    SUMSUB_BA           w, %3, %1, %5       ;t0, t1
    VP8_MULTIPLY_SUMSUB m%2, m%4, m%5, m%6  ;t2, t3
    SUMSUB_BA           w, %4, %3, %5       ;tmp0, tmp3
    SUMSUB_BA           w, %2, %1, %5       ;tmp1, tmp2
    SWAP %4, %1
    SWAP %4, %3
%endmacro

%macro VP8_IDCT_ADD 0
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
    ; load block data
    movq         m0, [blockq+ 0]
    movq         m1, [blockq+ 8]
    movq         m2, [blockq+16]
    movq         m3, [blockq+24]
    movq         m6, [pw_20091]
    movq         m7, [pw_17734]
%if cpuflag(sse)
    xorps      xmm0, xmm0
    movaps [blockq+ 0], xmm0
    movaps [blockq+16], xmm0
%else
    pxor         m4, m4
    movq [blockq+ 0], m4
    movq [blockq+ 8], m4
    movq [blockq+16], m4
    movq [blockq+24], m4
%endif

    ; actual IDCT
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4
    paddw        m0, [pw_4]
    VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W            0, 1, 2, 3, 4

    ; store
    pxor         m4, m4
    DEFINE_ARGS dst1, dst2, stride
    lea       dst2q, [dst1q+2*strideq]
    STORE_DIFFx2 m0, m1, m6, m7, m4, 3, dst1q, strideq
    STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq

    RET
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
VP8_IDCT_ADD
%endif
INIT_MMX sse
VP8_IDCT_ADD
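; The transform constants: sqrt(2)*cos(pi/8) ~ 1 + 20091/65536 and
; sqrt(2)*sin(pi/8) ~ 35468/65536.  35468 does not fit in a signed word for
; pmulhw, so pw_17734 stores 35468/2 and VP8_MULTIPLY_SUMSUB doubles the
; input first (paddw %1, %1); mul_20091(x) is likewise formed as
; x + (x*20091 >> 16).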
;-----------------------------------------------------------------------------
; void vp8_luma_dc_wht_mmxext(int16_t block[4][4][16], int16_t dc[16])
;-----------------------------------------------------------------------------

%macro SCATTER_WHT 3
    movd  dc1d, m%1
    movd  dc2d, m%2
    mov [blockq+2*16*(0+%3)], dc1w
    mov [blockq+2*16*(1+%3)], dc2w
    shr   dc1d, 16
    shr   dc2d, 16
    psrlq  m%1, 32
    psrlq  m%2, 32
    mov [blockq+2*16*(4+%3)], dc1w
    mov [blockq+2*16*(5+%3)], dc2w
    movd  dc1d, m%1
    movd  dc2d, m%2
    mov [blockq+2*16*(8+%3)], dc1w
    mov [blockq+2*16*(9+%3)], dc2w
    shr   dc1d, 16
    shr   dc2d, 16
    mov [blockq+2*16*(12+%3)], dc1w
    mov [blockq+2*16*(13+%3)], dc2w
%endmacro

%macro HADAMARD4_1D 4
    SUMSUB_BADC w, %2, %1, %4, %3
    SUMSUB_BADC w, %4, %2, %3, %1
    SWAP %1, %4, %3
%endmacro

%macro VP8_DC_WHT 0
cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
    movq          m0, [dc1q]
    movq          m1, [dc1q+8]
    movq          m2, [dc1q+16]
    movq          m3, [dc1q+24]
%if cpuflag(sse)
    xorps       xmm0, xmm0
    movaps [dc1q+ 0], xmm0
    movaps [dc1q+16], xmm0
%else
    pxor          m4, m4
    movq  [dc1q+ 0], m4
    movq  [dc1q+ 8], m4
    movq  [dc1q+16], m4
    movq  [dc1q+24], m4
%endif
    HADAMARD4_1D  0, 1, 2, 3
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    paddw         m0, [pw_3]
    HADAMARD4_1D  0, 1, 2, 3
    psraw         m0, 3
    psraw         m1, 3
    psraw         m2, 3
    psraw         m3, 3
    SCATTER_WHT   0, 1, 0
    SCATTER_WHT   2, 3, 2
    RET
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
VP8_DC_WHT
%endif
INIT_MMX sse
VP8_DC_WHT
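; vp8_luma_dc_wht is the inverse 4x4 Walsh-Hadamard transform of the luma DC
; plane: two HADAMARD4_1D passes around a transpose, normalized as (x+3)>>3,
; after which SCATTER_WHT stores each result into coefficient 0 of its 4x4
; block (the blocks' coefficient arrays lie 16 words = 32 bytes apart).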
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, int stride, int flim);
;-----------------------------------------------------------------------------

; macro called with 7 mm register indexes as argument, and 4 regular registers
;
; first 4 mm registers will carry the transposed pixel data
; the other three are scratchspace (one would be sufficient, but this allows
; for more spreading/pipelining and thus faster execution on OOE CPUs)
;
; first two regular registers are buf+4*stride and buf+5*stride
; third is -stride, fourth is +stride
%macro READ_8x4_INTERLEAVED 11
    ; interleave 8 (A-H) rows of 4 pixels each
    movd      m%1, [%8+%10*4]    ; A0-3
    movd      m%5, [%9+%10*4]    ; B0-3
    movd      m%2, [%8+%10*2]    ; C0-3
    movd      m%6, [%8+%10]      ; D0-3
    movd      m%3, [%8]          ; E0-3
    movd      m%7, [%9]          ; F0-3
    movd      m%4, [%9+%11]      ; G0-3
    punpcklbw m%1, m%5           ; A/B interleaved
    movd      m%5, [%9+%11*2]    ; H0-3
    punpcklbw m%2, m%6           ; C/D interleaved
    punpcklbw m%3, m%7           ; E/F interleaved
    punpcklbw m%4, m%5           ; G/H interleaved
%endmacro

; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_INTERLEAVED above
; fifth regular register is scratchspace to reach the bottom 8 rows, it
; will be set to second regular register + 8*stride at the end
%macro READ_16x4_INTERLEAVED 12
    ; transpose 16 (A-P) rows of 4 pixels each
    lea       %12, [r0+8*r2]

    ; read (and interleave) those addressable by %8 (=r0), A/C/D/E/I/K/L/M
    movd      m%1, [%8+%10*4]    ; A0-3
    movd      m%3, [%12+%10*4]   ; I0-3
    movd      m%2, [%8+%10*2]    ; C0-3
    movd      m%4, [%12+%10*2]   ; K0-3
    movd      m%6, [%8+%10]      ; D0-3
    movd      m%5, [%12+%10]     ; L0-3
    movd      m%7, [%12]         ; M0-3
    add       %12, %11
    punpcklbw m%1, m%3           ; A/I
    movd      m%3, [%8]          ; E0-3
    punpcklbw m%2, m%4           ; C/K
    punpcklbw m%6, m%5           ; D/L
    punpcklbw m%3, m%7           ; E/M
    punpcklbw m%2, m%6           ; C/D/K/L interleaved

    ; read (and interleave) those addressable by %9 (=r4), B/F/G/H/J/N/O/P
    movd      m%5, [%9+%10*4]    ; B0-3
    movd      m%4, [%12+%10*4]   ; J0-3
    movd      m%7, [%9]          ; F0-3
    movd      m%6, [%12]         ; N0-3
    punpcklbw m%5, m%4           ; B/J
    punpcklbw m%7, m%6           ; F/N
    punpcklbw m%1, m%5           ; A/B/I/J interleaved
    punpcklbw m%3, m%7           ; E/F/M/N interleaved
    movd      m%4, [%9+%11]      ; G0-3
    movd      m%6, [%12+%11]     ; O0-3
    movd      m%5, [%9+%11*2]    ; H0-3
    movd      m%7, [%12+%11*2]   ; P0-3
    punpcklbw m%4, m%6           ; G/O
    punpcklbw m%5, m%7           ; H/P
    punpcklbw m%4, m%5           ; G/H/O/P interleaved
%endmacro
; write 4 mm registers of 2 dwords each
; first four arguments are mm register indexes containing source data
; last four are registers containing buf+4*stride, buf+5*stride,
; -stride and +stride
%macro WRITE_4x2D 8
    ; write out (2 dwords per register)
    movd [%5+%7*4], m%1
    movd [%5+%7*2], m%2
    movd      [%5], m%3
    movd   [%6+%8], m%4
    punpckhdq  m%1, m%1
    punpckhdq  m%2, m%2
    punpckhdq  m%3, m%3
    punpckhdq  m%4, m%4
    movd [%6+%7*4], m%1
    movd   [%5+%7], m%2
    movd      [%6], m%3
    movd [%6+%8*2], m%4
%endmacro

; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_4x2D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
; we add 1*stride to the third regular register in the process
; the 10th argument is 16 if it's a Y filter (i.e. all regular registers cover the
; same memory region), or 8 if they cover two separate buffers (third one points to
; a different memory region than the first two), allowing for more optimal code for
; the 16-width case
%macro WRITE_4x4D 10
    ; write out (4 dwords per register), start with dwords zero
    movd [%5+%8*4], m%1
    movd      [%5], m%2
    movd [%7+%8*4], m%3
    movd      [%7], m%4

    ; store dwords 1
    psrldq     m%1, 4
    psrldq     m%2, 4
    psrldq     m%3, 4
    psrldq     m%4, 4
    movd [%6+%8*4], m%1
    movd      [%6], m%2
%if %10 == 16
    movd [%6+%9*4], m%3
%endif
    movd   [%7+%9], m%4

    ; write dwords 2
    psrldq     m%1, 4
    psrldq     m%2, 4
%if %10 == 8
    movd [%5+%8*2], m%1
    movd       %5d, m%3
%endif
    psrldq     m%3, 4
    psrldq     m%4, 4
%if %10 == 16
    movd [%5+%8*2], m%1
%endif
    movd   [%6+%9], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
    add         %7, %9

    ; store dwords 3
    psrldq     m%1, 4
    psrldq     m%2, 4
    psrldq     m%3, 4
    psrldq     m%4, 4
%if %10 == 8
    mov  [%7+%8*4], %5d
    movd [%6+%8*2], m%1
%else
    movd   [%5+%8], m%1
%endif
    movd [%6+%9*2], m%2
    movd [%7+%8*2], m%3
    movd [%7+%9*2], m%4
%endmacro
; write 4 or 8 words in the mmx/xmm registers as 8 lines
; 1 and 2 are the registers to write, this can be the same (for SSE2)
; for pre-SSE4:
; 3 is a general-purpose register that we will clobber
; for SSE4:
; 3 is a pointer to the destination's 5th line
; 4 is a pointer to the destination's 4th line
; 5/6 is -stride and +stride
%macro WRITE_2x4W 6
    movd       %3d, %1
    punpckhdq   %1, %1
    mov  [%4+%5*4], %3w
    shr         %3, 16
    add         %4, %6
    mov  [%4+%5*4], %3w

    movd       %3d, %1
    add         %4, %5
    mov  [%4+%5*2], %3w
    shr         %3, 16
    mov  [%4+%5  ], %3w

    movd       %3d, %2
    punpckhdq   %2, %2
    mov  [%4     ], %3w
    shr         %3, 16
    mov  [%4+%6  ], %3w

    movd       %3d, %2
    add         %4, %6
    mov  [%4+%6  ], %3w
    shr         %3, 16
    mov  [%4+%6*2], %3w
    add         %4, %5
%endmacro

%macro WRITE_8W 5
%if cpuflag(sse4)
    pextrw [%3+%4*4], %1, 0
    pextrw [%2+%4*4], %1, 1
    pextrw [%3+%4*2], %1, 2
    pextrw [%3+%4  ], %1, 3
    pextrw [%3     ], %1, 4
    pextrw [%2     ], %1, 5
    pextrw [%2+%5  ], %1, 6
    pextrw [%2+%5*2], %1, 7
%else
    movd       %2d, %1
    psrldq      %1, 4
    mov  [%3+%4*4], %2w
    shr         %2, 16
    add         %3, %5
    mov  [%3+%4*4], %2w

    movd       %2d, %1
    psrldq      %1, 4
    add         %3, %4
    mov  [%3+%4*2], %2w
    shr         %2, 16
    mov  [%3+%4  ], %2w

    movd       %2d, %1
    psrldq      %1, 4
    mov  [%3     ], %2w
    shr         %2, 16
    mov  [%3+%5  ], %2w

    movd       %2d, %1
    add         %3, %5
    mov  [%3+%5  ], %2w
    shr         %2, 16
    mov  [%3+%5*2], %2w
%endif
%endmacro
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%if mmsize == 8 ; mmx/mmxext
    mov      cntrq, 2
%endif
%if cpuflag(ssse3)
    pxor        m0, m0
%endif
    SPLATB_REG  m7, flim, m0     ; splat "flim" into register

    ; set up indexes to address 4 rows
%if mmsize == 8
    DEFINE_ARGS dst1, mstride, stride, cntr, dst2
%else
    DEFINE_ARGS dst1, mstride, stride, dst3, dst2
%endif
    mov    strideq, mstrideq
    neg   mstrideq
%ifidn %1, h
    lea      dst1q, [dst1q+4*strideq-2]
%endif

%if mmsize == 8 ; mmx / mmxext
.next8px:
%endif
%ifidn %1, v
    ; read 4 half/full rows of pixels
    mova        m0, [dst1q+mstrideq*2]  ; p1
    mova        m1, [dst1q+mstrideq]    ; p0
    mova        m2, [dst1q]             ; q0
    mova        m3, [dst1q+ strideq]    ; q1
%else ; h
    lea      dst2q, [dst1q+ strideq]

%if mmsize == 8 ; mmx/mmxext
    READ_8x4_INTERLEAVED  0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq
%else ; sse2
    READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
%endif
    TRANSPOSE4x4W         0, 1, 2, 3, 4
%endif

    ; simple_limit
    mova        m5, m2           ; m5=backup of q0
    mova        m6, m1           ; m6=backup of p0
    psubusb     m1, m2           ; p0-q0
    psubusb     m2, m6           ; q0-p0
    por         m1, m2           ; FFABS(p0-q0)
    paddusb     m1, m1           ; m1=FFABS(p0-q0)*2

    mova        m4, m3
    mova        m2, m0
    psubusb     m3, m0           ; q1-p1
    psubusb     m0, m4           ; p1-q1
    por         m3, m0           ; FFABS(p1-q1)
    mova        m0, [pb_80]
    pxor        m2, m0
    pxor        m4, m0
    psubsb      m2, m4           ; m2=p1-q1 (signed) backup for below
    pand        m3, [pb_FE]
    psrlq       m3, 1            ; m3=FFABS(p1-q1)/2, this can be used signed
    paddusb     m3, m1
    psubusb     m3, m7
    pxor        m1, m1
    pcmpeqb     m3, m1           ; abs(p0-q0)*2+abs(p1-q1)/2<=flim mask(0xff/0x0)

    ; filter_common (use m2/p1-q1, m4=q0, m6=p0, m5/q0-p0 and m3/mask)
    mova        m4, m5
    pxor        m5, m0
    pxor        m0, m6
    psubsb      m5, m0           ; q0-p0 (signed)
    paddsb      m2, m5
    paddsb      m2, m5
    paddsb      m2, m5           ; a=(p1-q1) + 3*(q0-p0)
    pand        m2, m3           ; apply filter mask (m3)

    mova        m3, [pb_F8]
    mova        m1, m2
    paddsb      m2, [pb_4]       ; f1<<3=a+4
    paddsb      m1, [pb_3]       ; f2<<3=a+3
    pand        m2, m3
    pand        m1, m3           ; cache f2<<3

    pxor        m0, m0
    pxor        m3, m3
    pcmpgtb     m0, m2           ; which values are <0?
    psubb       m3, m2           ; -f1<<3
    psrlq       m2, 3            ; +f1
    psrlq       m3, 3            ; -f1
    pand        m3, m0
    pandn       m0, m2
    psubusb     m4, m0
    paddusb     m4, m3           ; q0-f1

    pxor        m0, m0
    pxor        m3, m3
    pcmpgtb     m0, m1           ; which values are <0?
    psubb       m3, m1           ; -f2<<3
    psrlq       m1, 3            ; +f2
    psrlq       m3, 3            ; -f2
    pand        m3, m0
    pandn       m0, m1
    paddusb     m6, m0
    psubusb     m6, m3           ; p0+f2

    ; store
%ifidn %1, v
    mova   [dst1q], m4
    mova [dst1q+mstrideq], m6
%else ; h
    inc      dst1q
    SBUTTERFLY bw, 6, 4, 0

%if mmsize == 16 ; sse2
%if cpuflag(sse4)
    inc      dst2q
%endif
    WRITE_8W    m6, dst2q, dst1q, mstrideq, strideq
    lea      dst2q, [dst3q+mstrideq+1]
%if cpuflag(sse4)
    inc      dst3q
%endif
    WRITE_8W    m4, dst3q, dst2q, mstrideq, strideq
%else ; mmx/mmxext
    WRITE_2x4W  m6, m4, dst2q, dst1q, mstrideq, strideq
%endif
%endif

%if mmsize == 8 ; mmx/mmxext
    ; next 8 pixels
%ifidn %1, v
    add      dst1q, 8            ; advance 8 cols = pixels
%else ; h
    lea      dst1q, [dst1q+strideq*8-1]  ; advance 8 rows = lines
%endif
    dec      cntrq
    jg .next8px
    REP_RET
%else ; sse2
    RET
%endif
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
INIT_MMX mmxext
SIMPLE_LOOPFILTER v, 4
SIMPLE_LOOPFILTER h, 5
%endif

INIT_XMM sse2
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
INIT_XMM ssse3
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
INIT_XMM sse4
SIMPLE_LOOPFILTER h, 5
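; There is no per-byte signed shift, so the filters above split f1/f2 into
; sign and magnitude: pcmpgtb marks negative lanes, psubb negates them, and
; since pb_F8 already cleared the low 3 bits of every byte, the wide psrlq
; by 3 cannot leak bits across byte boundaries; the +f and -f halves are then
; applied with saturating psubusb/paddusb under the lane masks.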
;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_inner_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                      int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------

%macro INNER_LOOPFILTER 2
%define stack_size 0
%ifndef m8       ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%ifidn %1, v     ;               [3]=hev() result
%define stack_size mmsize * -4
%else ; h        ; extra storage space for transposes
%define stack_size mmsize * -5
%endif
%endif

%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_inner, 6, 6, 13, stack_size, dst, dst8, stride, flimE, flimI, hevthr
%else ; luma
cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, flimI, hevthr
%endif

%if cpuflag(ssse3)
    pxor             m7, m7
%endif

%ifndef m8
    ; splat function arguments
    SPLATB_REG       m0, flimEq, m7    ; E
    SPLATB_REG       m1, flimIq, m7    ; I
    SPLATB_REG       m2, hevthrq, m7   ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]

    mova        m_flimE, m0
    mova        m_flimI, m1
    mova       m_hevthr, m2
%else
%define m_flimE    m9
%define m_flimI    m10
%define m_hevthr   m11
%define m_maskres  m12
%define m_p0backup m12
%define m_q0backup m8

    ; splat function arguments
    SPLATB_REG  m_flimE, flimEq, m7    ; E
    SPLATB_REG  m_flimI, flimIq, m7    ; I
    SPLATB_REG m_hevthr, hevthrq, m7   ; hev_thresh
%endif

%if %2 == 8 ; chroma
    DEFINE_ARGS dst1, dst8, mstride, stride, dst2
%elif mmsize == 8
    DEFINE_ARGS dst1, mstride, stride, dst2, cntr
    mov           cntrq, 2
%else
    DEFINE_ARGS dst1, mstride, stride, dst2, dst8
%endif
    mov         strideq, mstrideq
    neg        mstrideq
%ifidn %1, h
    lea           dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea           dst8q, [dst8q+strideq*4-4]
%endif
%endif

%if mmsize == 8
.next8px:
%endif
    ; read
    lea           dst2q, [dst1q+strideq]
%ifidn %1, v
%if %2 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow           m0, [dst1q+mstrideq*4] ; p3
    movrow           m1, [dst2q+mstrideq*4] ; p2
    movrow           m2, [dst1q+mstrideq*2] ; p1
    movrow           m5, [dst2q]            ; q1
    movrow           m6, [dst2q+ strideq*1] ; q2
    movrow           m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
    movhps           m0, [dst8q+mstrideq*4]
    movhps           m2, [dst8q+mstrideq*2]
    add           dst8q, strideq
    movhps           m1, [dst8q+mstrideq*4]
    movhps           m5, [dst8q]
    movhps           m6, [dst8q+ strideq  ]
    movhps           m7, [dst8q+ strideq*2]
    add           dst8q, mstrideq
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu             m0, [dst1q+mstrideq*4]
    movu             m1, [dst2q+mstrideq*4]
    movu             m2, [dst1q+mstrideq*2]
    movu             m3, [dst1q+mstrideq  ]
    movu             m4, [dst1q]
    movu             m5, [dst2q]
    movu             m6, [dst2q+ strideq  ]

    ; 8x8 transpose
    TRANSPOSE4x4B     0, 1, 2, 3, 7
    mova     m_q0backup, m1
    movu             m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B     4, 5, 6, 7, 1
    SBUTTERFLY       dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY       dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY       dq, 3, 7, 1     ; q2/q3
    mova             m1, m_q0backup
    mova     m_q0backup, m2          ; store q0
    SBUTTERFLY       dq, 1, 5, 2     ; p1/p0
    mova     m_p0backup, m5          ; store p0
    SWAP              1, 4
    SWAP              2, 4
    SWAP              6, 3
    SWAP              5, 3
%else ; sse2 (h)
%if %2 == 16
    lea           dst8q, [dst1q+ strideq*8]
%endif

    ; read 16 rows of 8px each, interleave
    movh             m0, [dst1q+mstrideq*4]
    movh             m1, [dst8q+mstrideq*4]
    movh             m2, [dst1q+mstrideq*2]
    movh             m5, [dst8q+mstrideq*2]
    movh             m3, [dst1q+mstrideq  ]
    movh             m6, [dst8q+mstrideq  ]
    movh             m4, [dst1q]
    movh             m7, [dst8q]
    punpcklbw        m0, m1          ; A/I
    punpcklbw        m2, m5          ; C/K
    punpcklbw        m3, m6          ; D/L
    punpcklbw        m4, m7          ; E/M

    add           dst8q, strideq
    movh             m1, [dst2q+mstrideq*4]
    movh             m6, [dst8q+mstrideq*4]
    movh             m5, [dst2q]
    movh             m7, [dst8q]
    punpcklbw        m1, m6          ; B/J
    punpcklbw        m5, m7          ; F/N
    movh             m6, [dst2q+ strideq  ]
    movh             m7, [dst8q+ strideq  ]
    punpcklbw        m6, m7          ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP          1, 8
%else
    mova         m_q0backup, m1
%endif
    movh         m7, [dst2q+ strideq*2]
    movh         m1, [dst8q+ strideq*2]
    punpcklbw    m7, m1          ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY   dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY   dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY   dq, 3, 7, 1     ; q2/q3
%ifdef m8
    SWAP          1, 8
    SWAP          2, 8
%else
    mova         m1, m_q0backup
    mova         m_q0backup, m2  ; store q0
%endif
    SBUTTERFLY   dq, 1, 5, 2     ; p1/p0
%ifdef m12
    SWAP          5, 12
%else
    mova         m_p0backup, m5  ; store p0
%endif
    SWAP          1, 4
    SWAP          2, 4
    SWAP          6, 3
    SWAP          5, 3
%endif

    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova         m4, m1
    SWAP          4, 1
    psubusb      m4, m0          ; p2-p3
    psubusb      m0, m1          ; p3-p2
    por          m0, m4          ; abs(p3-p2)

    mova         m4, m2
    SWAP          4, 2
    psubusb      m4, m1          ; p1-p2
    psubusb      m1, m2          ; p2-p1
    por          m1, m4          ; abs(p2-p1)

    mova         m4, m6
    SWAP          4, 6
    psubusb      m4, m7          ; q2-q3
    psubusb      m7, m6          ; q3-q2
    por          m7, m4          ; abs(q3-q2)

    mova         m4, m5
    SWAP          4, 5
    psubusb      m4, m6          ; q1-q2
    psubusb      m6, m5          ; q2-q1
    por          m6, m4          ; abs(q2-q1)

%if notcpuflag(mmxext)
    mova         m4, m_flimI
    pxor         m3, m3
    psubusb      m0, m4
    psubusb      m1, m4
    psubusb      m7, m4
    psubusb      m6, m4
    pcmpeqb      m0, m3          ; abs(p3-p2) <= I
    pcmpeqb      m1, m3          ; abs(p2-p1) <= I
    pcmpeqb      m7, m3          ; abs(q3-q2) <= I
    pcmpeqb      m6, m3          ; abs(q2-q1) <= I
    pand         m0, m1
    pand         m7, m6
    pand         m0, m7
%else ; mmxext/sse2
    pmaxub       m0, m1
    pmaxub       m6, m7
    pmaxub       m0, m6
%endif

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP          7, 3           ; now m7 is zero
%ifidn %1, v
    movrow       m3, [dst1q+mstrideq  ] ; p0
%if mmsize == 16 && %2 == 8
    movhps       m3, [dst8q+mstrideq  ]
%endif
%elifdef m12
    SWAP          3, 12
%else
    mova         m3, m_p0backup
%endif

    mova         m1, m2
    SWAP          1, 2
    mova         m6, m3
    SWAP          3, 6
    psubusb      m1, m3          ; p1-p0
    psubusb      m6, m2          ; p0-p1
    por          m1, m6          ; abs(p1-p0)
%if notcpuflag(mmxext)
    mova         m6, m1
    psubusb      m1, m4
    psubusb      m6, m_hevthr
    pcmpeqb      m1, m7          ; abs(p1-p0) <= I
    pcmpeqb      m6, m7          ; abs(p1-p0) <= hev_thresh
    pand         m0, m1
    mova         m_maskres, m6
%else ; mmxext/sse2
    pmaxub       m0, m1          ; max_I
    SWAP          1, 4           ; max_hev_thresh
%endif
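
    ; The q1-q0 pair below is merged into the same two running values: m0
    ; accumulates the "abs(..) <= I" interior-limit tests, while m6 (spilled
    ; to m_maskres) accumulates the hev ("high edge variance") test. The hev
    ; mask is kept inverted - true where abs(p1-p0) <= hev_thresh and
    ; abs(q1-q0) <= hev_thresh - and later selects whether the outer taps
    ; p1/q1 are adjusted as well (the "is4tap" case of filter_common).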

    SWAP          6, 4           ; now m6 is I
%ifidn %1, v
    movrow       m4, [dst1q]     ; q0
%if mmsize == 16 && %2 == 8
    movhps       m4, [dst8q]
%endif
%elifdef m8
    SWAP          4, 8
%else
    mova         m4, m_q0backup
%endif
    mova         m1, m4
    SWAP          1, 4
    mova         m7, m5
    SWAP          7, 5
    psubusb      m1, m5          ; q0-q1
    psubusb      m7, m4          ; q1-q0
    por          m1, m7          ; abs(q1-q0)
%if notcpuflag(mmxext)
    mova         m7, m1
    psubusb      m1, m6
    psubusb      m7, m_hevthr
    pxor         m6, m6
    pcmpeqb      m1, m6          ; abs(q1-q0) <= I
    pcmpeqb      m7, m6          ; abs(q1-q0) <= hev_thresh
    mova         m6, m_maskres
    pand         m0, m1          ; abs([pq][321]-[pq][210]) <= I
    pand         m6, m7
%else ; mmxext/sse2
    pxor         m7, m7
    pmaxub       m0, m1
    pmaxub       m6, m1
    psubusb      m0, m_flimI
    psubusb      m6, m_hevthr
    pcmpeqb      m0, m7          ; max(abs(..)) <= I
    pcmpeqb      m6, m7          ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP          6, 12
%else
    mova         m_maskres, m6   ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif

    ; simple_limit
    mova         m1, m3
    SWAP          1, 3
    mova         m6, m4          ; keep copies of p0/q0 around for later use
    SWAP          6, 4
    psubusb      m1, m4          ; p0-q0
    psubusb      m6, m3          ; q0-p0
    por          m1, m6          ; abs(q0-p0)
    paddusb      m1, m1          ; m1=2*abs(q0-p0)

    mova         m7, m2
    SWAP          7, 2
    mova         m6, m5
    SWAP          6, 5
    psubusb      m7, m5          ; p1-q1
    psubusb      m6, m2          ; q1-p1
    por          m7, m6          ; abs(q1-p1)
    pxor         m6, m6
    pand         m7, [pb_FE]
    psrlq        m7, 1           ; abs(q1-p1)/2
    paddusb      m7, m1          ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb      m7, m_flimE
    pcmpeqb      m7, m6          ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand         m0, m7          ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova         m8, [pb_80]
%define m_pb_80 m8
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
%endif
    mova         m1, m4
    mova         m7, m3
    pxor         m1, m_pb_80
    pxor         m7, m_pb_80
    psubsb       m1, m7          ; (signed) q0-p0
    mova         m6, m2
    mova         m7, m5
    pxor         m6, m_pb_80
    pxor         m7, m_pb_80
    psubsb       m6, m7          ; (signed) p1-q1
    mova         m7, m_maskres
    pandn        m7, m6
    paddsb       m7, m1
    paddsb       m7, m1
    paddsb       m7, m1          ; 3*(q0-p0)+is4tap?(p1-q1)

    pand         m7, m0
    mova         m1, [pb_F8]
    mova         m6, m7
    paddsb       m7, [pb_3]
    paddsb       m6, [pb_4]
    pand         m7, m1
    pand         m6, m1

    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m1, m7
    psubb        m0, m7
    psrlq        m7, 3           ; +f2
    psrlq        m0, 3           ; -f2
    pand         m0, m1
    pandn        m1, m7
    psubusb      m3, m0
    paddusb      m3, m1          ; p0+f2

    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m0, m6
    psubb        m1, m6
    psrlq        m6, 3           ; +f1
    psrlq        m1, 3           ; -f1
    pand         m1, m0
    pandn        m0, m6
    psubusb      m4, m0
    paddusb      m4, m1          ; q0-f1
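
    ; f2 = (w+3)>>3 and f1 = (w+4)>>3, with w = 3*(q0-p0)+(p1-q1); the
    ; saturating paddsb provides the clamping. There is no signed byte
    ; shift, so each value is split by sign (pcmpgtb/psubb) into a +f and a
    ; -f part, both shifted as unsigned and reapplied with saturating
    ; adds/subs; the pb_F8 mask clears the low bits that psrlq would
    ; otherwise shift across byte boundaries. The block below applies
    ; a = (f1+1)>>1 to p1/q1, but only where the inverted hev mask
    ; (m_maskres) is set.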

%ifdef m12
    SWAP          6, 12
%else
    mova         m6, m_maskres
%endif
%if notcpuflag(mmxext)
    mova         m7, [pb_1]
%else ; mmxext/sse2
    pxor         m7, m7
%endif
    pand         m0, m6
    pand         m1, m6
%if notcpuflag(mmxext)
    paddusb      m0, m7
    pand         m1, [pb_FE]
    pandn        m7, m0
    psrlq        m1, 1
    psrlq        m7, 1
    SWAP          0, 7
%else ; mmxext/sse2
    psubusb      m1, [pb_1]
    pavgb        m0, m7          ; a
    pavgb        m1, m7          ; -a
%endif
    psubusb      m5, m0
    psubusb      m2, m1
    paddusb      m5, m1          ; q1-a
    paddusb      m2, m0          ; p1+a

    ; store
%ifidn %1, v
    movrow       [dst1q+mstrideq*2], m2
    movrow       [dst1q+mstrideq  ], m3
    movrow       [dst1q], m4
    movrow       [dst1q+ strideq  ], m5
%if mmsize == 16 && %2 == 8
    movhps       [dst8q+mstrideq*2], m2
    movhps       [dst8q+mstrideq  ], m3
    movhps       [dst8q], m4
    movhps       [dst8q+ strideq  ], m5
%endif
%else ; h
    add          dst1q, 2
    add          dst2q, 2

    ; 4x8/16 transpose
    TRANSPOSE4x4B 2, 3, 4, 5, 6

%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D    2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq
%else ; sse2 (h)
    lea          dst8q, [dst8q+mstrideq+2]
    WRITE_4x4D    2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2
%endif
%endif

%if mmsize == 8
%if %2 == 8 ; chroma
%ifidn %1, h
    sub          dst1q, 2
%endif
    cmp          dst1q, dst8q
    mov          dst1q, dst8q
    jnz .next8px
%else
%ifidn %1, h
    lea          dst1q, [dst1q+ strideq*8-2]
%else ; v
    add          dst1q, 8
%endif
    dec          cntrq
    jg .next8px
%endif
    REP_RET
%else ; mmsize == 16
    RET
%endif
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

INIT_MMX mmxext
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8
%endif

INIT_XMM sse2
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

INIT_XMM ssse3
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
INNER_LOOPFILTER v, 8
INNER_LOOPFILTER h, 8

;-----------------------------------------------------------------------------
; void vp8_h/v_loop_filter_mbedge_<opt>(uint8_t *dst, [uint8_t *v,] int stride,
;                                       int flimE, int flimI, int hev_thr);
;-----------------------------------------------------------------------------

%macro MBEDGE_LOOPFILTER 2
%define stack_size 0
%ifndef m8       ; stack layout: [0]=E, [1]=I, [2]=hev_thr
%if mmsize == 16 ;               [3]=hev() result
                 ;               [4]=filter tmp result
                 ;               [5]/[6] = p2/q2 backup
                 ;               [7]=lim_res sign result
%define stack_size mmsize * -7
%else ; 8        ; extra storage space for transposes
%define stack_size mmsize * -8
%endif
%endif

%if %2 == 8 ; chroma
cglobal vp8_%1_loop_filter8uv_mbedge, 6, 6, 15, stack_size, dst1, dst8, stride, flimE, flimI, hevthr
%else ; luma
cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE, flimI, hevthr
%endif
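
; On x86-32 (and mmx/mmxext) the E/I/hev thresholds and all temporaries live
; in the [rsp+mmsize*n] stack slots laid out above; on x86-64 with sse2+ they
; stay in xmm8-xmm14. m_limsign can alias m_flimE ([rsp] resp. m9) because
; the E limit is no longer needed once filter_mbedge runs. The m7 zeroed
; below under ssse3 is presumably the zero register that SPLATB_REG's
; pshufb-based byte splat expects; the pre-ssse3 splat paths don't use it.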

%if cpuflag(ssse3)
    pxor         m7, m7
%endif

%ifndef m8
    ; splat function arguments
    SPLATB_REG   m0, flimEq, m7   ; E
    SPLATB_REG   m1, flimIq, m7   ; I
    SPLATB_REG   m2, hevthrq, m7  ; hev_thresh

%define m_flimE    [rsp]
%define m_flimI    [rsp+mmsize]
%define m_hevthr   [rsp+mmsize*2]
%define m_maskres  [rsp+mmsize*3]
%define m_limres   [rsp+mmsize*4]
%define m_p0backup [rsp+mmsize*3]
%define m_q0backup [rsp+mmsize*4]
%define m_p2backup [rsp+mmsize*5]
%define m_q2backup [rsp+mmsize*6]
%if mmsize == 16
%define m_limsign  [rsp]
%else
%define m_limsign  [rsp+mmsize*7]
%endif

    mova         m_flimE, m0
    mova         m_flimI, m1
    mova         m_hevthr, m2
%else ; sse2 on x86-64
%define m_flimE    m9
%define m_flimI    m10
%define m_hevthr   m11
%define m_maskres  m12
%define m_limres   m8
%define m_p0backup m12
%define m_q0backup m8
%define m_p2backup m13
%define m_q2backup m14
%define m_limsign  m9

    ; splat function arguments
    SPLATB_REG   m_flimE, flimEq, m7   ; E
    SPLATB_REG   m_flimI, flimIq, m7   ; I
    SPLATB_REG   m_hevthr, hevthrq, m7 ; hev_thresh
%endif

%if %2 == 8 ; chroma
DEFINE_ARGS dst1, dst8, mstride, stride, dst2
%elif mmsize == 8
DEFINE_ARGS dst1, mstride, stride, dst2, cntr
    mov          cntrq, 2
%else
DEFINE_ARGS dst1, mstride, stride, dst2, dst8
%endif
    mov          strideq, mstrideq
    neg          mstrideq
%ifidn %1, h
    lea          dst1q, [dst1q+strideq*4-4]
%if %2 == 8 ; chroma
    lea          dst8q, [dst8q+strideq*4-4]
%endif
%endif
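
; The stride register is duplicated and negated so that rows above the edge
; (p3..p0) can be addressed with mstrideq (negative) and rows below (q0..q3)
; with strideq, all relative to dst1q, which points at the q0 row; for the
; horizontal filter it is first moved back 4 columns so the pixels straddling
; the edge can be read row-wise. mmx builds process 8 pixels per .next8px
; iteration; sse2 builds handle all 16 (or both chroma halves) in one pass.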

%if mmsize == 8
.next8px:
%endif
    ; read
    lea          dst2q, [dst1q+ strideq  ]
%ifidn %1, v
%if %2 == 8 && mmsize == 16
%define movrow movh
%else
%define movrow mova
%endif
    movrow       m0, [dst1q+mstrideq*4] ; p3
    movrow       m1, [dst2q+mstrideq*4] ; p2
    movrow       m2, [dst1q+mstrideq*2] ; p1
    movrow       m5, [dst2q]            ; q1
    movrow       m6, [dst2q+ strideq  ] ; q2
    movrow       m7, [dst2q+ strideq*2] ; q3
%if mmsize == 16 && %2 == 8
    movhps       m0, [dst8q+mstrideq*4]
    movhps       m2, [dst8q+mstrideq*2]
    add          dst8q, strideq
    movhps       m1, [dst8q+mstrideq*4]
    movhps       m5, [dst8q]
    movhps       m6, [dst8q+ strideq  ]
    movhps       m7, [dst8q+ strideq*2]
    add          dst8q, mstrideq
%endif
%elif mmsize == 8 ; mmx/mmxext (h)
    ; read 8 rows of 8px each
    movu         m0, [dst1q+mstrideq*4]
    movu         m1, [dst2q+mstrideq*4]
    movu         m2, [dst1q+mstrideq*2]
    movu         m3, [dst1q+mstrideq  ]
    movu         m4, [dst1q]
    movu         m5, [dst2q]
    movu         m6, [dst2q+ strideq  ]

    ; 8x8 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
    mova         m_q0backup, m1
    movu         m7, [dst2q+ strideq*2]
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY   dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY   dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY   dq, 3, 7, 1     ; q2/q3
    mova         m1, m_q0backup
    mova         m_q0backup, m2  ; store q0
    SBUTTERFLY   dq, 1, 5, 2     ; p1/p0
    mova         m_p0backup, m5  ; store p0
    SWAP          1, 4
    SWAP          2, 4
    SWAP          6, 3
    SWAP          5, 3
%else ; sse2 (h)
%if %2 == 16
    lea          dst8q, [dst1q+ strideq*8]
%endif

    ; read 16 rows of 8px each, interleave
    movh         m0, [dst1q+mstrideq*4]
    movh         m1, [dst8q+mstrideq*4]
    movh         m2, [dst1q+mstrideq*2]
    movh         m5, [dst8q+mstrideq*2]
    movh         m3, [dst1q+mstrideq  ]
    movh         m6, [dst8q+mstrideq  ]
    movh         m4, [dst1q]
    movh         m7, [dst8q]
    punpcklbw    m0, m1          ; A/I
    punpcklbw    m2, m5          ; C/K
    punpcklbw    m3, m6          ; D/L
    punpcklbw    m4, m7          ; E/M

    add          dst8q, strideq
    movh         m1, [dst2q+mstrideq*4]
    movh         m6, [dst8q+mstrideq*4]
    movh         m5, [dst2q]
    movh         m7, [dst8q]
    punpcklbw    m1, m6          ; B/J
    punpcklbw    m5, m7          ; F/N
    movh         m6, [dst2q+ strideq  ]
    movh         m7, [dst8q+ strideq  ]
    punpcklbw    m6, m7          ; G/O

    ; 8x16 transpose
    TRANSPOSE4x4B 0, 1, 2, 3, 7
%ifdef m8
    SWAP          1, 8
%else
    mova         m_q0backup, m1
%endif
    movh         m7, [dst2q+ strideq*2]
    movh         m1, [dst8q+ strideq*2]
    punpcklbw    m7, m1          ; H/P
    TRANSPOSE4x4B 4, 5, 6, 7, 1
    SBUTTERFLY   dq, 0, 4, 1     ; p3/p2
    SBUTTERFLY   dq, 2, 6, 1     ; q0/q1
    SBUTTERFLY   dq, 3, 7, 1     ; q2/q3
%ifdef m8
    SWAP          1, 8
    SWAP          2, 8
%else
    mova         m1, m_q0backup
    mova         m_q0backup, m2  ; store q0
%endif
    SBUTTERFLY   dq, 1, 5, 2     ; p1/p0
%ifdef m12
    SWAP          5, 12
%else
    mova         m_p0backup, m5  ; store p0
%endif
    SWAP          1, 4
    SWAP          2, 4
    SWAP          6, 3
    SWAP          5, 3
%endif

    ; normal_limit for p3-p2, p2-p1, q3-q2 and q2-q1
    mova         m4, m1
    SWAP          4, 1
    psubusb      m4, m0          ; p2-p3
    psubusb      m0, m1          ; p3-p2
    por          m0, m4          ; abs(p3-p2)

    mova         m4, m2
    SWAP          4, 2
    psubusb      m4, m1          ; p1-p2
    mova         m_p2backup, m1
    psubusb      m1, m2          ; p2-p1
    por          m1, m4          ; abs(p2-p1)

    mova         m4, m6
    SWAP          4, 6
    psubusb      m4, m7          ; q2-q3
    psubusb      m7, m6          ; q3-q2
    por          m7, m4          ; abs(q3-q2)

    mova         m4, m5
    SWAP          4, 5
    psubusb      m4, m6          ; q1-q2
    mova         m_q2backup, m6
    psubusb      m6, m5          ; q2-q1
    por          m6, m4          ; abs(q2-q1)

%if notcpuflag(mmxext)
    mova         m4, m_flimI
    pxor         m3, m3
    psubusb      m0, m4
    psubusb      m1, m4
    psubusb      m7, m4
    psubusb      m6, m4
    pcmpeqb      m0, m3          ; abs(p3-p2) <= I
    pcmpeqb      m1, m3          ; abs(p2-p1) <= I
    pcmpeqb      m7, m3          ; abs(q3-q2) <= I
    pcmpeqb      m6, m3          ; abs(q2-q1) <= I
    pand         m0, m1
    pand         m7, m6
    pand         m0, m7
%else ; mmxext/sse2
    pmaxub       m0, m1
    pmaxub       m6, m7
    pmaxub       m0, m6
%endif
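
    ; Unlike the inner loopfilter, the mbedge filter also rewrites p2/q2, so
    ; copies are stashed in m_p2backup/m_q2backup above before those
    ; registers are consumed by the abs-diff computations. On mmxext+ the
    ; four "abs(..) <= I" tests collapse into pmaxub reductions: comparing
    ; the running byte-wise maximum against I once is equivalent to, and
    ; cheaper than, the four pcmpeqb-based compares of the plain-mmx path.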

    ; normal_limit and high_edge_variance for p1-p0, q1-q0
    SWAP          7, 3           ; now m7 is zero
%ifidn %1, v
    movrow       m3, [dst1q+mstrideq  ] ; p0
%if mmsize == 16 && %2 == 8
    movhps       m3, [dst8q+mstrideq  ]
%endif
%elifdef m12
    SWAP          3, 12
%else
    mova         m3, m_p0backup
%endif

    mova         m1, m2
    SWAP          1, 2
    mova         m6, m3
    SWAP          3, 6
    psubusb      m1, m3          ; p1-p0
    psubusb      m6, m2          ; p0-p1
    por          m1, m6          ; abs(p1-p0)
%if notcpuflag(mmxext)
    mova         m6, m1
    psubusb      m1, m4
    psubusb      m6, m_hevthr
    pcmpeqb      m1, m7          ; abs(p1-p0) <= I
    pcmpeqb      m6, m7          ; abs(p1-p0) <= hev_thresh
    pand         m0, m1
    mova         m_maskres, m6
%else ; mmxext/sse2
    pmaxub       m0, m1          ; max_I
    SWAP          1, 4           ; max_hev_thresh
%endif

    SWAP          6, 4           ; now m6 is I
%ifidn %1, v
    movrow       m4, [dst1q]     ; q0
%if mmsize == 16 && %2 == 8
    movhps       m4, [dst8q]
%endif
%elifdef m8
    SWAP          4, 8
%else
    mova         m4, m_q0backup
%endif
    mova         m1, m4
    SWAP          1, 4
    mova         m7, m5
    SWAP          7, 5
    psubusb      m1, m5          ; q0-q1
    psubusb      m7, m4          ; q1-q0
    por          m1, m7          ; abs(q1-q0)
%if notcpuflag(mmxext)
    mova         m7, m1
    psubusb      m1, m6
    psubusb      m7, m_hevthr
    pxor         m6, m6
    pcmpeqb      m1, m6          ; abs(q1-q0) <= I
    pcmpeqb      m7, m6          ; abs(q1-q0) <= hev_thresh
    mova         m6, m_maskres
    pand         m0, m1          ; abs([pq][321]-[pq][210]) <= I
    pand         m6, m7
%else ; mmxext/sse2
    pxor         m7, m7
    pmaxub       m0, m1
    pmaxub       m6, m1
    psubusb      m0, m_flimI
    psubusb      m6, m_hevthr
    pcmpeqb      m0, m7          ; max(abs(..)) <= I
    pcmpeqb      m6, m7          ; !(max(abs..) > thresh)
%endif
%ifdef m12
    SWAP          6, 12
%else
    mova         m_maskres, m6   ; !(abs(p1-p0) > hev_t || abs(q1-q0) > hev_t)
%endif
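
    ; The simple_limit test below gates the whole filter on edge strength:
    ; the edge is filtered only if 2*abs(p0-q0) + abs(p1-q1)/2 <= E. The
    ; pb_FE mask clears each byte's low bit first, so the single psrlq by 1
    ; behaves as a per-byte shift without pulling bits across byte
    ; boundaries.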

    ; simple_limit
    mova         m1, m3
    SWAP          1, 3
    mova         m6, m4          ; keep copies of p0/q0 around for later use
    SWAP          6, 4
    psubusb      m1, m4          ; p0-q0
    psubusb      m6, m3          ; q0-p0
    por          m1, m6          ; abs(q0-p0)
    paddusb      m1, m1          ; m1=2*abs(q0-p0)

    mova         m7, m2
    SWAP          7, 2
    mova         m6, m5
    SWAP          6, 5
    psubusb      m7, m5          ; p1-q1
    psubusb      m6, m2          ; q1-p1
    por          m7, m6          ; abs(q1-p1)
    pxor         m6, m6
    pand         m7, [pb_FE]
    psrlq        m7, 1           ; abs(q1-p1)/2
    paddusb      m7, m1          ; abs(q0-p0)*2+abs(q1-p1)/2
    psubusb      m7, m_flimE
    pcmpeqb      m7, m6          ; abs(q0-p0)*2+abs(q1-p1)/2 <= E
    pand         m0, m7          ; normal_limit result

    ; filter_common; at this point, m2-m5=p1-q1 and m0 is filter_mask
%ifdef m8 ; x86-64 && sse2
    mova         m8, [pb_80]
%define m_pb_80 m8
%else ; x86-32 or mmx/mmxext
%define m_pb_80 [pb_80]
%endif
    mova         m1, m4
    mova         m7, m3
    pxor         m1, m_pb_80
    pxor         m7, m_pb_80
    psubsb       m1, m7          ; (signed) q0-p0
    mova         m6, m2
    mova         m7, m5
    pxor         m6, m_pb_80
    pxor         m7, m_pb_80
    psubsb       m6, m7          ; (signed) p1-q1
    mova         m7, m_maskres
    paddsb       m6, m1
    paddsb       m6, m1
    paddsb       m6, m1          ; 3*(q0-p0)+(p1-q1)
    pand         m6, m0
%ifdef m8
    mova         m_limres, m6    ; 3*(q0-p0)+(p1-q1) masked for filter_mbedge
    pand         m_limres, m7
%else
    mova         m0, m6
    pand         m0, m7
    mova         m_limres, m0
%endif
    pandn        m7, m6          ; 3*(q0-p0)+(p1-q1) masked for filter_common

    mova         m1, [pb_F8]
    mova         m6, m7
    paddsb       m7, [pb_3]
    paddsb       m6, [pb_4]
    pand         m7, m1
    pand         m6, m1

    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m1, m7
    psubb        m0, m7
    psrlq        m7, 3           ; +f2
    psrlq        m0, 3           ; -f2
    pand         m0, m1
    pandn        m1, m7
    psubusb      m3, m0
    paddusb      m3, m1          ; p0+f2

    pxor         m1, m1
    pxor         m0, m0
    pcmpgtb      m0, m6
    psubb        m1, m6
    psrlq        m6, 3           ; +f1
    psrlq        m1, 3           ; -f1
    pand         m1, m0
    pandn        m0, m6
    psubusb      m4, m0
    paddusb      m4, m1          ; q0-f1
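
    ; filter_mbedge applies three tap strengths derived from the same w
    ; (the hev-masked 3*(q0-p0)+(p1-q1) kept in lim_res):
    ;   a0 = (27*w + 63) >> 7, applied to p0/q0
    ;   a1 = (18*w + 63) >> 7, applied to p1/q1
    ;   a2 = ( 9*w + 63) >> 7, applied to p2/q2
    ; The plain path sign-extends w to words and uses pmullw/paddw; the
    ; ssse3 path instead interleaves the w bytes with the constant 1 and
    ; uses pmaddubsw against the packed (27,63)/(18,63)/(9,63) pairs,
    ; computing e.g. 27*w + 63*1 in a single multiply-add.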

    ; filter_mbedge (m2-m5 = p1-q1; lim_res carries w)
%if cpuflag(ssse3)
    mova         m7, [pb_1]
%else
    mova         m7, [pw_63]
%endif
%ifdef m8
    SWAP          1, 8
%else
    mova         m1, m_limres
%endif
    pxor         m0, m0
    mova         m6, m1
    pcmpgtb      m0, m1          ; which are negative
%if cpuflag(ssse3)
    punpcklbw    m6, m7          ; interleave with "1" for rounding
    punpckhbw    m1, m7
%else
    punpcklbw    m6, m0          ; signed byte->word
    punpckhbw    m1, m0
%endif
    mova         m_limsign, m0
%if cpuflag(ssse3)
    mova         m7, [pb_27_63]
%ifndef m8
    mova         m_limres, m1
%endif
%ifdef m10
    SWAP          0, 10          ; don't lose lim_sign copy
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP          6, 7
    pmaddubsw    m0, m1
    SWAP          1, 0
%ifdef m10
    SWAP          0, 10
%else
    mova         m0, m_limsign
%endif
%else
    mova         m_maskres, m6   ; backup for later in filter
    mova         m_limres, m1
    pmullw       m6, [pw_27]
    pmullw       m1, [pw_27]
    paddw        m6, m7
    paddw        m1, m7
%endif
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a0
    pxor         m1, m1
    psubb        m1, m6
    pand         m1, m0          ; -a0
    pandn        m0, m6          ; +a0
%if cpuflag(ssse3)
    mova         m6, [pb_18_63]  ; pipelining
%endif
    psubusb      m3, m1
    paddusb      m4, m1
    paddusb      m3, m0          ; p0+a0
    psubusb      m4, m0          ; q0-a0

%if cpuflag(ssse3)
    SWAP          6, 7
%ifdef m10
    SWAP          1, 10
%else
    mova         m1, m_limres
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP          6, 7
    pmaddubsw    m0, m1
    SWAP          1, 0
%ifdef m10
    SWAP          0, 10
%endif
    mova         m0, m_limsign
%else
    mova         m6, m_maskres
    mova         m1, m_limres
    pmullw       m6, [pw_18]
    pmullw       m1, [pw_18]
    paddw        m6, m7
    paddw        m1, m7
%endif
    mova         m0, m_limsign
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a1
    pxor         m1, m1
    psubb        m1, m6
    pand         m1, m0          ; -a1
    pandn        m0, m6          ; +a1
%if cpuflag(ssse3)
    mova         m6, [pb_9_63]
%endif
    psubusb      m2, m1
    paddusb      m5, m1
    paddusb      m2, m0          ; p1+a1
    psubusb      m5, m0          ; q1-a1

%if cpuflag(ssse3)
    SWAP          6, 7
%ifdef m10
    SWAP          1, 10
%else
    mova         m1, m_limres
%endif
    mova         m0, m7
    pmaddubsw    m7, m6
    SWAP          6, 7
    pmaddubsw    m0, m1
    SWAP          1, 0
%else
%ifdef m8
    SWAP          6, 12
    SWAP          1, 8
%else
    mova         m6, m_maskres
    mova         m1, m_limres
%endif
    pmullw       m6, [pw_9]
    pmullw       m1, [pw_9]
    paddw        m6, m7
    paddw        m1, m7
%endif
%ifdef m9
    SWAP          7, 9
%else
    mova         m7, m_limsign
%endif
    psraw        m6, 7
    psraw        m1, 7
    packsswb     m6, m1          ; a2
    pxor         m0, m0
    psubb        m0, m6
    pand         m0, m7          ; -a2
    pandn        m7, m6          ; +a2
%ifdef m8
    SWAP          1, 13
    SWAP          6, 14
%else
    mova         m1, m_p2backup
    mova         m6, m_q2backup
%endif
    psubusb      m1, m0
    paddusb      m6, m0
    paddusb      m1, m7          ; p2+a2
    psubusb      m6, m7          ; q2-a2
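
    ; store: unlike the inner loopfilter, which only rewrites p1..q1, the
    ; mbedge filter writes back six rows/columns (p2..q2). That is why the
    ; horizontal path below transposes a 6px-wide strip - four columns via
    ; TRANSPOSE4x4B plus the q1/q2 pair interleaved with SBUTTERFLY - and
    ; needs the extra WRITE_2x4W/WRITE_8W calls.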

    ; store
%ifidn %1, v
    movrow       [dst2q+mstrideq*4], m1
    movrow       [dst1q+mstrideq*2], m2
    movrow       [dst1q+mstrideq  ], m3
    movrow       [dst1q], m4
    movrow       [dst2q], m5
    movrow       [dst2q+ strideq  ], m6
%if mmsize == 16 && %2 == 8
    add          dst8q, mstrideq
    movhps       [dst8q+mstrideq*2], m1
    movhps       [dst8q+mstrideq  ], m2
    movhps       [dst8q], m3
    add          dst8q, strideq
    movhps       [dst8q], m4
    movhps       [dst8q+ strideq  ], m5
    movhps       [dst8q+ strideq*2], m6
%endif
%else ; h
    inc          dst1q
    inc          dst2q

    ; 4x8/16 transpose
    TRANSPOSE4x4B 1, 2, 3, 4, 0
    SBUTTERFLY   bw, 5, 6, 0

%if mmsize == 8 ; mmx/mmxext (h)
    WRITE_4x2D    1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq
    add          dst1q, 4
    WRITE_2x4W   m5, m6, dst2q, dst1q, mstrideq, strideq
%else ; sse2 (h)
    lea          dst8q, [dst8q+mstrideq+1]
    WRITE_4x4D    1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
    lea          dst1q, [dst2q+mstrideq+4]
    lea          dst8q, [dst8q+mstrideq+4]
%if cpuflag(sse4)
    add          dst2q, 4
%endif
    WRITE_8W     m5, dst2q, dst1q, mstrideq, strideq
%if cpuflag(sse4)
    lea          dst2q, [dst8q+ strideq  ]
%endif
    WRITE_8W     m6, dst2q, dst8q, mstrideq, strideq
%endif
%endif

%if mmsize == 8
%if %2 == 8 ; chroma
%ifidn %1, h
    sub          dst1q, 5
%endif
    cmp          dst1q, dst8q
    mov          dst1q, dst8q
    jnz .next8px
%else
%ifidn %1, h
    lea          dst1q, [dst1q+ strideq*8-5]
%else ; v
    add          dst1q, 8
%endif
    dec          cntrq
    jg .next8px
%endif
    REP_RET
%else ; mmsize == 16
    RET
%endif
%endmacro

%if ARCH_X86_32
INIT_MMX mmx
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

INIT_MMX mmxext
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8
%endif

INIT_XMM sse2
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

INIT_XMM ssse3
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER v, 8
MBEDGE_LOOPFILTER h, 8

INIT_XMM sse4
MBEDGE_LOOPFILTER h, 16
MBEDGE_LOOPFILTER h, 8
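
; Note that sse4 only instantiates the horizontal mbedge variants: the
; vertical code would be identical to the ssse3 version, and (presumably)
; only the WRITE_8W writeback used by the horizontal filter has an
; sse4-specific path.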