;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Oskar Arvidsson
;*          Loren Merritt
;*          Jason Garrett-Glaser
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_pixel_max: times 8 dw ((1 << 10)-1)

SECTION .text

cextern pw_2
cextern pw_3
cextern pw_4

; out: %4 = |%1-%2|-%3
; clobbers: %5
%macro ABS_SUB 5
    psubusw %5, %2, %1
    psubusw %4, %1, %2
    por     %4, %5
    psubw   %4, %3
%endmacro

; out: %4 = |%1-%2|<%3
%macro DIFF_LT 5
    psubusw %4, %2, %1
    psubusw %5, %1, %2
    por     %5, %4 ; |%1-%2|
    pxor    %4, %4
    psubw   %5, %3 ; |%1-%2|-%3
    pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
%endmacro

%macro LOAD_AB 4
    movd    %1, %3
    movd    %2, %4
    SPLATW  %1, %1
    SPLATW  %2, %2
%endmacro

; in: %2=tc reg
; out: %1=splatted tc
%macro LOAD_TC 2
    movd        %1, [%2]
    punpcklbw   %1, %1
%if mmsize == 8
    pshufw      %1, %1, 0
%else
    pshuflw     %1, %1, 01010000b
    pshufd      %1, %1, 01010000b
%endif
    psraw       %1, 6
%endmacro

; in: %1=p1, %2=p0, %3=q0, %4=q1
;     %5=alpha, %6=beta, %7-%9=tmp
; out: %7=mask
%macro LOAD_MASK 9
    ABS_SUB     %2, %3, %5, %8, %7 ; |p0-q0| - alpha
    ABS_SUB     %1, %2, %6, %9, %7 ; |p1-p0| - beta
    pand        %8, %9
    ABS_SUB     %3, %4, %6, %9, %7 ; |q1-q0| - beta
    pxor        %7, %7
    pand        %8, %9
    pcmpgtw     %7, %8
%endmacro

; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
%macro DEBLOCK_P0_Q0 7
    psubw   %3, %4
    pxor    %7, %7
    paddw   %3, [pw_4]
    psubw   %7, %5
    psubw   %6, %2, %1
    psllw   %6, 2
    paddw   %3, %6
    psraw   %3, 3
    mova    %6, [pw_pixel_max]
    CLIPW   %3, %7, %5
    pxor    %7, %7
    paddw   %1, %3
    psubw   %2, %3
    CLIPW   %1, %7, %6
    CLIPW   %2, %7, %6
%endmacro
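; For reference, the scalar equations DEBLOCK_P0_Q0 implements (the H.264
; normal-strength filter at 10-bit depth; %5 holds the per-pixel effective
; tc, already zeroed on lanes that fail the edge mask):
;   delta = clip3(-tc, tc, ((q0-p0)*4 + (p1-q1) + 4) >> 3)
;   p0'   = clip3(0, pixel_max, p0 + delta)
;   q0'   = clip3(0, pixel_max, q0 - delta)
; LUMA_Q1 below is likewise the spec's p1/q1 update (shown for the p side):
;   p1'   = p1 + clip3(-tc0, tc0, ((p2 + ((p0+q0+1)>>1)) >> 1) - p1)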
; in: %1=x2, %2=x1, %3=p0, %4=q0, %5=mask&tc, %6=tmp
%macro LUMA_Q1 6
    pavgw   %6, %3, %4      ; (p0+q0+1)>>1
    paddw   %1, %6
    pxor    %6, %6
    psraw   %1, 1
    psubw   %6, %5
    psubw   %1, %2
    CLIPW   %1, %6, %5
    paddw   %1, %2
%endmacro

%macro LUMA_DEBLOCK_ONE 3
    DIFF_LT m5, %1, bm, m4, m6
    pxor    m6, m6
    mova    %3, m4
    pcmpgtw m6, tcm
    pand    m4, tcm
    pandn   m6, m7
    pand    m4, m6
    LUMA_Q1 m5, %2, m1, m2, m4, m6
%endmacro

%macro LUMA_H_STORE 2
%if mmsize == 8
    movq        [r0-4], m0
    movq     [r0+r1-4], m1
    movq   [r0+r1*2-4], m2
    movq     [r0+%2-4], m3
%else
    movq        [r0-4], m0
    movhps   [r0+r1-4], m0
    movq   [r0+r1*2-4], m1
    movhps      [%1-4], m1
    movq     [%1+r1-4], m2
    movhps [%1+r1*2-4], m2
    movq     [%1+%2-4], m3
    movhps [%1+r1*4-4], m3
%endif
%endmacro

%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
    %assign pad 5*mmsize+12-(stack_offset&15)
    %define tcm [rsp]
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define am  [rsp+mmsize*3]
    %define bm  [rsp+mmsize*4]
    SUB        rsp, pad
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB     m4, m5, r2d, r3d
    mov         r3, 32/mmsize
    mov         r2, r0
    sub         r0, r1
    mova        am, m4
    sub         r0, r1
    mova        bm, m5
    sub         r0, r1
.loop:
    mova        m0, [r0+r1]
    mova        m1, [r0+r1*2]
    mova        m2, [r2]
    mova        m3, [r2+r1]

    LOAD_MASK   m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC     m6, r4
    mova       tcm, m6

    mova        m5, [r0]
    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova   [r0+r1], m5

    mova        m5, [r2+r1*2]
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova   [r2+r1], m5

    pxor        m5, m5
    mova        m6, tcm
    pcmpgtw     m5, tcm
    psubw       m6, ms1
    pandn       m5, m7
    psubw       m6, ms2
    pand        m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova [r0+r1*2], m1
    mova      [r2], m2

    add         r0, mmsize
    add         r2, mmsize
    add         r4, mmsize/8
    dec         r3
    jg .loop
    ADD        rsp, pad
    RET
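; deblock_h_luma_10 filters a vertical edge: each iteration loads four pixels
; on both sides of the edge from mmsize/2 rows, transposes them into the same
; p/q register layout the vertical filter uses, runs the identical filter,
; then transposes back and stores through LUMA_H_STORE.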
cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
    %assign pad 7*mmsize+12-(stack_offset&15)
    %define tcm [rsp]
    %define ms1 [rsp+mmsize]
    %define ms2 [rsp+mmsize*2]
    %define p1m [rsp+mmsize*3]
    %define p2m [rsp+mmsize*4]
    %define am  [rsp+mmsize*5]
    %define bm  [rsp+mmsize*6]
    SUB        rsp, pad
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB     m4, m5, r2d, r3d
    mov         r3, r1
    mova        am, m4
    add         r3, r1
    mov         r5, 32/mmsize
    mova        bm, m5
    add         r3, r1
%if mmsize == 16
    mov         r2, r0
    add         r2, r3
%endif
.loop:
%if mmsize == 8
    movq        m2, [r0-8]     ; y q2 q1 q0
    movq        m7, [r0+0]
    movq        m5, [r0+r1-8]
    movq        m3, [r0+r1+0]
    movq        m0, [r0+r1*2-8]
    movq        m6, [r0+r1*2+0]
    movq        m1, [r0+r3-8]
    TRANSPOSE4x4W 2, 5, 0, 1, 4
    SWAP         2, 7
    movq        m7, [r0+r3]
    TRANSPOSE4x4W 2, 3, 6, 7, 4
%else
    movu        m5, [r0-8]     ; y q2 q1 q0 p0 p1 p2 x
    movu        m0, [r0+r1-8]
    movu        m2, [r0+r1*2-8]
    movu        m3, [r2-8]
    TRANSPOSE4x4W 5, 0, 2, 3, 6
    mova       tcm, m3

    movu        m4, [r2+r1-8]
    movu        m1, [r2+r1*2-8]
    movu        m3, [r2+r3-8]
    movu        m7, [r2+r1*4-8]
    TRANSPOSE4x4W 4, 1, 3, 7, 6

    mova        m6, tcm
    punpcklqdq  m6, m7
    punpckhqdq  m5, m4
    SBUTTERFLY qdq, 0, 1, 7
    SBUTTERFLY qdq, 2, 3, 7
%endif

    mova       p2m, m6
    LOAD_MASK   m0, m1, m2, m3, am, bm, m7, m4, m6
    LOAD_TC     m6, r4
    mova       tcm, m6

    LUMA_DEBLOCK_ONE m1, m0, ms1
    mova       p1m, m5

    mova        m5, p2m
    LUMA_DEBLOCK_ONE m2, m3, ms2
    mova       p2m, m5

    pxor        m5, m5
    mova        m6, tcm
    pcmpgtw     m5, tcm
    psubw       m6, ms1
    pandn       m5, m7
    psubw       m6, ms2
    pand        m5, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
    mova        m0, p1m
    mova        m3, p2m
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r2, r3

    add         r4, mmsize/8
    lea         r0, [r0+r1*(mmsize/2)]
    lea         r2, [r2+r1*(mmsize/2)]
    dec         r5
    jg .loop
    ADD        rsp, pad
    RET
%endmacro

%if ARCH_X86_64
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
;     m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
    LOAD_MASK   m0, m1, m2, m3, m12, m13, m7, m4, m6
    LOAD_TC     m6, r4
    DIFF_LT     m8, m1, m13, m10, m4
    DIFF_LT     m9, m2, m13, m11, m4
    pand        m6, m7

    mova       m14, m6
    pxor        m4, m4
    pcmpgtw     m6, m4
    pand        m6, m14

    mova        m5, m10
    pand        m5, m6
    LUMA_Q1 m8, m0, m1, m2, m5, m4

    mova        m5, m11
    pand        m5, m6
    LUMA_Q1 m9, m3, m1, m2, m5, m4

    pxor        m4, m4
    psubw       m6, m10
    pcmpgtw     m4, m14
    pandn       m4, m7
    psubw       m6, m11
    pand        m4, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6

    SWAP         0, 8
    SWAP         3, 9
%endmacro
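; On x86-64 the whole six-row neighbourhood (p2..q2) stays in registers, so
; the functions below avoid the per-iteration stack spills that the 32-bit
; DEBLOCK_LUMA path above needs for tcm/ms1/ms2.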
%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
    %define p2 m8
    %define p1 m0
    %define p0 m1
    %define q0 m2
    %define q1 m3
    %define q2 m9
    %define mask0 m7
    %define mask1 m10
    %define mask2 m11
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m12, m13, r2d, r3d
    mov         r2, r0
    sub         r0, r1
    sub         r0, r1
    sub         r0, r1
    mov         r3, 2
.loop:
    mova        p2, [r0]
    mova        p1, [r0+r1]
    mova        p0, [r0+r1*2]
    mova        q0, [r2]
    mova        q1, [r2+r1]
    mova        q2, [r2+r1*2]
    DEBLOCK_LUMA_INTER_SSE2
    mova   [r0+r1], p1
    mova [r0+r1*2], p0
    mova      [r2], q0
    mova   [r2+r1], q1
    add         r0, mmsize
    add         r2, mmsize
    add         r4, 2
    dec         r3
    jg .loop
    REP_RET

cglobal deblock_h_luma_10, 5,7,15
    shl        r2d, 2
    shl        r3d, 2
    LOAD_AB    m12, m13, r2d, r3d
    mov         r2, r1
    add         r2, r1
    add         r2, r1
    mov         r5, r0
    add         r5, r2
    mov         r6, 2
.loop:
    movu        m8, [r0-8]     ; y q2 q1 q0 p0 p1 p2 x
    movu        m0, [r0+r1-8]
    movu        m2, [r0+r1*2-8]
    movu        m9, [r5-8]
    movu        m5, [r5+r1-8]
    movu        m1, [r5+r1*2-8]
    movu        m3, [r5+r2-8]
    movu        m7, [r5+r1*4-8]

    TRANSPOSE4x4W 8, 0, 2, 9, 10
    TRANSPOSE4x4W 5, 1, 3, 7, 10

    punpckhqdq  m8, m5
    SBUTTERFLY qdq, 0, 1, 10
    SBUTTERFLY qdq, 2, 3, 10
    punpcklqdq  m9, m7

    DEBLOCK_LUMA_INTER_SSE2

    TRANSPOSE4x4W 0, 1, 2, 3, 4
    LUMA_H_STORE r5, r2
    add         r4, 2
    lea         r0, [r0+r1*8]
    lea         r5, [r5+r1*8]
    dec         r6
    jg .loop
    REP_RET
%endmacro

INIT_XMM sse2
DEBLOCK_LUMA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_64
%endif
%endif

%macro SWAPMOVA 2
%ifid %1
    SWAP %1, %2
%else
    mova %1, %2
%endif
%endmacro

; in: t0-t2: tmp registers
;     %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
;     %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
%if ARCH_X86_64
    paddw     t0, %3, %2
    mova      t2, %4
    paddw     t2, %3
%else
    mova      t0, %3
    mova      t2, %4
    paddw     t0, %2
    paddw     t2, %3
%endif
    paddw     t0, %1
    paddw     t2, t2
    paddw     t0, %5
    paddw     t2, %9
    paddw     t0, %9    ; (p2 + p1 + p0 + q0 + 2)
    paddw     t2, t0    ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)

    psrlw     t2, 3
    psrlw     t1, t0, 2
    psubw     t2, %3
    psubw     t1, %2
    pand      t2, %8
    pand      t1, %8
    paddw     t2, %3
    paddw     t1, %2
    SWAPMOVA %11, t1

    psubw     t1, t0, %3
    paddw     t0, t0
    psubw     t1, %5
    psubw     t0, %3
    paddw     t1, %6
    paddw     t1, %2
    paddw     t0, %6
    psrlw     t1, 2     ; (2*p1 + p0 + q1 + 2)/4
    psrlw     t0, 3     ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3

    pxor      t0, t1
    pxor      t1, %1
    pand      t0, %8
    pand      t1, %7
    pxor      t0, t1
    pxor      t0, %1
    SWAPMOVA %10, t0
    SWAPMOVA %12, t2
%endmacro
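; For reference, the scalar results LUMA_INTRA_P012 selects between (shown
; for the p side; the q side passes mirrored arguments):
;   where mask1p: p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
;                 p1' = (p2 + p1 + p0 + q0 + 2) >> 2
;                 p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
;   where only mask0: p0' = (2*p1 + p0 + q1 + 2) >> 2, p1/p2 unchanged
; The pxor/pand ladder at the end is a branchless two-level blend of p0,
; the weak p0' and the strong p0'.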
%macro LUMA_INTRA_INIT 1
    %xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
    %define t0 m4
    %define t1 m5
    %define t2 m6
    %define t3 m7
    %assign i 4
%rep %1
    CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
    %assign i i+1
%endrep
    SUB    rsp, pad
%endmacro

; in: %1-%3=tmp, %4=p2, %5=q2
%macro LUMA_INTRA_INTER 5
    LOAD_AB t0, t1, r2d, r3d
    mova    %1, t0
    LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
%if ARCH_X86_64
    mova    %2, t0        ; mask0
    psrlw   t3, %1, 2
%else
    mova    t3, %1
    mova    %2, t0        ; mask0
    psrlw   t3, 2
%endif
    paddw   t3, [pw_2]    ; alpha/4+2
    DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
    pand    t2, %2
    mova    t3, %5        ; q2
    mova    %1, t2        ; mask1
    DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
    pand    t2, %1
    mova    t3, %4        ; p2
    mova    %3, t2        ; mask1q
    DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
    pand    t2, %1
    mova    %1, t2        ; mask1p
%endmacro

%macro LUMA_H_INTRA_LOAD 0
%if mmsize == 8
    movu    t0, [r0-8]
    movu    t1, [r0+r1-8]
    movu    m0, [r0+r1*2-8]
    movu    m1, [r0+r4-8]
    TRANSPOSE4x4W 4, 5, 0, 1, 2
    mova    t4, t0        ; p3
    mova    t5, t1        ; p2

    movu    m2, [r0]
    movu    m3, [r0+r1]
    movu    t0, [r0+r1*2]
    movu    t1, [r0+r4]
    TRANSPOSE4x4W 2, 3, 4, 5, 6
    mova    t6, t0        ; q2
    mova    t7, t1        ; q3
%else
    movu    t0, [r0-8]
    movu    t1, [r0+r1-8]
    movu    m0, [r0+r1*2-8]
    movu    m1, [r0+r5-8]
    movu    m2, [r4-8]
    movu    m3, [r4+r1-8]
    movu    t2, [r4+r1*2-8]
    movu    t3, [r4+r5-8]
    TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
    mova    t4, t0        ; p3
    mova    t5, t1        ; p2
    mova    t6, t2        ; q2
    mova    t7, t3        ; q3
%endif
%endmacro

; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
%if mmsize == 8
    TRANSPOSE4x4W %1, %2, %3, %4, %9
    movq        [r0-8], m%1
    movq     [r0+r1-8], m%2
    movq   [r0+r1*2-8], m%3
    movq     [r0+r4-8], m%4
    movq           m%1, %8
    TRANSPOSE4x4W %5, %6, %7, %1, %9
    movq          [r0], m%5
    movq       [r0+r1], m%6
    movq     [r0+r1*2], m%7
    movq       [r0+r4], m%1
%else
    TRANSPOSE2x4x4W %1, %2, %3, %4, %9
    movq        [r0-8], m%1
    movq     [r0+r1-8], m%2
    movq   [r0+r1*2-8], m%3
    movq     [r0+r5-8], m%4
    movhps      [r4-8], m%1
    movhps   [r4+r1-8], m%2
    movhps [r4+r1*2-8], m%3
    movhps   [r4+r5-8], m%4
%ifnum %8
    SWAP %1, %8
%else
    mova           m%1, %8
%endif
    TRANSPOSE2x4x4W %5, %6, %7, %1, %9
    movq          [r0], m%5
    movq       [r0+r1], m%6
    movq     [r0+r1*2], m%7
    movq       [r0+r5], m%1
    movhps        [r4], m%5
    movhps     [r4+r1], m%6
    movhps   [r4+r1*2], m%7
    movhps     [r4+r5], m%1
%endif
%endmacro
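; The 64-bit intra path below keeps all eight transposed rows (p3..q3) in
; xmm registers; the 32-bit DEBLOCK_LUMA_INTRA fallback further down instead
; spills t4..t11 to stack slots set up by LUMA_INTRA_INIT.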
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
    %define t0 m1
    %define t1 m2
    %define t2 m4
    %define p2 m8
    %define p1 m9
    %define p0 m10
    %define q0 m11
    %define q1 m12
    %define q2 m13
    %define aa m5
    %define bb m14
    lea     r4, [r1*4]
    lea     r5, [r1*3]  ; 3*stride
    neg     r4
    add     r4, r0      ; pix-4*stride
    mov     r6, 2
    mova    m0, [pw_2]
    shl    r2d, 2
    shl    r3d, 2
    LOAD_AB aa, bb, r2d, r3d
.loop:
    mova    p2, [r4+r1]
    mova    p1, [r4+2*r1]
    mova    p0, [r4+r5]
    mova    q0, [r0]
    mova    q1, [r0+r1]
    mova    q2, [r0+2*r1]

    LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
    mova    t2, aa
    psrlw   t2, 2
    paddw   t2, m0 ; alpha/4+2
    DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
    DIFF_LT q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
    pand    m6, m3
    pand    m7, m6
    pand    m6, t1
    LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
    LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
    add     r0, mmsize
    add     r4, mmsize
    dec     r6
    jg .loop
    REP_RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
    %define t0 m15
    %define t1 m14
    %define t2 m2
    %define q3 m5
    %define q2 m8
    %define q1 m9
    %define q0 m10
    %define p0 m11
    %define p1 m12
    %define p2 m13
    %define p3 m4
    %define spill [rsp]
    %assign pad 24-(stack_offset&15)
    SUB     rsp, pad
    lea     r4, [r1*4]
    lea     r5, [r1*3]  ; 3*stride
    add     r4, r0      ; pix+4*stride
    mov     r6, 2
    mova    m0, [pw_2]
    shl    r2d, 2
    shl    r3d, 2
.loop:
    movu    q3, [r0-8]
    movu    q2, [r0+r1-8]
    movu    q1, [r0+r1*2-8]
    movu    q0, [r0+r5-8]
    movu    p0, [r4-8]
    movu    p1, [r4+r1-8]
    movu    p2, [r4+r1*2-8]
    movu    p3, [r4+r5-8]
    TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1

    LOAD_AB m1, m2, r2d, r3d
    LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
    psrlw   m1, 2
    paddw   m1, m0 ; alpha/4+2
    DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
    DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
    DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
    pand    m6, m3
    pand    m7, m6
    pand    m6, t1

    mova spill, q3
    LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
    LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
    mova    m7, spill

    LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14

    lea     r0, [r0+r1*8]
    lea     r4, [r4+r1*8]
    dec     r6
    jg .loop
    ADD    rsp, pad
    RET
%endmacro
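; Instantiate the 64-bit intra deblockers for SSE2 and, when available, AVX;
; 32-bit builds get their versions from DEBLOCK_LUMA_INTRA below.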
INIT_XMM sse2
DEBLOCK_LUMA_INTRA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_INTRA_64
%endif

%endif

%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void deblock_v_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 3
    lea     r4, [r1*4]
    lea     r5, [r1*3]
    neg     r4
    add     r4, r0
    mov     r6, 32/mmsize
    shl    r2d, 2
    shl    r3d, 2
.loop:
    mova    m0, [r4+r1*2] ; p1
    mova    m1, [r4+r5]   ; p0
    mova    m2, [r0]      ; q0
    mova    m3, [r0+r1]   ; q1
    LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
    LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
    mova    t3, [r0+r1*2] ; q2
    LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
    add     r0, mmsize
    add     r4, mmsize
    dec     r6
    jg .loop
    ADD    rsp, pad
    RET

;-----------------------------------------------------------------------------
; void deblock_h_luma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
    LUMA_INTRA_INIT 8
%if mmsize == 8
    lea     r4, [r1*3]
    mov     r5, 32/mmsize
%else
    lea     r4, [r1*4]
    lea     r5, [r1*3]  ; 3*stride
    add     r4, r0      ; pix+4*stride
    mov     r6, 32/mmsize
%endif
    shl    r2d, 2
    shl    r3d, 2
.loop:
    LUMA_H_INTRA_LOAD
    LUMA_INTRA_INTER t8, t9, t10, t5, t6

    LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
    mova    t3, t6     ; q2
    LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5

    mova    m2, t4
    mova    m0, t11
    mova    m1, t5
    mova    m3, t8
    mova    m6, t6

    LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7

    lea     r0, [r0+r1*(mmsize/2)]
%if mmsize == 8
    dec     r5
%else
    lea     r4, [r4+r1*(mmsize/2)]
    dec     r6
%endif
    jg .loop
    ADD    rsp, pad
    RET
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
INIT_XMM sse2
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%endif
%endif
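; For reference, the scalar equations of the chroma intra filter implemented
; by CHROMA_DEBLOCK_P0_Q0_INTRA below, applied only on lanes selected by the
; mask from LOAD_MASK:
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2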
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
    mova    %6, [pw_2]
    paddw   %6, %3
    paddw   %6, %4
    paddw   %7, %6, %2
    paddw   %6, %1
    paddw   %6, %3
    paddw   %7, %4
    psraw   %6, 2
    psraw   %7, 2
    psubw   %6, %1
    psubw   %7, %2
    pand    %6, %5
    pand    %7, %5
    paddw   %1, %6
    paddw   %2, %7
%endmacro

%macro CHROMA_V_LOAD 1
    mova    m0, [r0]    ; p1
    mova    m1, [r0+r1] ; p0
    mova    m2, [%1]    ; q0
    mova    m3, [%1+r1] ; q1
%endmacro

%macro CHROMA_V_STORE 0
    mova [r0+1*r1], m1
    mova [r0+2*r1], m2
%endmacro

%macro CHROMA_V_LOAD_TC 2
    movd        %1, [%2]
    punpcklbw   %1, %1
    punpcklwd   %1, %1
    psraw       %1, 6
%endmacro

%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void deblock_v_chroma( uint16_t *pix, int stride, int alpha, int beta, int8_t *tc0 )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
    mov         r5, r0
    sub         r0, r1
    sub         r0, r1
    shl        r2d, 2
    shl        r3d, 2
%if mmsize < 16
    mov         r6, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r5
    LOAD_AB     m4, m5, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, m4, m5, m7, m6, m4
    pxor        m4, m4
    CHROMA_V_LOAD_TC m6, r4
    psubw       m6, [pw_3]
    pmaxsw      m6, m4
    pand        m7, m6
    DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add         r0, mmsize
    add         r5, mmsize
    add         r4, mmsize/4
    dec         r6
    jg .loop
    REP_RET
%else
    RET
%endif

;-----------------------------------------------------------------------------
; void deblock_v_chroma_intra( uint16_t *pix, int stride, int alpha, int beta )
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
    mov         r4, r0
    sub         r0, r1
    sub         r0, r1
    shl        r2d, 2
    shl        r3d, 2
%if mmsize < 16
    mov         r5, 16/mmsize
.loop:
%endif
    CHROMA_V_LOAD r4
    LOAD_AB     m4, m5, r2d, r3d
    LOAD_MASK   m0, m1, m2, m3, m4, m5, m7, m6, m4
    CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
    CHROMA_V_STORE
%if mmsize < 16
    add         r0, mmsize
    add         r4, mmsize
    dec         r5
    jg .loop
    REP_RET
%else
    RET
%endif
%endmacro

%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_CHROMA
%endif
INIT_XMM sse2
DEBLOCK_CHROMA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_CHROMA
%endif