;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

; "avg" store for a half-register (4-byte) quantity:
; %1 = src reg, %2 = dst mem, %3 = scratch reg used to load dst for averaging
%macro op_avgh 3
    movh   %3, %2
    pavgb  %1, %3
    movh   %2, %1
%endmacro

; "avg" store for a full mmx register: average %1 with dst mem %2, write back
%macro op_avg 2
    pavgb  %1, %2
    mova   %2, %1
%endmacro

; "put" store, half-register width; accepts (and ignores) an optional third
; operand so call sites can pass the same scratch reg as op_avgh
%macro op_puth 2-3
    movh   %2, %1
%endmacro

; "put" store, full mmx register width
%macro op_put 2
    mova   %2, %1
%endmacro

; void pixels4_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
;                        int dstStride, int src1Stride, int h)
; Averages two sources into dst, 4 pixels per row.  src2 is read linearly
; (packed rows, 4 bytes apart), src1 advances by src1Stride.
; %1 = put/avg; OP resolves to the half-width (op_%1h) store macro.
%macro PIXELS4_L2 1
%define OP op_%1h
cglobal %1_pixels4_l2, 6,6
    movsxdifnidn r3, r3d            ; sign-extend strides on 64-bit ABIs
    movsxdifnidn r4, r4d
    test   r5d, 1                   ; odd height? peel one row so the loop
    je     .loop                    ; below can process 4 rows per iteration
    movd   m0, [r1]
    movd   m1, [r2]
    add    r1, r4
    add    r2, 4
    pavgb  m0, m1
    OP     m0, [r0], m3
    add    r0, r3
    dec    r5d
.loop:                              ; 4 rows per iteration, 2 at a time
    mova   m0, [r1]
    mova   m1, [r1+r4]
    lea    r1, [r1+2*r4]
    pavgb  m0, [r2]
    pavgb  m1, [r2+4]
    OP     m0, [r0], m3
    OP     m1, [r0+r3], m3
    lea    r0, [r0+2*r3]
    mova   m0, [r1]
    mova   m1, [r1+r4]
    lea    r1, [r1+2*r4]
    pavgb  m0, [r2+8]
    pavgb  m1, [r2+12]
    OP     m0, [r0], m3
    OP     m1, [r0+r3], m3
    lea    r0, [r0+2*r3]
    add    r2, 16                   ; src2 rows are packed 4 bytes apart
    sub    r5d, 4
    jne    .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS4_L2 put
PIXELS4_L2 avg

; void pixels8_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
;                        int dstStride, int src1Stride, int h)
; Same as pixels4_l2 but 8 pixels (one full mmx reg) per row; src2 rows are
; packed 8 bytes apart.
%macro PIXELS8_L2 1
%define OP op_%1
cglobal %1_pixels8_l2, 6,6
    movsxdifnidn r3, r3d
    movsxdifnidn r4, r4d
    test   r5d, 1                   ; peel one row if height is odd
    je     .loop
    mova   m0, [r1]
    mova   m1, [r2]
    add    r1, r4
    add    r2, 8
    pavgb  m0, m1
    OP     m0, [r0]
    add    r0, r3
    dec    r5d
.loop:                              ; 4 rows per iteration
    mova   m0, [r1]
    mova   m1, [r1+r4]
    lea    r1, [r1+2*r4]
    pavgb  m0, [r2]
    pavgb  m1, [r2+8]
    OP     m0, [r0]
    OP     m1, [r0+r3]
    lea    r0, [r0+2*r3]
    mova   m0, [r1]
    mova   m1, [r1+r4]
    lea    r1, [r1+2*r4]
    pavgb  m0, [r2+16]
    pavgb  m1, [r2+24]
    OP     m0, [r0]
    OP     m1, [r0+r3]
    lea    r0, [r0+2*r3]
    add    r2, 32
    sub    r5d, 4
    jne    .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS8_L2 put
PIXELS8_L2 avg

; void pixels16_l2_mmxext(uint8_t *dst, uint8_t *src1, uint8_t *src2,
;                         int dstStride, int src1Stride, int h)
; 16 pixels per row, processed as two 8-byte mmx halves; src2 rows are packed
; 16 bytes apart.  Loop handles 2 rows per iteration (vs 4 for the narrower
; widths, since each row already needs two register-widths of work).
%macro PIXELS16_L2 1
%define OP op_%1
cglobal %1_pixels16_l2, 6,6
    movsxdifnidn r3, r3d
    movsxdifnidn r4, r4d
    test   r5d, 1                   ; peel one row if height is odd
    je     .loop
    mova   m0, [r1]
    mova   m1, [r1+8]
    pavgb  m0, [r2]
    pavgb  m1, [r2+8]
    add    r1, r4
    add    r2, 16
    OP     m0, [r0]
    OP     m1, [r0+8]
    add    r0, r3
    dec    r5d
.loop:                              ; 2 rows per iteration
    mova   m0, [r1]
    mova   m1, [r1+8]
    add    r1, r4
    pavgb  m0, [r2]
    pavgb  m1, [r2+8]
    OP     m0, [r0]
    OP     m1, [r0+8]
    add    r0, r3
    mova   m0, [r1]
    mova   m1, [r1+8]
    add    r1, r4
    pavgb  m0, [r2+16]
    pavgb  m1, [r2+24]
    OP     m0, [r0]
    OP     m1, [r0+8]
    add    r0, r3
    add    r2, 32
    sub    r5d, 2
    jne    .loop
    REP_RET
%endmacro

INIT_MMX mmxext
PIXELS16_L2 put
PIXELS16_L2 avg