;******************************************************************************
;* V210 SIMD unpack
;* Copyright (c) 2011 Loren Merritt
;* Copyright (c) 2011 Kieran Kunhya
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; 10-bit sample mask, plus per-word multipliers that shift the low (x64, i.e.
; <<6) and high (x4, i.e. <<2) samples of each dword up to bit 15 ahead of a
; common 6-bit right shift.
v210_mask:        times 4 dd 0x3ff
v210_mult:        dw 64,4,64,4,64,4,64,4
v210_luma_shuf:   db 8,9,0,1,2,3,12,13,4,5,6,7,-1,-1,-1,-1
v210_chroma_shuf: db 0,1,8,9,6,7,-1,-1,2,3,4,5,12,13,-1,-1

SECTION .text

%macro v210_planar_unpack 2

; v210_planar_unpack(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width)
cglobal v210_planar_unpack_%1_%2, 5, 5, 7
    movsxdifnidn r4, r4d
    ; Point each destination plane at its end and count r4 up from -width to
    ; zero, so a single counter indexes all three planes.
    lea    r1, [r1+2*r4]
    add    r2, r4
    add    r3, r4
    neg    r4

    mova   m3, [v210_mult]
    mova   m4, [v210_mask]
    mova   m5, [v210_luma_shuf]
    mova   m6, [v210_chroma_shuf]
.loop:
%ifidn %1, unaligned
    movu   m0, [r0]
%else
    mova   m0, [r0]
%endif

    ; Each source dword packs three 10-bit samples at bits 0-9, 10-19 and
    ; 20-29. The multiply/shift pair extracts samples 0 and 2 into m1; the
    ; dword shift and mask extract sample 1 into m0.
    pmullw m1, m0, m3
    psrld  m0, 10
    psrlw  m1, 6            ; u0 v0 y1 y2 v1 u2 y4 y5
    pand   m0, m4           ; y0 __ u1 __ y3 __ v2 __

    shufps m2, m1, m0, 0x8d ; y1 y2 y4 y5 y0 __ y3 __
    pshufb m2, m5           ; y0 y1 y2 y3 y4 y5 __ __
    movu   [r1+2*r4], m2

    shufps m1, m0, 0xd8     ; u0 v0 v1 u2 u1 __ v2 __
    pshufb m1, m6           ; u0 u1 u2 __ v0 v1 v2 __
    movq   [r2+r4], m1
    movhps [r3+r4], m1

    ; 16 source bytes yield 6 pixels: 6 luma samples and 3 chroma pairs.
    add    r0, mmsize
    add    r4, 6
    jl     .loop

    REP_RET
%endmacro

INIT_XMM
v210_planar_unpack unaligned, ssse3
%if HAVE_AVX_EXTERNAL
INIT_AVX
v210_planar_unpack unaligned, avx
%endif

INIT_XMM
v210_planar_unpack aligned, ssse3
%if HAVE_AVX_EXTERNAL
INIT_AVX
v210_planar_unpack aligned, avx
%endif
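
;------------------------------------------------------------------------------
; Reference sketch (comment only, not assembled): a scalar C version of the
; loop above, to document the v210 data flow that the SIMD code implements.
; The function name is an illustrative assumption, not part of Libav's API.
;
;   #include <stdint.h>
;
;   /* Four little-endian dwords pack six 4:2:2 pixels:
;    *   dword 0: U0 | Y0<<10 | V0<<20    dword 1: Y1 | U1<<10 | Y2<<20
;    *   dword 2: V1 | Y3<<10 | U2<<20    dword 3: Y4 | V2<<10 | Y5<<20 */
;   static void v210_unpack_scalar(const uint32_t *src, uint16_t *y,
;                                  uint16_t *u, uint16_t *v, int width)
;   {
;       for (int i = 0; i < width; i += 6) {
;           uint32_t a = *src++, b = *src++, c = *src++, d = *src++;
;           *u++ =  a        & 0x3ff;  /* sample 0: bits  0-9  */
;           *y++ = (a >> 10) & 0x3ff;  /* sample 1: bits 10-19 */
;           *v++ = (a >> 20) & 0x3ff;  /* sample 2: bits 20-29 */
;           *y++ =  b        & 0x3ff;
;           *u++ = (b >> 10) & 0x3ff;
;           *y++ = (b >> 20) & 0x3ff;
;           *v++ =  c        & 0x3ff;
;           *y++ = (c >> 10) & 0x3ff;
;           *u++ = (c >> 20) & 0x3ff;
;           *y++ =  d        & 0x3ff;
;           *v++ = (d >> 10) & 0x3ff;
;           *y++ = (d >> 20) & 0x3ff;
;       }
;   }
;
; Unlike this sketch, the SIMD loop stores slightly past the valid samples
; (movu writes 8 luma words for 6 valid ones, movq/movhps 4 chroma words for
; 3), so the destination buffers need padding.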
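
;------------------------------------------------------------------------------
; Usage sketch (comment only, not assembled): how the C side might select one
; of the symbols instantiated above at runtime. The selector function and the
; function-pointer type are hypothetical; only the ff_-prefixed symbol names
; (produced by cglobal) and the libavutil CPU-flag API are taken as given.
;
;   #include <stddef.h>
;   #include <stdint.h>
;   #include "libavutil/cpu.h"
;
;   void ff_v210_planar_unpack_unaligned_ssse3(const uint32_t *src, uint16_t *y,
;                                              uint16_t *u, uint16_t *v, int width);
;   void ff_v210_planar_unpack_aligned_ssse3(const uint32_t *src, uint16_t *y,
;                                            uint16_t *u, uint16_t *v, int width);
;
;   typedef void (*v210_unpack_fn)(const uint32_t *, uint16_t *, uint16_t *,
;                                  uint16_t *, int);
;
;   static v210_unpack_fn select_unpack(int aligned)
;   {
;       int flags = av_get_cpu_flags();
;       if (flags & AV_CPU_FLAG_SSSE3)
;           return aligned ? ff_v210_planar_unpack_aligned_ssse3
;                          : ff_v210_planar_unpack_unaligned_ssse3;
;       return NULL; /* caller falls back to a scalar path */
;   }
;
; The aligned variants additionally require 16-byte-aligned source rows; the
; unaligned ones differ only in trading mova for movu on the load.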