;******************************************************************************
;* V210 SIMD unpack
;* Copyright (c) 2011 Loren Merritt <lorenm@u.washington.edu>
;* Copyright (c) 2011 Kieran Kunhya <kieran@kunhya.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

; V210 packs three 10-bit components into each 32-bit little-endian word,
; at bit offsets 0, 10 and 20 (top two bits unused).
v210_mask:        times 4 dd 0x3ff            ; 10-bit field mask, per dword lane
; Per-word multipliers: *64 (<<6) / *4 (<<2) move the bit-0 and bit-10 fields
; so a single psrlw 6 afterwards leaves each 10-bit value word-aligned.
v210_mult:        dw 64,4,64,4,64,4,64,4
; pshufb byte patterns gathering the scattered samples into plane order
; (-1 lanes are zeroed by pshufb).
v210_luma_shuf:   db 8,9,0,1,2,3,12,13,4,5,6,7,-1,-1,-1,-1
v210_chroma_shuf: db 0,1,8,9,6,7,-1,-1,2,3,4,5,12,13,-1,-1

SECTION .text

;------------------------------------------------------------------------------
; void v210_planar_unpack_<%1>_<%2>(const uint32_t *src, uint16_t *y,
;                                   uint16_t *u, uint16_t *v, int width)
;
; Unpack packed V210 (three 10-bit samples per 32-bit LE word) into separate
; planar 16-bit Y, U and V buffers.  Each iteration consumes one 16-byte
; group (4 dwords = 6 luma + 3 chroma sample pairs).
;
; %1: 'aligned' or 'unaligned' -- selects mova vs movu for the source load
; %2: instruction-set name suffix (ssse3/avx) for the exported symbol
;
; Register roles after the prologue:
;   r0 = src (advances by mmsize per iteration)
;   r1 = y + 2*width  (end of luma plane; indexed by 2*r4 bytes)
;   r2 = u + width    (end of chroma plane; width/2 samples * 2 bytes)
;   r3 = v + width
;   r4 = negative loop counter, +6 per iteration, loop exits at 0
;------------------------------------------------------------------------------
%macro v210_planar_unpack 2
cglobal v210_planar_unpack_%1_%2, 5, 5, 7
    movsxdifnidn r4, r4d        ; sign-extend 32-bit width arg where needed
    lea    r1, [r1+2*r4]        ; point all plane pointers at their ends so a
    add    r2, r4               ; negative index (r4) can count up towards 0
    add    r3, r4
    neg    r4

    mova   m3, [v210_mult]
    mova   m4, [v210_mask]
    mova   m5, [v210_luma_shuf]
    mova   m6, [v210_chroma_shuf]
.loop:                          ; colon added: avoids NASM orphan-label warning
%ifidn %1, unaligned
    movu   m0, [r0]
%else
    mova   m0, [r0]
%endif

    ; Extract the three 10-bit fields of every dword into 16-bit word lanes.
    pmullw m1, m0, m3           ; <<6 / <<2 per word lane (fields at bits 0/10)
    psrld  m0, 10               ; bring the bit-20 field down ...
    psrlw  m1, 6                ; u0 v0 y1 y2 v1 u2 y4 y5
    pand   m0, m4               ; y0 __ u1 __ y3 __ v2 __

    ; Gather luma: pick the y words from both vectors, then order them.
    shufps m2, m1, m0, 0x8d     ; y1 y2 y4 y5 y0 __ y3 __
    pshufb m2, m5               ; y0 y1 y2 y3 y4 y5 __ __
    movu   [r1+2*r4], m2        ; store 6 luma samples (12 bytes + 4 junk)

    ; Gather chroma: low half -> U plane, high half -> V plane.
    shufps m1, m0, 0xd8         ; u0 v0 v1 u2 u1 __ v2 __
    pshufb m1, m6               ; u0 u1 u2 __ v0 v1 v2 __
    movq   [r2+r4], m1          ; 3 U samples (+1 junk word)
    movhps [r3+r4], m1          ; 3 V samples (+1 junk word)

    add    r0, mmsize
    add    r4, 6                ; advanced 6 luma samples / 6 chroma bytes
    jl     .loop

    REP_RET
%endmacro
; Instantiate the macro for each load-alignment / ISA combination.
; The AVX variants are gated on toolchain support for external AVX symbols.
INIT_XMM
v210_planar_unpack unaligned, ssse3
%if HAVE_AVX_EXTERNAL
INIT_AVX
v210_planar_unpack unaligned, avx
%endif

INIT_XMM
v210_planar_unpack aligned, ssse3
%if HAVE_AVX_EXTERNAL
INIT_AVX
v210_planar_unpack aligned, avx
%endif