;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
|
yading@10
|
24 %include "libavutil/x86/x86util.asm"
|
yading@10
|
25
|
yading@10
|
26 SECTION .text
|
yading@10
|
27
|
yading@10
|
28 INIT_MMX mmxext
|
yading@10
|
29 ; void pixels(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
|
yading@10
|
30 %macro PIXELS48 2
|
yading@10
|
31 %if %2 == 4
|
yading@10
|
32 %define OP movh
|
yading@10
|
33 %else
|
yading@10
|
34 %define OP mova
|
yading@10
|
35 %endif
|
yading@10
|
36 cglobal %1_pixels%2, 4,5
|
yading@10
|
37 movsxdifnidn r2, r2d
|
yading@10
|
38 lea r4, [r2*3]
|
yading@10
|
39 .loop:
|
yading@10
|
40 OP m0, [r1]
|
yading@10
|
41 OP m1, [r1+r2]
|
yading@10
|
42 OP m2, [r1+r2*2]
|
yading@10
|
43 OP m3, [r1+r4]
|
yading@10
|
44 lea r1, [r1+r2*4]
|
yading@10
|
45 %ifidn %1, avg
|
yading@10
|
46 pavgb m0, [r0]
|
yading@10
|
47 pavgb m1, [r0+r2]
|
yading@10
|
48 pavgb m2, [r0+r2*2]
|
yading@10
|
49 pavgb m3, [r0+r4]
|
yading@10
|
50 %endif
|
yading@10
|
51 OP [r0], m0
|
yading@10
|
52 OP [r0+r2], m1
|
yading@10
|
53 OP [r0+r2*2], m2
|
yading@10
|
54 OP [r0+r4], m3
|
yading@10
|
55 sub r3d, 4
|
yading@10
|
56 lea r0, [r0+r2*4]
|
yading@10
|
57 jne .loop
|
yading@10
|
58 RET
|
yading@10
|
59 %endmacro
|
yading@10
|
60
|
yading@10
|
61 PIXELS48 put, 4
|
yading@10
|
62 PIXELS48 avg, 4
|
yading@10
|
63 PIXELS48 put, 8
|
yading@10
|
64 PIXELS48 avg, 8
|
yading@10
|
65
|
yading@10
|
66
|
yading@10
|
67 INIT_XMM sse2
|
yading@10
|
68 ; void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
|
yading@10
|
69 cglobal put_pixels16, 4,5,4
|
yading@10
|
70 lea r4, [r2*3]
|
yading@10
|
71 .loop:
|
yading@10
|
72 movu m0, [r1]
|
yading@10
|
73 movu m1, [r1+r2]
|
yading@10
|
74 movu m2, [r1+r2*2]
|
yading@10
|
75 movu m3, [r1+r4]
|
yading@10
|
76 lea r1, [r1+r2*4]
|
yading@10
|
77 mova [r0], m0
|
yading@10
|
78 mova [r0+r2], m1
|
yading@10
|
79 mova [r0+r2*2], m2
|
yading@10
|
80 mova [r0+r4], m3
|
yading@10
|
81 sub r3d, 4
|
yading@10
|
82 lea r0, [r0+r2*4]
|
yading@10
|
83 jnz .loop
|
yading@10
|
84 REP_RET
|
yading@10
|
85
|
yading@10
|
86 ; void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
|
yading@10
|
87 cglobal avg_pixels16, 4,5,4
|
yading@10
|
88 lea r4, [r2*3]
|
yading@10
|
89 .loop:
|
yading@10
|
90 movu m0, [r1]
|
yading@10
|
91 movu m1, [r1+r2]
|
yading@10
|
92 movu m2, [r1+r2*2]
|
yading@10
|
93 movu m3, [r1+r4]
|
yading@10
|
94 lea r1, [r1+r2*4]
|
yading@10
|
95 pavgb m0, [r0]
|
yading@10
|
96 pavgb m1, [r0+r2]
|
yading@10
|
97 pavgb m2, [r0+r2*2]
|
yading@10
|
98 pavgb m3, [r0+r4]
|
yading@10
|
99 mova [r0], m0
|
yading@10
|
100 mova [r0+r2], m1
|
yading@10
|
101 mova [r0+r2*2], m2
|
yading@10
|
102 mova [r0+r4], m3
|
yading@10
|
103 sub r3d, 4
|
yading@10
|
104 lea r0, [r0+r2*4]
|
yading@10
|
105 jnz .loop
|
yading@10
|
106 REP_RET
|