/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE: This code is based on GPL code from the libmpeg2 project. The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of FFmpeg.
 */

/*
 * FFmpeg integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project. I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters. The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function.
 */

#include <stdlib.h>                                   /* malloc(), free() */
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/ppc/types_altivec.h"
#include "dsputil_altivec.h"

#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1);                     \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);
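
/*
 * vec_mradds (a, b, c) computes, per 16-bit lane, a Q15 multiply with
 * rounding followed by a saturating add: saturate (((a * b + 0x4000) >> 15)
 * + c). The aN/cN vectors used above therefore hold fractional constants
 * scaled by 2^15, and each IDCT_HALF expansion is one 8-point butterfly
 * network over eight coefficient vectors.
 */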

#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
                                                                        \
    c4   = vec_splat (constants[0], 0);                                 \
    a0   = vec_splat (constants[0], 1);                                 \
    a1   = vec_splat (constants[0], 2);                                 \
    a2   = vec_splat (constants[0], 3);                                 \
    mc4  = vec_splat (constants[0], 4);                                 \
    ma2  = vec_splat (constants[0], 5);                                 \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero  = vec_splat_s16 (0);                                          \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);
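
/*
 * Shape of the full IDCT: the input rows are pre-scaled (the 4-bit left
 * shift uses the available headroom before the Q15 row prescale multiply),
 * run through IDCT_HALF, transposed with three rounds of
 * vec_mergeh/vec_mergel, run through IDCT_HALF again for the columns, and
 * finally scaled down by 2^6. The rounding bias is folded into vx0 before
 * the column pass so that it propagates through the butterflies to every
 * output.
 */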

static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};
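
/*
 * A rough reading of these tables (they are not documented in the original
 * source): constants[0] packs the Q15 butterfly constants, c4 = cos(pi/4)
 * = 23170 and a0..a2 = tan(pi/8), tan(pi/16), tan(3*pi/16), plus their
 * negations and the {32, 31} halfword pair that is splatted into the
 * rounding bias. constants[1..4] look like per-frequency input prescale
 * rows built from 2^14 * sqrt(2) * cos(k*pi/16) products.
 */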

void ff_idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);            \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);
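
    /*
     * COPY clamps one row of results to unsigned 8-bit with vec_packsu and
     * stores eight pixels as two 4-byte element stores. vec_ste rounds the
     * target address down to element alignment, so this effectively assumes
     * dest is at least 4-byte aligned.
     */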

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}

void ff_idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
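
    /*
     * vec_lvsl yields the shuffle indices matching dest's misalignment
     * within its 16-byte block; interleaving them with 0xff bytes builds
     * permute masks whose even bytes select zeros and whose odd bytes select
     * destination pixels, so the vec_perm in ADD below realigns eight dest
     * bytes and zero-extends them to 16-bit lanes in one step. perm0/perm1
     * precompute the masks for dest and dest + stride, and the rows then
     * alternate between them. Since ADD does a single vec_ld, the eight
     * bytes must not straddle a 16-byte boundary, which appears to assume an
     * 8-byte-aligned dest.
     */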

#define ADD(dest,src,perm)                                      \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                \
    tmp = vec_ld (0, dest);                                     \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);         \
    tmp3 = vec_adds (tmp2, src);                                \
    tmp = vec_packsu (tmp3, tmp3);                              \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);            \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)
}
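
/*
 * These two functions are the idct_put/idct_add entry points for this file;
 * they are typically installed by FFmpeg's PPC DSP initialization when
 * AltiVec is detected at runtime (e.g. c->idct_put = ff_idct_put_altivec),
 * though the exact wiring varies by FFmpeg version.
 */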