/*
 * Copyright (c) 2009 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/arm/asm.S"

// 16-bit fixed-point IDCT cosine constants C1..C7 (scaled by 1 << 16).
// Loaded as one 128-bit vector into d0-d1; the xC*S* defines below name
// the individual lanes for use as vmull.s16 scalar operands.
const vp3_idct_constants, align=4
.short 64277, 60547, 54491, 46341, 36410, 25080, 12785
endconst

#define xC1S7 d0[0]
#define xC2S6 d0[1]
#define xC3S5 d0[2]
#define xC4S4 d0[3]
#define xC5S3 d1[0]
#define xC6S2 d1[1]
#define xC7S1 d1[2]
// Core VP3 loop-filter step for 8 pixels.
// In:  d16-d19 = four adjacent pixel rows/columns (p0..p3 across the edge),
//      r2      = filter limit (bounding value) loaded by the caller.
// Out: d0, d1  = filtered replacements for d17 and d18.
// Computes f = (p0 - p3 + 3*(p2 - p1) + 4) >> 3, applies the bounding
// function |f| -> clamp, restores the sign, then adds f to p1 and
// subtracts it from p2 with unsigned saturation.
.macro vp3_loop_filter
        vsubl.u8        q3,  d18, d17           // p2 - p1
        vsubl.u8        q2,  d16, d19           // p0 - p3
        vadd.i16        q1,  q3,  q3            // 2 * (p2 - p1)
        vadd.i16        q2,  q2,  q3            // p0 - p3 + (p2 - p1)
        vadd.i16        q0,  q1,  q2            // p0 - p3 + 3 * (p2 - p1)
        vrshr.s16       q0,  q0,  #3            // f = (... + 4) >> 3
        vmovl.u8        q9,  d18                // widen p2 for the update below
        vdup.u16        q15, r2                 // broadcast filter limit

        vabs.s16        q1,  q0                 // |f|
        vshr.s16        q0,  q0,  #15           // sign mask of f
        vqsub.u16       q2,  q15, q1            // sat(limit - |f|)
        vqsub.u16       q3,  q2,  q1            // sat(limit - 2*|f|)
        vsub.i16        q1,  q2,  q3            // bounded |f|
        veor            q1,  q1,  q0            // reapply sign ...
        vsub.i16        q0,  q1,  q0            // ... (two's complement via mask)

        vaddw.u8        q2,  q0,  d17           // p1 + f
        vsub.i16        q3,  q9,  q0            // p2 - f
        vqmovun.s16     d0,  q2                 // saturate to u8
        vqmovun.s16     d1,  q3
.endm
// void ff_vp3_v_loop_filter_neon(uint8_t *first_pixel, ptrdiff_t stride,
//                                int *bounding_values)
// Filters a horizontal edge: loads the two rows above and below the edge,
// runs vp3_loop_filter, and writes back the two middle rows.
function ff_vp3_v_loop_filter_neon, export=1
        sub             ip,  r0,  r1            // ip -> first row to rewrite
        sub             r0,  r0,  r1,  lsl #1   // r0 -> two rows above the edge
        vld1.64         {d16}, [r0,:64], r1
        vld1.64         {d17}, [r0,:64], r1
        vld1.64         {d18}, [r0,:64], r1
        vld1.64         {d19}, [r0,:64], r1
        ldrb            r2,  [r2, #129*4]       // filter limit from bounding_values table

        vp3_loop_filter

        vst1.64         {d0},  [ip,:64], r1
        vst1.64         {d1},  [ip,:64], r1
        bx              lr
endfunc
// void ff_vp3_h_loop_filter_neon(uint8_t *first_pixel, ptrdiff_t stride,
//                                int *bounding_values)
// Filters a vertical edge: gathers 4 pixels from each of 8 rows, transposes
// them so columns become d-register rows, filters, transposes back, and
// scatters the two modified columns as 16-bit pairs.
function ff_vp3_h_loop_filter_neon, export=1
        sub             ip,  r0,  #1            // ip -> first column to rewrite
        sub             r0,  r0,  #2            // r0 -> two columns left of the edge
        vld1.32         {d16[]},  [r0], r1
        vld1.32         {d17[]},  [r0], r1
        vld1.32         {d18[]},  [r0], r1
        vld1.32         {d19[]},  [r0], r1
        vld1.32         {d16[1]}, [r0], r1
        vld1.32         {d17[1]}, [r0], r1
        vld1.32         {d18[1]}, [r0], r1
        vld1.32         {d19[1]}, [r0], r1
        ldrb            r2,  [r2, #129*4]       // filter limit from bounding_values table

        // 4x8 -> 8x4 transpose so each d register holds one pixel column
        vtrn.8          d16, d17
        vtrn.8          d18, d19
        vtrn.16         d16, d18
        vtrn.16         d17, d19

        vp3_loop_filter

        // interleave the two result columns back into row order
        vtrn.8          d0,  d1

        vst1.16         {d0[0]}, [ip], r1
        vst1.16         {d1[0]}, [ip], r1
        vst1.16         {d0[1]}, [ip], r1
        vst1.16         {d1[1]}, [ip], r1
        vst1.16         {d0[2]}, [ip], r1
        vst1.16         {d1[2]}, [ip], r1
        vst1.16         {d0[3]}, [ip], r1
        vst1.16         {d1[3]}, [ip], r1
        bx              lr
endfunc
// Shared first stage of the 8x8 VP3 IDCT.
// In:  r2 = int16_t block[64] of coefficients (cleared to zero as it is read).
// Loads the cosine constants into d0-d1, the whole block into q8-q15, and
// pre-computes ip[0] +/- ip[4] in q1/q8 before falling through to the core.
// Clobbers q1-q7 (d8-d15 are saved by vpush and restored by the callers).
function vp3_idct_start_neon
        vpush           {d8-d15}
        vmov.i16        q4,  #0
        vmov.i16        q5,  #0
        movrel          r3,  vp3_idct_constants
        vld1.64         {d0-d1},   [r3,:128]
        vld1.64         {d16-d19}, [r2,:128]
        vst1.64         {q4-q5},   [r2,:128]!   // zero the block behind us
        vld1.64         {d20-d23}, [r2,:128]
        vst1.64         {q4-q5},   [r2,:128]!
        vld1.64         {d24-d27}, [r2,:128]
        vst1.64         {q4-q5},   [r2,:128]!
        vadd.s16        q1,  q8,  q12           // ip[0] + ip[4]
        vsub.s16        q8,  q8,  q12           // ip[0] - ip[4]
        vld1.64         {d28-d31}, [r2,:128]
        vst1.64         {q4-q5},   [r2,:128]!

// One 1-D IDCT pass over 8 rows (or columns) held in q8-q15.
// Expects q1 = ip[0] + ip[4] and q8 = ip[0] - ip[4] on entry; leaves the
// butterfly terms (E, F, A, B, C, D, G, H, Ad, ...) for vp3_idct_end_*_neon.
vp3_idct_core_neon:
        vmull.s16       q2,  d18, xC1S7         // (ip[1] * C1) << 16
        vmull.s16       q3,  d19, xC1S7
        vmull.s16       q4,  d2,  xC4S4         // ((ip[0] + ip[4]) * C4) << 16
        vmull.s16       q5,  d3,  xC4S4
        vmull.s16       q6,  d16, xC4S4         // ((ip[0] - ip[4]) * C4) << 16
        vmull.s16       q7,  d17, xC4S4
        vshrn.s32       d4,  q2,  #16
        vshrn.s32       d5,  q3,  #16
        vshrn.s32       d6,  q4,  #16
        vshrn.s32       d7,  q5,  #16
        vshrn.s32       d8,  q6,  #16
        vshrn.s32       d9,  q7,  #16
        vadd.s16        q12, q1,  q3            // E = (ip[0] + ip[4]) * C4
        vadd.s16        q8,  q8,  q4            // F = (ip[0] - ip[4]) * C4
        vadd.s16        q1,  q2,  q9            // ip[1] * C1

        vmull.s16       q2,  d30, xC1S7         // (ip[7] * C1) << 16
        vmull.s16       q3,  d31, xC1S7
        vmull.s16       q4,  d30, xC7S1         // (ip[7] * C7) << 16
        vmull.s16       q5,  d31, xC7S1
        vmull.s16       q6,  d18, xC7S1         // (ip[1] * C7) << 16
        vmull.s16       q7,  d19, xC7S1
        vshrn.s32       d4,  q2,  #16
        vshrn.s32       d5,  q3,  #16
        vshrn.s32       d6,  q4,  #16           // ip[7] * C7
        vshrn.s32       d7,  q5,  #16
        vshrn.s32       d8,  q6,  #16           // ip[1] * C7
        vshrn.s32       d9,  q7,  #16
        vadd.s16        q2,  q2,  q15           // ip[7] * C1
        vadd.s16        q9,  q1,  q3            // A = ip[1] * C1 + ip[7] * C7
        vsub.s16        q15, q4,  q2            // B = ip[1] * C7 - ip[7] * C1

        vmull.s16       q2,  d22, xC5S3         // (ip[3] * C5) << 16
        vmull.s16       q3,  d23, xC5S3
        vmull.s16       q4,  d22, xC3S5         // (ip[3] * C3) << 16
        vmull.s16       q5,  d23, xC3S5
        vmull.s16       q6,  d26, xC5S3         // (ip[5] * C5) << 16
        vmull.s16       q7,  d27, xC5S3
        vshrn.s32       d4,  q2,  #16
        vshrn.s32       d5,  q3,  #16
        vshrn.s32       d6,  q4,  #16
        vshrn.s32       d7,  q5,  #16
        vshrn.s32       d8,  q6,  #16
        vshrn.s32       d9,  q7,  #16
        vadd.s16        q3,  q3,  q11           // ip[3] * C3
        vadd.s16        q4,  q4,  q13           // ip[5] * C5
        vadd.s16        q1,  q2,  q11           // ip[3] * C5
        vadd.s16        q11, q3,  q4            // C = ip[3] * C3 + ip[5] * C5

        vmull.s16       q2,  d26, xC3S5         // (ip[5] * C3) << 16
        vmull.s16       q3,  d27, xC3S5
        vmull.s16       q4,  d20, xC2S6         // (ip[2] * C2) << 16
        vmull.s16       q5,  d21, xC2S6
        vmull.s16       q6,  d28, xC6S2         // (ip[6] * C6) << 16
        vmull.s16       q7,  d29, xC6S2
        vshrn.s32       d4,  q2,  #16
        vshrn.s32       d5,  q3,  #16
        vshrn.s32       d6,  q4,  #16
        vshrn.s32       d7,  q5,  #16
        vshrn.s32       d8,  q6,  #16           // ip[6] * C6
        vshrn.s32       d9,  q7,  #16
        vadd.s16        q2,  q2,  q13           // ip[5] * C3
        vadd.s16        q3,  q3,  q10           // ip[2] * C2
        vsub.s16        q13, q2,  q1            // D = ip[5] * C3 - ip[3] * C5
        vsub.s16        q1,  q9,  q11           // (A - C)
        vadd.s16        q11, q9,  q11           // Cd = A + C
        vsub.s16        q9,  q15, q13           // (B - D)
        vadd.s16        q13, q15, q13           // Dd = B + D
        vadd.s16        q15, q3,  q4            // G = ip[2] * C2 + ip[6] * C6

        vmull.s16       q2,  d2,  xC4S4         // ((A - C) * C4) << 16
        vmull.s16       q3,  d3,  xC4S4
        vmull.s16       q4,  d28, xC2S6         // (ip[6] * C2) << 16
        vmull.s16       q5,  d29, xC2S6
        vmull.s16       q6,  d20, xC6S2         // (ip[2] * C6) << 16
        vmull.s16       q7,  d21, xC6S2
        vshrn.s32       d4,  q2,  #16
        vshrn.s32       d5,  q3,  #16
        vshrn.s32       d6,  q4,  #16
        vshrn.s32       d7,  q5,  #16
        vshrn.s32       d8,  q6,  #16           // ip[2] * C6
        vmull.s16       q5,  d18, xC4S4         // ((B - D) * C4) << 16
        vmull.s16       q6,  d19, xC4S4
        vshrn.s32       d9,  q7,  #16
        vadd.s16        q3,  q3,  q14           // ip[6] * C2
        vadd.s16        q10, q1,  q2            // Ad = (A - C) * C4
        vsub.s16        q14, q4,  q3            // H = ip[2] * C6 - ip[6] * C2
        bx              lr
endfunc
// Final stage of one 1-D IDCT pass, instantiated twice below.
// \type == row: finishes with a full 8x8 transpose and re-derives
//               ip[0] +/- ip[4] (q1/q8) ready for the column pass.
// \type == col: adds the rounding/bias term passed in r3 to E and F first.
// Consumes the butterfly terms left by vp3_idct_core_neon; results land
// back in q8-q15 as output rows [0]..[7].
.macro VP3_IDCT_END type
function vp3_idct_end_\type\()_neon
.ifc \type, col
        vdup.16         q0,  r3                 // rounding / unsigned-bias term
        vadd.s16        q12, q12, q0
        vadd.s16        q8,  q8,  q0
.endif

        vshrn.s32       d2,  q5,  #16
        vshrn.s32       d3,  q6,  #16
        vadd.s16        q2,  q12, q15           // Gd = E + G
        vadd.s16        q9,  q1,  q9            // (B - D) * C4
        vsub.s16        q12, q12, q15           // Ed = E - G
        vsub.s16        q3,  q8,  q10           // Fd = F - Ad
        vadd.s16        q10, q8,  q10           // Add = F + Ad
        vadd.s16        q4,  q9,  q14           // Hd = Bd + H
        vsub.s16        q14, q9,  q14           // Bdd = Bd - H
        vadd.s16        q8,  q2,  q11           // [0] = Gd + Cd
        vsub.s16        q15, q2,  q11           // [7] = Gd - Cd
        vadd.s16        q9,  q10, q4            // [1] = Add + Hd
        vsub.s16        q10, q10, q4            // [2] = Add - Hd
        vadd.s16        q11, q12, q13           // [3] = Ed + Dd
        vsub.s16        q12, q12, q13           // [4] = Ed - Dd
.ifc \type, row
        vtrn.16         q8,  q9
.endif
        vadd.s16        q13, q3,  q14           // [5] = Fd + Bdd
        vsub.s16        q14, q3,  q14           // [6] = Fd - Bdd

.ifc \type, row
        // 8x8 transpose
        vtrn.16         q10, q11
        vtrn.16         q12, q13
        vtrn.16         q14, q15
        vtrn.32         q8,  q10
        vtrn.32         q9,  q11
        vtrn.32         q12, q14
        vtrn.32         q13, q15
        vswp            d17, d24
        vswp            d19, d26
        vadd.s16        q1,  q8,  q12           // ip[0] + ip[4] for column pass
        vswp            d21, d28
        vsub.s16        q8,  q8,  q12           // ip[0] - ip[4] for column pass
        vswp            d23, d30
.endif
        bx              lr
endfunc
.endm

VP3_IDCT_END row
VP3_IDCT_END col
// void ff_vp3_idct_put_neon(uint8_t *dest, ptrdiff_t line_size,
//                           int16_t *block)
// Full 8x8 IDCT; stores the result directly (no source pixels added).
// The column-pass bias r3 = 8 + 2048 rounds and shifts the signed result
// into the unsigned pixel range before the final >> 4 with saturation.
function ff_vp3_idct_put_neon, export=1
        mov             ip,  lr
        bl              vp3_idct_start_neon
        bl              vp3_idct_end_row_neon
        mov             r3,  #8
        add             r3,  r3,  #2048         // convert signed pixel to unsigned
        bl              vp3_idct_core_neon
        bl              vp3_idct_end_col_neon
        mov             lr,  ip
        vpop            {d8-d15}

        vqshrun.s16     d0,  q8,  #4
        vqshrun.s16     d1,  q9,  #4
        vqshrun.s16     d2,  q10, #4
        vqshrun.s16     d3,  q11, #4
        vst1.64         {d0}, [r0,:64], r1
        vqshrun.s16     d4,  q12, #4
        vst1.64         {d1}, [r0,:64], r1
        vqshrun.s16     d5,  q13, #4
        vst1.64         {d2}, [r0,:64], r1
        vqshrun.s16     d6,  q14, #4
        vst1.64         {d3}, [r0,:64], r1
        vqshrun.s16     d7,  q15, #4
        vst1.64         {d4}, [r0,:64], r1
        vst1.64         {d5}, [r0,:64], r1
        vst1.64         {d6}, [r0,:64], r1
        vst1.64         {d7}, [r0,:64], r1
        bx              lr
endfunc
// void ff_vp3_idct_add_neon(uint8_t *dest, ptrdiff_t line_size,
//                           int16_t *block)
// Full 8x8 IDCT; adds the (rounded, >> 4) residual to the existing
// destination pixels with unsigned saturation. Loads via r0, stores via r2
// so loads can run ahead of the interleaved stores.
function ff_vp3_idct_add_neon, export=1
        mov             ip,  lr
        bl              vp3_idct_start_neon
        bl              vp3_idct_end_row_neon
        mov             r3,  #8                 // rounding only; pixels stay residuals
        bl              vp3_idct_core_neon
        bl              vp3_idct_end_col_neon
        mov             lr,  ip
        vpop            {d8-d15}
        mov             r2,  r0                 // r2 = store pointer

        vld1.64         {d0}, [r0,:64], r1
        vshr.s16        q8,  q8,  #4
        vld1.64         {d1}, [r0,:64], r1
        vshr.s16        q9,  q9,  #4
        vld1.64         {d2}, [r0,:64], r1
        vaddw.u8        q8,  q8,  d0
        vld1.64         {d3}, [r0,:64], r1
        vaddw.u8        q9,  q9,  d1
        vld1.64         {d4}, [r0,:64], r1
        vshr.s16        q10, q10, #4
        vld1.64         {d5}, [r0,:64], r1
        vshr.s16        q11, q11, #4
        vld1.64         {d6}, [r0,:64], r1
        vqmovun.s16     d0,  q8
        vld1.64         {d7}, [r0,:64], r1
        vqmovun.s16     d1,  q9
        vaddw.u8        q10, q10, d2
        vaddw.u8        q11, q11, d3
        vshr.s16        q12, q12, #4
        vshr.s16        q13, q13, #4
        vqmovun.s16     d2,  q10
        vqmovun.s16     d3,  q11
        vaddw.u8        q12, q12, d4
        vaddw.u8        q13, q13, d5
        vshr.s16        q14, q14, #4
        vshr.s16        q15, q15, #4
        vst1.64         {d0}, [r2,:64], r1
        vqmovun.s16     d4,  q12
        vst1.64         {d1}, [r2,:64], r1
        vqmovun.s16     d5,  q13
        vst1.64         {d2}, [r2,:64], r1
        vaddw.u8        q14, q14, d6
        vst1.64         {d3}, [r2,:64], r1
        vaddw.u8        q15, q15, d7
        vst1.64         {d4}, [r2,:64], r1
        vqmovun.s16     d6,  q14
        vst1.64         {d5}, [r2,:64], r1
        vqmovun.s16     d7,  q15
        vst1.64         {d6}, [r2,:64], r1
        vst1.64         {d7}, [r2,:64], r1
        bx              lr
endfunc
// void ff_vp3_idct_dc_add_neon(uint8_t *dest, ptrdiff_t line_size,
//                              int16_t *block)
// DC-only IDCT shortcut: adds dc = (block[0] + 15) >> 5 to all 64 pixels
// with unsigned saturation, and clears block[0] for the next block.
// Fix vs. original: `mov r12, 0` was missing the `#` immediate prefix.
function ff_vp3_idct_dc_add_neon, export=1
        ldrsh           r12, [r2]               // DC coefficient
        mov             r3,  r0                 // r3 = store pointer
        add             r12, r12, #15           // round before >> 5
        vdup.16         q15, r12
        mov             r12, #0
        strh            r12, [r2]               // clear consumed DC coefficient
        vshr.s16        q15, q15, #5            // dc = (block[0] + 15) >> 5

        vld1.8          {d0}, [r0,:64], r1
        vld1.8          {d1}, [r0,:64], r1
        vld1.8          {d2}, [r0,:64], r1
        vaddw.u8        q8,  q15, d0
        vld1.8          {d3}, [r0,:64], r1
        vaddw.u8        q9,  q15, d1
        vld1.8          {d4}, [r0,:64], r1
        vaddw.u8        q10, q15, d2
        vld1.8          {d5}, [r0,:64], r1
        vaddw.u8        q11, q15, d3
        vld1.8          {d6}, [r0,:64], r1
        vaddw.u8        q12, q15, d4
        vld1.8          {d7}, [r0,:64], r1
        vaddw.u8        q13, q15, d5
        vqmovun.s16     d0,  q8
        vaddw.u8        q14, q15, d6
        vqmovun.s16     d1,  q9
        vaddw.u8        q15, q15, d7
        vqmovun.s16     d2,  q10
        vst1.8          {d0}, [r3,:64], r1
        vqmovun.s16     d3,  q11
        vst1.8          {d1}, [r3,:64], r1
        vqmovun.s16     d4,  q12
        vst1.8          {d2}, [r3,:64], r1
        vqmovun.s16     d5,  q13
        vst1.8          {d3}, [r3,:64], r1
        vqmovun.s16     d6,  q14
        vst1.8          {d4}, [r3,:64], r1
        vqmovun.s16     d7,  q15
        vst1.8          {d5}, [r3,:64], r1
        vst1.8          {d6}, [r3,:64], r1
        vst1.8          {d7}, [r3,:64], r1
        bx              lr
endfunc