/*
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/arm/asm.S"
#include "neon.S"

/* H.264 loop filter */

@ Common loop-filter entry code.  On entry: r2 = alpha, r3 = beta,
@ [sp] = pointer to the four per-edge tc0 bytes.  Loads the tc0 bytes
@ into r12 and d24[0] and returns to the caller early (bx lr) when
@ alpha == 0, beta == 0, or all four tc0 values are negative — in all
@ of those cases no filtering is performed.
.macro  h264_loop_filter_start
        ldr             r12, [sp]               @ r12 = tc0 pointer
        tst             r2,  r2                 @ alpha == 0 ?
        ldr             r12, [r12]              @ r12 = four packed tc0 bytes
        it              ne
        tstne           r3,  r3                 @ ... or beta == 0 ?
        vmov.32         d24[0], r12             @ keep tc0 for the filter macros
        and             r12, r12, r12, lsl #16
        it              eq
        bxeq            lr                      @ alpha or beta zero: nothing to do
        ands            r12, r12, r12, lsl #8   @ N set iff all four tc0 bytes < 0
        it              lt
        bxlt            lr                      @ every edge disabled: done
.endm

@ Core luma deblocking filter for 16 pixels across one edge.
@ Input registers (one byte per pixel, 16 pixels wide):
@   q10 = p2, q9 = p1, q8 = p0, q0 = q0, q1 = q1, q2 = q2
@ with alpha in r2, beta in r3 and tc0 in d24[0] (set up by
@ h264_loop_filter_start).  On exit the filtered pixels are:
@   q4 = p1', q8 = p0', q0 = q0', q5 = q1'
@ (p1'/q1' already merged with the unfiltered values via vbsl).
@ Uses q4-q7 and q11-q15 as scratch; the caller must vpush {d8-d15}.
.macro  h264_loop_filter_luma
        vdup.8          q11, r2                 @ alpha
        vmovl.u8        q12, d24
        vabd.u8         q6,  q8,  q0            @ abs(p0 - q0)
        vmovl.u16       q12, d24
        vabd.u8         q14, q9,  q8            @ abs(p1 - p0)
        vsli.16         q12, q12, #8            @ replicate each tc0 byte ...
        vabd.u8         q15, q1,  q0            @ abs(q1 - q0)
        vsli.32         q12, q12, #16           @ ... across its 4-pixel group
        vclt.u8         q6,  q6,  q11           @ < alpha
        vdup.8          q11, r3                 @ beta
        vclt.s8         q7,  q12, #0            @ mask of tc0 < 0 (edge disabled)
        vclt.u8         q14, q14, q11           @ < beta
        vclt.u8         q15, q15, q11           @ < beta
        vbic            q6,  q6,  q7            @ filter mask without disabled edges
        vabd.u8         q4,  q10, q8            @ abs(p2 - p0)
        vand            q6,  q6,  q14
        vabd.u8         q5,  q2,  q0            @ abs(q2 - q0)
        vclt.u8         q4,  q4,  q11           @ < beta  (p1' condition)
        vand            q6,  q6,  q15
        vclt.u8         q5,  q5,  q11           @ < beta  (q1' condition)
        vand            q4,  q4,  q6
        vand            q5,  q5,  q6
        vand            q12, q12, q6            @ tc0 where the filter is active
        vrhadd.u8       q14, q8,  q0            @ (p0 + q0 + 1) >> 1
        vsub.i8         q6,  q12, q4            @ tc = tc0 + (p-side) ...
        vqadd.u8        q7,  q9,  q12           @ p1 + tc0 (upper clip for p1')
        vhadd.u8        q10, q10, q14
        vsub.i8         q6,  q6,  q5            @ ... + (q-side) correction
        vhadd.u8        q14, q2,  q14
        vmin.u8         q7,  q7,  q10
        vqsub.u8        q11, q9,  q12           @ p1 - tc0 (lower clip for p1')
        vqadd.u8        q2,  q1,  q12           @ q1 + tc0 (upper clip for q1')
        vmax.u8         q7,  q7,  q11           @ q7 = clipped p1' candidate
        vqsub.u8        q11, q1,  q12           @ q1 - tc0 (lower clip for q1')
        vmin.u8         q14, q2,  q14
        vmovl.u8        q2,  d0
        vmax.u8         q14, q14, q11           @ q14 = clipped q1' candidate
        vmovl.u8        q10, d1
        vsubw.u8        q2,  q2,  d16           @ q0 - p0 (16 bit)
        vsubw.u8        q10, q10, d17
        vshl.i16        q2,  q2,  #2            @ (q0 - p0) * 4
        vshl.i16        q10, q10, #2
        vaddw.u8        q2,  q2,  d18           @ + p1
        vaddw.u8        q10, q10, d19
        vsubw.u8        q2,  q2,  d2            @ - q1
        vsubw.u8        q10, q10, d3
        vrshrn.i16      d4,  q2,  #3            @ delta = (... + 4) >> 3
        vrshrn.i16      d5,  q10, #3
        vbsl            q4,  q7,  q9            @ q4 = p1' where enabled, else p1
        vbsl            q5,  q14, q1            @ q5 = q1' where enabled, else q1
        vneg.s8         q7,  q6                 @ -tc
        vmovl.u8        q14, d16
        vmin.s8         q2,  q2,  q6            @ clamp delta to [-tc, tc]
        vmovl.u8        q6,  d17
        vmax.s8         q2,  q2,  q7
        vmovl.u8        q11, d0
        vmovl.u8        q12, d1
        vaddw.s8        q14, q14, d4            @ p0 + delta
        vaddw.s8        q6,  q6,  d5
        vsubw.s8        q11, q11, d4            @ q0 - delta
        vsubw.s8        q12, q12, d5
        vqmovun.s16     d16, q14                @ p0' clipped to [0,255]
        vqmovun.s16     d17, q6
        vqmovun.s16     d0,  q11                @ q0' clipped to [0,255]
        vqmovun.s16     d1,  q12
.endm

@ void ff_h264_v_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Filters a horizontal luma edge.  r0 points at the first row below
@ the edge; rows q0..q2 are loaded first, then r0 is rewound 6 rows
@ to load p2..p0.
function ff_h264_v_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        vld1.8          {d0, d1},  [r0,:128], r1        @ q0 row
        vld1.8          {d2, d3},  [r0,:128], r1        @ q1 row
        vld1.8          {d4, d5},  [r0,:128], r1        @ q2 row
        sub             r0,  r0,  r1, lsl #2
        sub             r0,  r0,  r1, lsl #1            @ rewind 6 rows (to p2)
        vld1.8          {d20,d21}, [r0,:128], r1        @ p2 row
        vld1.8          {d18,d19}, [r0,:128], r1        @ p1 row
        vld1.8          {d16,d17}, [r0,:128], r1        @ p0 row

        vpush           {d8-d15}                        @ filter uses q4-q7

        h264_loop_filter_luma

        sub             r0,  r0,  r1, lsl #1            @ back to the p1 row
        vst1.8          {d8, d9},  [r0,:128], r1        @ p1'
        vst1.8          {d16,d17}, [r0,:128], r1        @ p0'
        vst1.8          {d0, d1},  [r0,:128], r1        @ q0'
        vst1.8          {d10,d11}, [r0,:128]            @ q1'

        vpop            {d8-d15}
        bx              lr
endfunc

@ void ff_h264_h_loop_filter_luma_neon(uint8_t *pix, int stride,
@                                      int alpha, int beta, int8_t *tc0)
@ Filters a vertical luma edge: loads 16 rows of 8 pixels starting
@ 4 bytes left of the edge, transposes so each register holds one
@ pixel column, runs the shared filter, then transposes the four
@ modified columns back and stores 4 bytes per row at pix - 2.
function ff_h264_h_loop_filter_luma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #4            @ start 4 pixels left of the edge
        vld1.8          {d6},  [r0], r1
        vld1.8          {d20}, [r0], r1
        vld1.8          {d18}, [r0], r1
        vld1.8          {d16}, [r0], r1
        vld1.8          {d0},  [r0], r1
        vld1.8          {d2},  [r0], r1
        vld1.8          {d4},  [r0], r1
        vld1.8          {d26}, [r0], r1
        vld1.8          {d7},  [r0], r1
        vld1.8          {d21}, [r0], r1
        vld1.8          {d19}, [r0], r1
        vld1.8          {d17}, [r0], r1
        vld1.8          {d1},  [r0], r1
        vld1.8          {d3},  [r0], r1
        vld1.8          {d5},  [r0], r1
        vld1.8          {d27}, [r0], r1

        @ columns -> registers: q10=p2 q9=p1 q8=p0 q0=q0 q1=q1 q2=q2
        transpose_8x8   q3, q10, q9, q8, q0, q1, q2, q13

        vpush           {d8-d15}                @ filter uses q4-q7

        h264_loop_filter_luma

        @ regroup the four output columns (p1',p0',q0',q1') into rows
        transpose_4x4   q4, q8, q0, q5

        sub             r0,  r0,  r1, lsl #4    @ back to the first row
        add             r0,  r0,  #2            @ p1 column is at offset 2
        vst1.32         {d8[0]},  [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d10[0]}, [r0], r1
        vst1.32         {d8[1]},  [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d10[1]}, [r0], r1
        vst1.32         {d9[0]},  [r0], r1
        vst1.32         {d17[0]}, [r0], r1
        vst1.32         {d1[0]},  [r0], r1
        vst1.32         {d11[0]}, [r0], r1
        vst1.32         {d9[1]},  [r0], r1
        vst1.32         {d17[1]}, [r0], r1
        vst1.32         {d1[1]},  [r0], r1
        vst1.32         {d11[1]}, [r0], r1

        vpop            {d8-d15}
        bx              lr
endfunc

@ Chroma deblocking filter for 8 pixels across one edge.
@ Inputs: d18 = p1, d16 = p0, d0 = q0, d2 = q1, alpha in r2,
@ beta in r3, tc0 bytes in d24[0] (from h264_loop_filter_start).
@ Computes delta = clip((((q0 - p0) << 2) + p1 - q1 + 4) >> 3,
@ -tc0, tc0) and applies it (d16 = p0 + delta, d0 = q0 - delta)
@ for the pixels where |p0-q0| < alpha, |p1-p0| < beta and
@ |q1-q0| < beta.
.macro  h264_loop_filter_chroma
        vdup.8          d22, r2                 @ alpha
        vmovl.u8        q12, d24
        vabd.u8         d26, d16, d0            @ abs(p0 - q0)
        vmovl.u8        q2,  d0
        vabd.u8         d28, d18, d16           @ abs(p1 - p0)
        vsubw.u8        q2,  q2,  d16           @ q0 - p0 (16 bit)
        vsli.16         d24, d24, #8            @ replicate tc0 per pixel pair
        vshl.i16        q2,  q2,  #2            @ (q0 - p0) * 4
        vabd.u8         d30, d2,  d0            @ abs(q1 - q0)
        vaddw.u8        q2,  q2,  d18           @ + p1
        vclt.u8         d26, d26, d22           @ < alpha
        vsubw.u8        q2,  q2,  d2            @ - q1
        vdup.8          d22, r3                 @ beta
        vrshrn.i16      d4,  q2,  #3            @ delta = (... + 4) >> 3
        vclt.u8         d28, d28, d22           @ < beta
        vclt.u8         d30, d30, d22           @ < beta
        vmin.s8         d4,  d4,  d24           @ clamp delta to [-tc0, tc0]
        vneg.s8         d25, d24
        vand            d26, d26, d28           @ combine the three conditions
        vmax.s8         d4,  d4,  d25
        vand            d26, d26, d30
        vmovl.u8        q11, d0
        vand            d4,  d4,  d26           @ zero delta where filter disabled
        vmovl.u8        q14, d16
        vaddw.s8        q14, q14, d4            @ p0 + delta
        vsubw.s8        q11, q11, d4            @ q0 - delta
        vqmovun.s16     d16, q14                @ p0' clipped to [0,255]
        vqmovun.s16     d0,  q11                @ q0' clipped to [0,255]
.endm

@ void ff_h264_v_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Filters a horizontal chroma edge: two rows above and two rows below.
function ff_h264_v_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  r1, lsl #1    @ rewind 2 rows to p1
        vld1.8          {d18}, [r0,:64], r1     @ p1
        vld1.8          {d16}, [r0,:64], r1     @ p0
        vld1.8          {d0},  [r0,:64], r1     @ q0
        vld1.8          {d2},  [r0,:64]         @ q1

        h264_loop_filter_chroma

        sub             r0,  r0,  r1, lsl #1    @ back to the p0 row
        vst1.8          {d16}, [r0,:64], r1     @ p0'
        vst1.8          {d0},  [r0,:64], r1     @ q0'

        bx              lr
endfunc

@ void ff_h264_h_loop_filter_chroma_neon(uint8_t *pix, int stride,
@                                        int alpha, int beta, int8_t *tc0)
@ Filters a vertical chroma edge: loads 8 rows of 4 bytes starting
@ 2 bytes left of the edge, transposes so each register holds one
@ column, filters, transposes back and stores the rows.
function ff_h264_h_loop_filter_chroma_neon, export=1
        h264_loop_filter_start

        sub             r0,  r0,  #2            @ start at the p1 column
        vld1.32         {d18[0]}, [r0], r1
        vld1.32         {d16[0]}, [r0], r1
        vld1.32         {d0[0]},  [r0], r1
        vld1.32         {d2[0]},  [r0], r1
        vld1.32         {d18[1]}, [r0], r1
        vld1.32         {d16[1]}, [r0], r1
        vld1.32         {d0[1]},  [r0], r1
        vld1.32         {d2[1]},  [r0], r1

        @ rows -> columns: d18=p1, d16=p0, d0=q0, d2=q1
        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        h264_loop_filter_chroma

        @ columns -> rows (inverse of the transpose above)
        vtrn.16         d18, d0
        vtrn.16         d16, d2
        vtrn.8          d18, d16
        vtrn.8          d0,  d2

        sub             r0,  r0,  r1, lsl #3    @ back to the first row
        vst1.32         {d18[0]}, [r0], r1
        vst1.32         {d16[0]}, [r0], r1
        vst1.32         {d0[0]},  [r0], r1
        vst1.32         {d2[0]},  [r0], r1
        vst1.32         {d18[1]}, [r0], r1
        vst1.32         {d16[1]}, [r0], r1
        vst1.32         {d0[1]},  [r0], r1
        vst1.32         {d2[1]},  [r0], r1

        bx              lr
endfunc


@ Biweighted prediction

@ Biweighted prediction inner loop, 16 pixels wide, two rows per
@ iteration.  \macs/\macd are vmlal.u8 or vmlsl.u8 picked by
@ biweight_func so that negative weights become multiply-subtracts.
@   r0 = dst pixels (read), r1 = src pixels (read), r2 = stride,
@   r3 = row count, r6 = output pointer, r4/r5 = |weights| for d0/d1,
@   q8 = rounding offset term, q9 = negated shift (see biweight_func).
.macro  biweight_16 macs, macd
        vdup.8          d0,  r4                 @ weight for the r0 stream
        vdup.8          d1,  r5                 @ weight for the r1 stream
        vmov            q2,  q8                 @ accumulators start at the
        vmov            q3,  q8                 @ offset term
1:      subs            r3,  r3,  #2
        vld1.8          {d20-d21},[r0,:128], r2
        \macd           q2,  d0,  d20
        pld             [r0]
        \macd           q3,  d0,  d21
        vld1.8          {d22-d23},[r1,:128], r2
        \macs           q2,  d1,  d22
        pld             [r1]
        \macs           q3,  d1,  d23
        vmov            q12, q8
        vld1.8          {d28-d29},[r0,:128], r2
        vmov            q13, q8
        \macd           q12, d0,  d28
        pld             [r0]
        \macd           q13, d0,  d29
        vld1.8          {d30-d31},[r1,:128], r2
        \macs           q12, d1,  d30
        pld             [r1]
        \macs           q13, d1,  d31
        vshl.s16        q2,  q2,  q9            @ >> (log2_denom + 1)
        vshl.s16        q3,  q3,  q9
        vqmovun.s16     d4,  q2                 @ clip to [0,255]
        vqmovun.s16     d5,  q3
        vshl.s16        q12, q12, q9
        vshl.s16        q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vmov            q3,  q8                 @ reset accumulators for next pair
        vst1.8          {d4- d5}, [r6,:128], r2
        vmov            q2,  q8
        vst1.8          {d24-d25},[r6,:128], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

@ Biweighted prediction inner loop, 8 pixels wide, two rows per
@ iteration.  Same register contract as biweight_16.
.macro  biweight_8 macs, macd
        vdup.8          d0,  r4                 @ weight for the r0 stream
        vdup.8          d1,  r5                 @ weight for the r1 stream
        vmov            q1,  q8                 @ accumulators start at the
        vmov            q10, q8                 @ offset term
1:      subs            r3,  r3,  #2
        vld1.8          {d4},[r0,:64], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.8          {d5},[r1,:64], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        vld1.8          {d6},[r0,:64], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.8          {d7},[r1,:64], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9            @ >> (log2_denom + 1)
        vqmovun.s16     d2,  q1                 @ clip to [0,255]
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8                 @ reset accumulators for next pair
        vst1.8          {d2},[r6,:64], r2
        vmov            q1,  q8
        vst1.8          {d4},[r6,:64], r2
        bne             1b
        pop             {r4-r6, pc}
.endm

@ Biweighted prediction inner loop, 4 pixels wide, four rows per
@ iteration, with a two-row tail at 2: when the remaining height is
@ only two.  Same register contract as biweight_16.
.macro  biweight_4 macs, macd
        vdup.8          d0,  r4                 @ weight for the r0 stream
        vdup.8          d1,  r5                 @ weight for the r1 stream
        vmov            q1,  q8                 @ accumulators start at the
        vmov            q10, q8                 @ offset term
1:      subs            r3,  r3,  #4
        vld1.32         {d4[0]},[r0,:32], r2
        vld1.32         {d4[1]},[r0,:32], r2
        \macd           q1,  d0,  d4
        pld             [r0]
        vld1.32         {d5[0]},[r1,:32], r2
        vld1.32         {d5[1]},[r1,:32], r2
        \macs           q1,  d1,  d5
        pld             [r1]
        blt             2f                      @ only two rows left
        vld1.32         {d6[0]},[r0,:32], r2
        vld1.32         {d6[1]},[r0,:32], r2
        \macd           q10, d0,  d6
        pld             [r0]
        vld1.32         {d7[0]},[r1,:32], r2
        vld1.32         {d7[1]},[r1,:32], r2
        \macs           q10, d1,  d7
        pld             [r1]
        vshl.s16        q1,  q1,  q9            @ >> (log2_denom + 1)
        vqmovun.s16     d2,  q1                 @ clip to [0,255]
        vshl.s16        q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8                 @ reset accumulators
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        vmov            q1,  q8
        vst1.32         {d4[0]},[r6,:32], r2
        vst1.32         {d4[1]},[r6,:32], r2
        bne             1b
        pop             {r4-r6, pc}
2:      vshl.s16        q1,  q1,  q9            @ two-row tail
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r6,:32], r2
        vst1.32         {d2[1]},[r6,:32], r2
        pop             {r4-r6, pc}
.endm

@ Instantiates ff_biweight_h264_pixels_\w\()_neon:
@ void ff_biweight_h264_pixels_{16,8,4}_neon(uint8_t *dst, uint8_t *src,
@         int stride, int height, int log2_denom, int weightd,
@         int weights, int offset)
@ Prepares q8 = ((offset + 1) | 1) << log2_denom as the rounding term
@ and q9 = ~log2_denom (vshl.s16 by ~n is an arithmetic shift right by
@ n + 1), then dispatches on the signs of the two weights: negative
@ weights are negated and applied with vmlsl instead of vmlal.
.macro  biweight_func w
function ff_biweight_h264_pixels_\w\()_neon, export=1
        push            {r4-r6, lr}
        ldr             r12, [sp, #16]          @ r12 = log2_denom
        add             r4,  sp,  #20
        ldm             r4,  {r4-r6}            @ r4 = weightd, r5 = weights, r6 = offset
        lsr             lr,  r4,  #31           @ lr encodes the weight signs
        add             r6,  r6,  #1
        eors            lr,  lr,  r5,  lsr #30
        orr             r6,  r6,  #1            @ r6 = (offset + 1) | 1
        vdup.16         q9,  r12
        lsl             r6,  r6,  r12           @ ... << log2_denom
        vmvn            q9,  q9                 @ q9 = ~log2_denom
        vdup.16         q8,  r6
        mov             r6,  r0                 @ r6 = output pointer
        beq             10f                     @ both weights >= 0
        subs            lr,  lr,  #1
        beq             20f                     @ weightd < 0
        subs            lr,  lr,  #1
        beq             30f                     @ both weights < 0
        b               40f                     @ weights < 0
10:     biweight_\w     vmlal.u8, vmlal.u8
20:     rsb             r4,  r4,  #0            @ negate weightd
        biweight_\w     vmlal.u8, vmlsl.u8
30:     rsb             r4,  r4,  #0            @ negate both weights
        rsb             r5,  r5,  #0
        biweight_\w     vmlsl.u8, vmlsl.u8
40:     rsb             r5,  r5,  #0            @ negate weights
        biweight_\w     vmlsl.u8, vmlal.u8
endfunc
.endm

420 biweight_func 16
|
yading@10
|
421 biweight_func 8
|
yading@10
|
422 biweight_func 4
|

@ Weighted prediction

@ Weighted prediction inner loop, 16 pixels wide, two rows per
@ iteration.  \add is vhadd/vhsub/vadd/vsub, chosen by weight_func.
@   r0 = source pixels, r1 = stride, r2 = row count, r4 = output
@   pointer, r12 = |weight|, q8 = offset term, q9 = rounding shift.
.macro  weight_16 add
        vdup.8          d0,  r12                @ weight
1:      subs            r2,  r2,  #2
        vld1.8          {d20-d21},[r0,:128], r1
        vmull.u8        q2,  d0,  d20
        pld             [r0]
        vmull.u8        q3,  d0,  d21
        vld1.8          {d28-d29},[r0,:128], r1
        vmull.u8        q12, d0,  d28
        pld             [r0]
        vmull.u8        q13, d0,  d29
        \add            q2,  q8,  q2            @ combine with the offset term
        vrshl.s16       q2,  q2,  q9            @ rounding shift by log2_denom
        \add            q3,  q8,  q3
        vrshl.s16       q3,  q3,  q9
        vqmovun.s16     d4,  q2                 @ clip to [0,255]
        vqmovun.s16     d5,  q3
        \add            q12, q8,  q12
        vrshl.s16       q12, q12, q9
        \add            q13, q8,  q13
        vrshl.s16       q13, q13, q9
        vqmovun.s16     d24, q12
        vqmovun.s16     d25, q13
        vst1.8          {d4- d5}, [r4,:128], r1
        vst1.8          {d24-d25},[r4,:128], r1
        bne             1b
        pop             {r4, pc}
.endm

@ Weighted prediction inner loop, 8 pixels wide, two rows per
@ iteration.  Same register contract as weight_16.
.macro  weight_8 add
        vdup.8          d0,  r12                @ weight
1:      subs            r2,  r2,  #2
        vld1.8          {d4},[r0,:64], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        vld1.8          {d6},[r0,:64], r1
        vmull.u8        q10, d0,  d6
        \add            q1,  q8,  q1            @ combine with the offset term
        pld             [r0]
        vrshl.s16       q1,  q1,  q9            @ rounding shift by log2_denom
        vqmovun.s16     d2,  q1                 @ clip to [0,255]
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vst1.8          {d2},[r4,:64], r1
        vst1.8          {d4},[r4,:64], r1
        bne             1b
        pop             {r4, pc}
.endm

@ Weighted prediction inner loop, 4 pixels wide, four rows per
@ iteration, with a two-row tail at 2: when the remaining height is
@ only two.  Same register contract as weight_16.
.macro  weight_4 add
        vdup.8          d0,  r12                @ weight
        vmov            q1,  q8
        vmov            q10, q8
1:      subs            r2,  r2,  #4
        vld1.32         {d4[0]},[r0,:32], r1
        vld1.32         {d4[1]},[r0,:32], r1
        vmull.u8        q1,  d0,  d4
        pld             [r0]
        blt             2f                      @ only two rows left
        vld1.32         {d6[0]},[r0,:32], r1
        vld1.32         {d6[1]},[r0,:32], r1
        vmull.u8        q10, d0,  d6
        pld             [r0]
        \add            q1,  q8,  q1            @ combine with the offset term
        vrshl.s16       q1,  q1,  q9            @ rounding shift by log2_denom
        vqmovun.s16     d2,  q1                 @ clip to [0,255]
        \add            q10, q8,  q10
        vrshl.s16       q10, q10, q9
        vqmovun.s16     d4,  q10
        vmov            q10, q8
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        vmov            q1,  q8
        vst1.32         {d4[0]},[r4,:32], r1
        vst1.32         {d4[1]},[r4,:32], r1
        bne             1b
        pop             {r4, pc}
2:      \add            q1,  q8,  q1            @ two-row tail
        vrshl.s16       q1,  q1,  q9
        vqmovun.s16     d2,  q1
        vst1.32         {d2[0]},[r4,:32], r1
        vst1.32         {d2[1]},[r4,:32], r1
        pop             {r4, pc}
.endm

@ Instantiates ff_weight_h264_pixels_\w\()_neon:
@ void ff_weight_h264_pixels_{16,8,4}_neon(uint8_t *block, int stride,
@         int height, int log2_denom, int weight, int offset)
@ q8 = offset << log2_denom.  For log2_denom > 1 a halving add/sub
@ (vhadd/vhsub) plus a rounding shift by log2_denom - 1 is used,
@ keeping the 16-bit intermediates in range; for log2_denom <= 1 a
@ plain vadd/vsub with a shift by log2_denom is enough.  A negative
@ weight is negated and its product subtracted instead of added.
.macro  weight_func w
function ff_weight_h264_pixels_\w\()_neon, export=1
        push            {r4, lr}
        ldr             r12, [sp, #8]           @ r12 = weight
        ldr             r4,  [sp, #12]          @ r4  = offset
        cmp             r3,  #1
        lsl             r4,  r4,  r3            @ offset << log2_denom
        vdup.16         q8,  r4
        mov             r4,  r0                 @ r4 = output pointer
        ble             20f                     @ log2_denom <= 1: plain add path
        rsb             lr,  r3,  #1
        vdup.16         q9,  lr                 @ shift = 1 - log2_denom
        cmp             r12, #0
        blt             10f                     @ negative weight
        weight_\w       vhadd.s16
10:     rsb             r12, r12, #0            @ negate weight
        weight_\w       vhsub.s16
20:     rsb             lr,  r3,  #0
        vdup.16         q9,  lr                 @ shift = -log2_denom
        cmp             r12, #0
        blt             10f                     @ negative weight
        weight_\w       vadd.s16
10:     rsb             r12, r12, #0            @ negate weight
        weight_\w       vsub.s16
endfunc
.endm

539 weight_func 16
|
yading@10
|
540 weight_func 8
|
yading@10
|
541 weight_func 4
|