annotate ffmpeg/libavcodec/arm/vp8dsp_armv6.S @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * VP8 ARMv6 optimisations
yading@10 3 *
yading@10 4 * Copyright (c) 2010 Google Inc.
yading@10 5 * Copyright (c) 2010 Rob Clark <rob@ti.com>
yading@10 6 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
yading@10 7 *
yading@10 8 * This file is part of Libav.
yading@10 9 *
yading@10 10 * Libav is free software; you can redistribute it and/or
yading@10 11 * modify it under the terms of the GNU Lesser General Public
yading@10 12 * License as published by the Free Software Foundation; either
yading@10 13 * version 2.1 of the License, or (at your option) any later version.
yading@10 14 *
yading@10 15 * Libav is distributed in the hope that it will be useful,
yading@10 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 18 * Lesser General Public License for more details.
yading@10 19 *
yading@10 20 * You should have received a copy of the GNU Lesser General Public
yading@10 21 * License along with Libav; if not, write to the Free Software
yading@10 22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 23 *
yading@10 24 * This code was partially ported from libvpx, which uses this license:
yading@10 25 *
yading@10 26 * Redistribution and use in source and binary forms, with or without
yading@10 27 * modification, are permitted provided that the following conditions are
yading@10 28 * met:
yading@10 29 *
yading@10 30 * * Redistributions of source code must retain the above copyright
yading@10 31 * notice, this list of conditions and the following disclaimer.
yading@10 32 *
yading@10 33 * * Redistributions in binary form must reproduce the above copyright
yading@10 34 * notice, this list of conditions and the following disclaimer in
yading@10 35 * the documentation and/or other materials provided with the
yading@10 36 * distribution.
yading@10 37 *
yading@10 38 * * Neither the name of Google nor the names of its contributors may
yading@10 39 * be used to endorse or promote products derived from this software
yading@10 40 * without specific prior written permission.
yading@10 41 *
yading@10 42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
yading@10 43 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
yading@10 44 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
yading@10 45 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
yading@10 46 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
yading@10 47 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
yading@10 48 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
yading@10 49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
yading@10 50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
yading@10 51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
yading@10 52 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
yading@10 53 */
yading@10 54
yading@10 55 #include "libavutil/arm/asm.S"
yading@10 56
yading@10 57 @ idct
yading@10 58
yading@10 59 @ void vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
@ Inverse 4x4 Walsh-Hadamard transform of the 16 luma DC coefficients.
@ In:  r0 = block[4][4][16] (int16), r1 = dc[16] (int16)
@ Reads all 16 coefficients from dc[] (packed two 16-bit lanes per word,
@ processed with uadd16/usub16 butterflies), zeroes dc[] as it goes, then
@ applies the second butterfly pass with +3 rounding in each lane and an
@ arithmetic >>3, and scatters each result into block[i][j][0] via
@ strh ..., #32 (one halfword every 32 bytes = every 16-coeff sub-block).
@ NOTE(review): lines carry "yading@10 NN" hg-annotate prefixes; this is
@ an annotate dump of the source, not directly assemblable text.
yading@10 60 function ff_vp8_luma_dc_wht_armv6, export=1
yading@10 61 push {r4-r10, lr}
yading@10 62
@ Pass 1: column butterflies on dc[], interleaved with zeroing dc[]
@ (r10/lr hold 0; the four "stm r1!, {r10, lr}" clear 16 halfwords).
yading@10 63 ldm r1, {r2-r9}
yading@10 64 mov r10, #0
yading@10 65 mov lr, #0
yading@10 66 uadd16 r12, r2, r8 @ t0[0,1]
yading@10 67 usub16 r2, r2, r8 @ t3[0,1]
yading@10 68 stm r1!, {r10, lr}
yading@10 69 uadd16 r8, r4, r6 @ t1[0,1]
yading@10 70 usub16 r4, r4, r6 @ t2[0,1]
yading@10 71 stm r1!, {r10, lr}
yading@10 72 uadd16 r6, r12, r8 @ dc0[0,1]
yading@10 73 usub16 r12, r12, r8 @ dc2[0,1]
yading@10 74 stm r1!, {r10, lr}
yading@10 75 uadd16 r8, r2, r4 @ dc1[0,1]
yading@10 76 usub16 r2, r2, r4 @ dc3[0,1]
yading@10 77 stm r1!, {r10, lr}
yading@10 78
yading@10 79 uadd16 lr, r3, r9 @ t0[2,3]
yading@10 80 usub16 r3, r3, r9 @ t3[2,3]
yading@10 81 uadd16 r9, r5, r7 @ t1[2,3]
yading@10 82 usub16 r5, r5, r7 @ t2[2,3]
yading@10 83
yading@10 84 uadd16 r7, lr, r9 @ dc0[2,3]
yading@10 85 usub16 lr, lr, r9 @ dc2[2,3]
yading@10 86 uadd16 r9, r3, r5 @ dc1[2,3]
yading@10 87 usub16 r3, r3, r5 @ dc3[2,3]
yading@10 88
@ r1 = 3 in both 16-bit lanes: per-lane rounding constant for the >>3.
yading@10 89 mov r1, #3
yading@10 90 orr r1, r1, #0x30000 @ 3 | 3 (round)
yading@10 91
@ Repack lanes (transpose) so pass 2 operates on rows.
yading@10 92 pkhbt r4, r6, r8, lsl #16 @ dc{0,1}[0]
yading@10 93 pkhtb r6, r8, r6, asr #16 @ dc{0,1}[1]
yading@10 94 pkhbt r5, r12, r2, lsl #16 @ dc{2,3}[0]
yading@10 95 pkhtb r12, r2, r12, asr #16 @ dc{2,3}[1]
yading@10 96 pkhbt r8, r7, r9, lsl #16 @ dc{0,1}[2]
yading@10 97 uadd16 r4, r4, r1
yading@10 98 uadd16 r5, r5, r1
yading@10 99 pkhtb r7, r9, r7, asr #16 @ dc{0,1}[3]
yading@10 100 pkhbt r2, lr, r3, lsl #16 @ dc{2,3}[2]
yading@10 101 pkhtb lr, r3, lr, asr #16 @ dc{2,3}[3]
yading@10 102
@ Pass 2: row butterflies (rounding bias already added into r4/r5).
yading@10 103 uadd16 r9, r4, r7 @ t0[0,1]
yading@10 104 uadd16 r3, r5, lr @ t0[2,3]
yading@10 105 usub16 r4, r4, r7 @ t3[0,1]
yading@10 106 usub16 r5, r5, lr @ t3[2,3]
yading@10 107 uadd16 r7, r6, r8 @ t1[0,1]
yading@10 108 uadd16 lr, r12, r2 @ t1[2,3]
yading@10 109 usub16 r6, r6, r8 @ t2[0,1]
yading@10 110 usub16 r12, r12, r2 @ t2[2,3]
yading@10 111
yading@10 112 uadd16 r8, r9, r7 @ block[0,1][0]
yading@10 113 uadd16 r2, r3, lr @ block[2,3][0]
yading@10 114 usub16 r9, r9, r7 @ block[0,1][2]
yading@10 115 usub16 r3, r3, lr @ block[2,3][2]
yading@10 116 uadd16 r7, r4, r6 @ block[0,1][1]
yading@10 117 uadd16 lr, r5, r12 @ block[2,3][1]
yading@10 118 usub16 r4, r4, r6 @ block[0,1][3]
yading@10 119 usub16 r5, r5, r12 @ block[2,3][3]
yading@10 120
@ Extract the low lane and shift: sbfx #3,#13 == sxth then asr #3.
yading@10 121 #if HAVE_ARMV6T2_EXTERNAL
yading@10 122 sbfx r6, r8, #3, #13
yading@10 123 sbfx r12, r7, #3, #13
yading@10 124 sbfx r1, r9, #3, #13
yading@10 125 sbfx r10, r4, #3, #13
yading@10 126 #else
yading@10 127 sxth r6, r8
yading@10 128 sxth r12, r7
yading@10 129 sxth r1, r9
yading@10 130 sxth r10, r4
yading@10 131 asr r6, #3 @ block[0][0]
yading@10 132 asr r12, #3 @ block[0][1]
yading@10 133 asr r1, #3 @ block[0][2]
yading@10 134 asr r10, #3 @ block[0][3]
yading@10 135 #endif
yading@10 136
@ Stores interleaved with extracting the high lane (asr #19 == top
@ halfword >>3) to hide latency; destination stride is 32 bytes.
yading@10 137 strh r6, [r0], #32
yading@10 138 asr r8, r8, #19 @ block[1][0]
yading@10 139 strh r12, [r0], #32
yading@10 140 asr r7, r7, #19 @ block[1][1]
yading@10 141 strh r1, [r0], #32
yading@10 142 asr r9, r9, #19 @ block[1][2]
yading@10 143 strh r10, [r0], #32
yading@10 144 asr r4, r4, #19 @ block[1][3]
yading@10 145 strh r8, [r0], #32
yading@10 146 asr r6, r2, #19 @ block[3][0]
yading@10 147 strh r7, [r0], #32
yading@10 148 asr r12, lr, #19 @ block[3][1]
yading@10 149 strh r9, [r0], #32
yading@10 150 asr r1, r3, #19 @ block[3][2]
yading@10 151 strh r4, [r0], #32
yading@10 152 asr r10, r5, #19 @ block[3][3]
yading@10 153
yading@10 154 #if HAVE_ARMV6T2_EXTERNAL
yading@10 155 sbfx r2, r2, #3, #13
yading@10 156 sbfx lr, lr, #3, #13
yading@10 157 sbfx r3, r3, #3, #13
yading@10 158 sbfx r5, r5, #3, #13
yading@10 159 #else
yading@10 160 sxth r2, r2
yading@10 161 sxth lr, lr
yading@10 162 sxth r3, r3
yading@10 163 sxth r5, r5
yading@10 164 asr r2, #3 @ block[2][0]
yading@10 165 asr lr, #3 @ block[2][1]
yading@10 166 asr r3, #3 @ block[2][2]
yading@10 167 asr r5, #3 @ block[2][3]
yading@10 168 #endif
yading@10 169
yading@10 170 strh r2, [r0], #32
yading@10 171 strh lr, [r0], #32
yading@10 172 strh r3, [r0], #32
yading@10 173 strh r5, [r0], #32
yading@10 174 strh r6, [r0], #32
yading@10 175 strh r12, [r0], #32
yading@10 176 strh r1, [r0], #32
yading@10 177 strh r10, [r0], #32
yading@10 178
yading@10 179 pop {r4-r10, pc}
yading@10 180 endfunc
yading@10 181
yading@10 182 @ void vp8_luma_dc_wht_dc(int16_t block[4][4][16], int16_t dc[16])
@ DC-only special case of the luma DC WHT: when only dc[0] is non-zero
@ every output equals (dc[0] + 3) >> 3.  Zeroes dc[0] and writes the
@ single value into all 16 block[i][j][0] slots (stride 32 bytes).
yading@10 183 function ff_vp8_luma_dc_wht_dc_armv6, export=1
yading@10 184 ldrsh r2, [r1]
yading@10 185 mov r3, #0
yading@10 186 add r2, r2, #3 @ rounding before >>3
yading@10 187 strh r3, [r1] @ clear dc[0]
yading@10 188 asr r2, r2, #3
yading@10 189 .rept 16
yading@10 190 strh r2, [r0], #32
yading@10 191 .endr
yading@10 192 bx lr
yading@10 193 endfunc
yading@10 194
yading@10 195 @ void vp8_idct_add(uint8_t *dst, int16_t block[16], int stride)
@ 4x4 inverse DCT of one block and add to dst with saturation.
@ In: r0 = dst, r1 = block[16] (int16), r2 = stride
@ Pass 1 (label 1:) transforms columns two at a time using the packed
@ 16-bit multiplies smulwb/smulwt with the VP8 constants 20091
@ (cospi8sqrt2minus1) and 35468 (sinpi8sqrt2), writing intermediates to
@ a 32-byte scratch area on sp and zeroing block[] behind itself.
@ Pass 2 (label 2:) pops the scratch rows, transforms them, adds +4 and
@ >>3 (via the packed asr #19 / asr #3 extraction), then mixes into the
@ destination pixels with uxtab16/usat16 byte-lane arithmetic.
yading@10 196 function ff_vp8_idct_add_armv6, export=1
yading@10 197 push {r4-r12, lr}
yading@10 198 sub sp, sp, #32 @ scratch for the 4x4 intermediate
yading@10 199
yading@10 200 movw r3, #20091 @ cospi8sqrt2minus1
yading@10 201 movw r4, #35468 @ sinpi8sqrt2
yading@10 202 mov r5, sp
yading@10 203 1:
@ Column pass: loads rows 1..3 of the block; row 0 is loaded below.
yading@10 204 ldr r6, [r1, #8] @ i5 | i4 = block1[1] | block1[0]
yading@10 205 ldr lr, [r1, #16] @ i9 | i8 = block2[1] | block2[0]
yading@10 206 ldr r12, [r1, #24] @ i13 | i12 = block3[1] | block3[0]
yading@10 207
yading@10 208 smulwt r9, r3, r6 @ ip[5] * cospi8sqrt2minus1
yading@10 209 smulwb r7, r3, r6 @ ip[4] * cospi8sqrt2minus1
yading@10 210 smulwt r10, r4, r6 @ ip[5] * sinpi8sqrt2
yading@10 211 smulwb r8, r4, r6 @ ip[4] * sinpi8sqrt2
yading@10 212 pkhbt r7, r7, r9, lsl #16 @ 5c | 4c
yading@10 213 smulwt r11, r3, r12 @ ip[13] * cospi8sqrt2minus1
yading@10 214 pkhbt r8, r8, r10, lsl #16 @ 5s | 4s = t2 first half
yading@10 215 uadd16 r6, r6, r7 @ 5c+5 | 4c+4 = t3 first half
yading@10 216 smulwb r9, r3, r12 @ ip[12] * cospi8sqrt2minus1
yading@10 217 smulwt r7, r4, r12 @ ip[13] * sinpi8sqrt2
yading@10 218 smulwb r10, r4, r12 @ ip[12] * sinpi8sqrt2
yading@10 219
yading@10 220 pkhbt r9, r9, r11, lsl #16 @ 13c | 12c
yading@10 221 ldr r11, [r1] @ i1 | i0
yading@10 222 pkhbt r10, r10, r7, lsl #16 @ 13s | 12s = t3 second half
yading@10 223 uadd16 r7, r12, r9 @ 13c+13 | 12c+12 = t2 2nd half
yading@10 224 uadd16 r6, r6, r10 @ d = t3
yading@10 225 uadd16 r10, r11, lr @ a = t0
yading@10 226 usub16 r7, r8, r7 @ c = t2
yading@10 227 usub16 r8, r11, lr @ b = t1
yading@10 228 uadd16 r9, r10, r6 @ a+d = tmp{0,1}[0]
yading@10 229 usub16 r10, r10, r6 @ a-d = tmp{0,1}[3]
yading@10 230 uadd16 r6, r8, r7 @ b+c = tmp{0,1}[1]
yading@10 231 usub16 r7, r8, r7 @ b-c = tmp{0,1}[2]
yading@10 232 mov r8, #0
@ Loop control: r5 walked 4 bytes past sp after the first iteration, so
@ "cmp sp, r5 ... beq 1b" runs the column pass exactly twice.
yading@10 233 cmp sp, r5
yading@10 234 str r6, [r5, #8] @ o5 | o4
yading@10 235 str r7, [r5, #16] @ o9 | o8
yading@10 236 str r10, [r5, #24] @ o13 | o12
yading@10 237 str r9, [r5], #4 @ o1 | o0
@ Clear the coefficients just consumed (decoder expects zeroed block).
yading@10 238 str r8, [r1, #8]
yading@10 239 str r8, [r1, #16]
yading@10 240 str r8, [r1, #24]
yading@10 241 str r8, [r1], #4
yading@10 242 beq 1b
yading@10 243
yading@10 244 mov r5, #2
yading@10 245 2:
@ Row pass: pop one half of the scratch area (also unwinds the sub #32).
yading@10 246 pop {r1, r6, r12, lr}
yading@10 247 smulwt r9, r3, r12 @ ip[5] * cospi8sqrt2minus1
yading@10 248 smulwt r7, r3, r1 @ ip[1] * cospi8sqrt2minus1
yading@10 249 smulwt r10, r4, r12 @ ip[5] * sinpi8sqrt2
yading@10 250 smulwt r8, r4, r1 @ ip[1] * sinpi8sqrt2
yading@10 251 pkhbt r11, r1, r12, lsl #16 @ i4 | i0 = t0/t1 first half
yading@10 252 pkhtb r1, r12, r1, asr #16 @ i5 | i1
yading@10 253 pkhbt r7, r7, r9, lsl #16 @ 5c | 1c
yading@10 254 pkhbt r8, r8, r10, lsl #16 @ 5s | 1s = t2 first half
yading@10 255 pkhbt r9, r6, lr, lsl #16 @ i6 | i2 = t0/t1 second half
yading@10 256 pkhtb r12, lr, r6, asr #16 @ i7 | i3
yading@10 257 uadd16 r1, r7, r1 @ 5c+5 | 1c+1 = t3 first half
yading@10 258 uadd16 r10, r11, r9 @ a = t0
yading@10 259 usub16 r9, r11, r9 @ b = t1
yading@10 260 smulwt r7, r3, r12 @ ip[7] * cospi8sqrt2minus1
yading@10 261 smulwb lr, r3, r12 @ ip[3] * cospi8sqrt2minus1
yading@10 262 smulwt r11, r4, r12 @ ip[7] * sinpi8sqrt2
yading@10 263 smulwb r6, r4, r12 @ ip[3] * sinpi8sqrt2
yading@10 264 subs r5, r5, #1
yading@10 265 pkhbt r7, lr, r7, lsl #16 @ 7c | 3c
yading@10 266 pkhbt r11, r6, r11, lsl #16 @ 7s | 3s = t3 second half
@ r6 = 4 | 4<<16: per-lane rounding bias for the final >>3.
yading@10 267 mov r6, #0x4
yading@10 268 orr r6, r6, #0x40000
yading@10 269 uadd16 r12, r7, r12 @ 7c+7 | 3c+3 = t2 second half
yading@10 270 uadd16 r10, r10, r6 @ t0 + 4
yading@10 271 uadd16 r9, r9, r6 @ t1 + 4
yading@10 272 usub16 lr, r8, r12 @ c (o5 | o1) = t2
yading@10 273 uadd16 r12, r11, r1 @ d (o7 | o3) = t3
yading@10 274 usub16 r1, r9, lr @ b-c = dst{0,1}[2]
yading@10 275 uadd16 r7, r10, r12 @ a+d = dst{0,1}[0]
yading@10 276 usub16 r12, r10, r12 @ a-d = dst{0,1}[3]
yading@10 277 uadd16 r10, r9, lr @ b+c = dst{0,1}[1]
yading@10 278
@ Repack >>3 results into bytes for rows 0 and 1 of the output pair.
yading@10 279 asr lr, r1, #3 @ o[1][2]
yading@10 280 asr r9, r12, #3 @ o[1][3]
yading@10 281 pkhtb r8, lr, r7, asr #19 @ o[1][0,2]
yading@10 282 pkhtb r11, r9, r10, asr #19 @ o[1][1,3]
yading@10 283 ldr lr, [r0]
yading@10 284 sxth r12, r12
yading@10 285 ldr r9, [r0, r2]
yading@10 286 sxth r1, r1
yading@10 287 #if HAVE_ARMV6T2_EXTERNAL
yading@10 288 sbfx r7, r7, #3, #13
yading@10 289 sbfx r10, r10, #3, #13
yading@10 290 #else
yading@10 291 sxth r7, r7
yading@10 292 sxth r10, r10
yading@10 293 asr r7, #3 @ o[0][0]
yading@10 294 asr r10, #3 @ o[0][1]
yading@10 295 #endif
yading@10 296 pkhbt r7, r7, r1, lsl #13 @ o[0][0,2]
yading@10 297 pkhbt r10, r10, r12, lsl #13 @ o[0][1,3]
yading@10 298
@ Add to destination pixels: widen bytes, saturate each lane to 8 bits,
@ then re-interleave even/odd lanes and store two rows.
yading@10 299 uxtab16 r7, r7, lr
yading@10 300 uxtab16 r10, r10, lr, ror #8
yading@10 301 uxtab16 r8, r8, r9
yading@10 302 uxtab16 r11, r11, r9, ror #8
yading@10 303 usat16 r7, #8, r7
yading@10 304 usat16 r10, #8, r10
yading@10 305 usat16 r8, #8, r8
yading@10 306 usat16 r11, #8, r11
yading@10 307 orr r7, r7, r10, lsl #8
yading@10 308 orr r8, r8, r11, lsl #8
yading@10 309 str r8, [r0, r2]
yading@10 310 str_post r7, r0, r2, lsl #1
yading@10 311
yading@10 312 bne 2b
yading@10 313
yading@10 314 pop {r4-r12, pc}
yading@10 315 endfunc
yading@10 316
yading@10 317 @ void vp8_idct_dc_add(uint8_t *dst, int16_t block[16], int stride)
@ DC-only IDCT-and-add for one 4x4 block.
@ In: r0 = dst, r1 = block[16], r2 = stride
@ Computes dc = (block[0] + 4) >> 3, zeroes block[0] (advancing r1 by 32
@ to the next sub-block for the add4 callers), duplicates dc into both
@ 16-bit lanes, then adds it to 4 rows of 4 pixels with uxtab16/usat16
@ byte-lane saturating arithmetic.  r6 points at rows 2-3.
yading@10 318 function ff_vp8_idct_dc_add_armv6, export=1
yading@10 319 push {r4-r6, lr}
yading@10 320 add r6, r0, r2, lsl #1 @ r6 = dst + 2*stride
yading@10 321 ldrsh r3, [r1]
yading@10 322 mov r4, #0
yading@10 323 add r3, r3, #4 @ rounding before >>3
yading@10 324 strh r4, [r1], #32 @ clear dc, step to next sub-block
yading@10 325 asr r3, #3
yading@10 326 ldr r5, [r0]
yading@10 327 ldr r4, [r0, r2]
yading@10 328 pkhbt r3, r3, r3, lsl #16 @ dc | dc (both 16-bit lanes)
yading@10 329 uxtab16 lr, r3, r5 @ a1+2 | a1+0
yading@10 330 uxtab16 r5, r3, r5, ror #8 @ a1+3 | a1+1
yading@10 331 uxtab16 r12, r3, r4
yading@10 332 uxtab16 r4, r3, r4, ror #8
yading@10 333 usat16 lr, #8, lr
yading@10 334 usat16 r5, #8, r5
yading@10 335 usat16 r12, #8, r12
yading@10 336 usat16 r4, #8, r4
yading@10 337 orr lr, lr, r5, lsl #8 @ re-interleave even/odd bytes
yading@10 338 ldr r5, [r6]
yading@10 339 orr r12, r12, r4, lsl #8
yading@10 340 ldr r4, [r6, r2]
yading@10 341 str lr, [r0]
yading@10 342 uxtab16 lr, r3, r5
yading@10 343 str r12, [r0, r2]
yading@10 344 uxtab16 r5, r3, r5, ror #8
yading@10 345 uxtab16 r12, r3, r4
yading@10 346 uxtab16 r4, r3, r4, ror #8
yading@10 347 usat16 lr, #8, lr
yading@10 348 usat16 r5, #8, r5
yading@10 349 usat16 r12, #8, r12
yading@10 350 usat16 r4, #8, r4
yading@10 351 orr lr, lr, r5, lsl #8
yading@10 352 orr r12, r12, r4, lsl #8
yading@10 353 str lr, [r6]
yading@10 354 str r12, [r6, r2]
yading@10 355 pop {r4-r6, pc}
yading@10 356 endfunc
yading@10 357
yading@10 358 @ void vp8_idct_dc_add4uv(uint8_t *dst, int16_t block[4][16], int stride)
@ DC-add for four 4x4 chroma blocks arranged 2x2 in an 8x8 area.
@ Relies on ff_vp8_idct_dc_add_armv6 advancing r1 by 32 per call;
@ r0 is stepped right 4, then down 4 rows and back left 4.
yading@10 359 function ff_vp8_idct_dc_add4uv_armv6, export=1
yading@10 360 push {r4, lr}
yading@10 361
yading@10 362 bl ff_vp8_idct_dc_add_armv6
yading@10 363 add r0, r0, #4 @ top-right block
yading@10 364 bl ff_vp8_idct_dc_add_armv6
yading@10 365 add r0, r0, r2, lsl #2 @ down 4 rows
yading@10 366 sub r0, r0, #4 @ bottom-left block
yading@10 367 bl ff_vp8_idct_dc_add_armv6
yading@10 368 add r0, r0, #4 @ bottom-right block
yading@10 369 bl ff_vp8_idct_dc_add_armv6
yading@10 370
yading@10 371 pop {r4, pc}
yading@10 372 endfunc
yading@10 373
yading@10 374 @ void vp8_idct_dc_add4y(uint8_t *dst, int16_t block[4][16], int stride)
@ DC-add for four horizontally adjacent 4x4 luma blocks (a 16x4 strip).
@ Each call consumes the next sub-block (r1 += 32 inside the callee).
yading@10 375 function ff_vp8_idct_dc_add4y_armv6, export=1
yading@10 376 push {r4, lr}
yading@10 377
yading@10 378 bl ff_vp8_idct_dc_add_armv6
yading@10 379 add r0, r0, #4
yading@10 380 bl ff_vp8_idct_dc_add_armv6
yading@10 381 add r0, r0, #4
yading@10 382 bl ff_vp8_idct_dc_add_armv6
yading@10 383 add r0, r0, #4
yading@10 384 bl ff_vp8_idct_dc_add_armv6
yading@10 385
yading@10 386 pop {r4, pc}
yading@10 387 endfunc
yading@10 388
yading@10 389 @ loopfilter
yading@10 390
@ 4x4 byte-matrix transpose entirely in registers.
@ \i0-\i3 hold four rows of 4 pixels (one word each); \o0-\o3 receive
@ the four columns.  Even/odd bytes are split with uxtb16 (plain and
@ ror #8 views), merged pairwise with orr, then recombined into columns
@ with pkhbt/pkhtb.  Clobbers \i0-\i3.
yading@10 391 .macro transpose o3, o2, o1, o0, i0, i1, i2, i3
yading@10 392 uxtb16 \o1, \i1 @ xx 12 xx 10
yading@10 393 uxtb16 \o0, \i0 @ xx 02 xx 00
yading@10 394 uxtb16 \o3, \i3 @ xx 32 xx 30
yading@10 395 uxtb16 \o2, \i2 @ xx 22 xx 20
yading@10 396 orr \o1, \o0, \o1, lsl #8 @ 12 02 10 00
yading@10 397 orr \o3, \o2, \o3, lsl #8 @ 32 22 30 20
yading@10 398
yading@10 399 uxtb16 \i1, \i1, ror #8 @ xx 13 xx 11
yading@10 400 uxtb16 \i3, \i3, ror #8 @ xx 33 xx 31
yading@10 401 uxtb16 \i0, \i0, ror #8 @ xx 03 xx 01
yading@10 402 uxtb16 \i2, \i2, ror #8 @ xx 23 xx 21
yading@10 403 orr \i0, \i0, \i1, lsl #8 @ 13 03 11 01
yading@10 404 orr \i2, \i2, \i3, lsl #8 @ 33 23 31 21
yading@10 405
yading@10 406 pkhtb \o2, \o3, \o1, asr #16 @ 32 22 12 02
yading@10 407 pkhbt \o0, \o1, \o3, lsl #16 @ 30 20 10 00
yading@10 408 pkhtb \o3, \i2, \i0, asr #16 @ 33 23 13 03
yading@10 409 pkhbt \o1, \i0, \i2, lsl #16 @ 31 21 11 01
yading@10 410 .endm
yading@10 411
@ VP8 "simple" loop filter for 4 pixels in parallel (byte lanes).
@ In: r3=p1 r4=p0 r5=q0 r6=q1, r12=flimit replicated per byte,
@     r2=0x80808080, lr=0.  Out: r4=new p0, r5=new q0.
@ Branches to local label 2f (caller provides it) when no pixel in the
@ word passes the |p0-q0|*2 + |p1-q1|/2 <= flimit test.
yading@10 412 .macro simple_filter
yading@10 413 uqsub8 r7, r3, r6 @ p1 - q1
yading@10 414 uqsub8 r8, r6, r3 @ q1 - p1
yading@10 415 uqsub8 r10, r4, r5 @ p0 - q0
yading@10 416 uqsub8 r9, r5, r4 @ q0 - p0
yading@10 417 orr r7, r7, r8 @ abs(p1 - q1)
yading@10 418 orr r9, r9, r10 @ abs(p0 - q0)
yading@10 419 uhadd8 r7, r7, lr @ abs(p1 - q2) >> 1
yading@10 420 uqadd8 r9, r9, r9 @ abs(p0 - q0) * 2
yading@10 421 uqadd8 r7, r7, r9 @ abs(p0 - q0)*2 + abs(p1-q1)/2
yading@10 422 mvn r8, #0
yading@10 423 usub8 r10, r12, r7 @ compare to flimit
yading@10 424 sel r10, r8, lr @ filter mask: F or 0
yading@10 425 cmp r10, #0
yading@10 426 beq 2f @ nothing to filter in this word
yading@10 427
@ Convert to signed domain (xor 0x80), compute the common filter value
@ vp8_filter = clamp(p1-q1) + 3*clamp(q0-p0), masked per byte.
yading@10 428 eor r3, r3, r2 @ ps1
yading@10 429 eor r6, r6, r2 @ qs1
yading@10 430 eor r4, r4, r2 @ ps0
yading@10 431 eor r5, r5, r2 @ qs0
yading@10 432
yading@10 433 qsub8 r3, r3, r6 @ vp8_filter = p1 - q1
yading@10 434 qsub8 r6, r5, r4 @ q0 - p0
yading@10 435 qadd8 r3, r3, r6 @ += q0 - p0
yading@10 436 lsr r7, r2, #5 @ 0x04040404
yading@10 437 qadd8 r3, r3, r6 @ += q0 - p0
yading@10 438 sub r9, r7, r2, lsr #7 @ 0x03030303
yading@10 439 qadd8 r3, r3, r6 @ vp8_filter = p1-q1 + 3*(q0-p0)
yading@10 440 and r3, r3, r10 @ vp8_filter &= mask
yading@10 441
yading@10 442 qadd8 r9, r3, r9 @ Filter2 = vp8_filter + 3
yading@10 443 qadd8 r3, r3, r7 @ Filter1 = vp8_filter + 4
yading@10 444
@ Three signed-halving adds with 0 == arithmetic >>3 per byte lane.
yading@10 445 shadd8 r9, r9, lr
yading@10 446 shadd8 r3, r3, lr
yading@10 447 shadd8 r9, r9, lr
yading@10 448 shadd8 r3, r3, lr
yading@10 449 shadd8 r9, r9, lr @ Filter2 >>= 3
yading@10 450 shadd8 r3, r3, lr @ Filter1 >>= 3
yading@10 451
yading@10 452 qadd8 r4, r4, r9 @ u = p0 + Filter2
yading@10 453 qsub8 r5, r5, r3 @ u = q0 - Filter1
yading@10 454 eor r4, r4, r2 @ *op0 = u ^ 0x80
yading@10 455 eor r5, r5, r2 @ *oq0 = u ^ 0x80
yading@10 456 .endm
yading@10 457
yading@10 458 @ void vp8_v_loop_filter16_simple(uint8_t *dst, int stride, int flim)
@ Simple vertical-edge loop filter over a 16-pixel row boundary,
@ processed as 4 words of 4 pixels (r11 = iteration count).
@ r12 = flimit replicated into all 4 bytes; r2 = 0x80808080; lr = 0.
yading@10 459 function ff_vp8_v_loop_filter16_simple_armv6, export=1
yading@10 460 push {r4-r11, lr}
yading@10 461
yading@10 462 orr r2, r2, r2, lsl #16
yading@10 463 mov r11, #4
yading@10 464 mov lr, #0
yading@10 465 orr r12, r2, r2, lsl #8 @ flim in every byte
yading@10 466 mov32 r2, 0x80808080
yading@10 467 1:
@ Load the two rows above and below the edge for this 4-pixel column.
yading@10 468 ldr_nreg r3, r0, r1, lsl #1 @ p1
yading@10 469 ldr_nreg r4, r0, r1 @ p0
yading@10 470 ldr r5, [r0] @ q0
yading@10 471 ldr r6, [r0, r1] @ q1
yading@10 472 simple_filter @ may branch to 2: below
yading@10 473 T sub r7, r0, r1
yading@10 474 str r5, [r0] @ oq0
yading@10 475 A str r4, [r0, -r1] @ op0
yading@10 476 T str r4, [r7]
yading@10 477 2:
yading@10 478 subs r11, r11, #1
yading@10 479 add r0, r0, #4 @ next 4 pixels along the edge
yading@10 480 bne 1b
yading@10 481
yading@10 482 pop {r4-r11, pc}
yading@10 483 endfunc
yading@10 484
@ Accumulate the p-side of the normal filter mask.
@ In: r9=p3 r10=p2 r11=p1 r12=p0, r2=limit (per byte), r3=thresh.
@ Out: lr = running limit-exceeded accumulator (|p3-p2|, |p2-p1|,
@ |p1-p0| vs limit); r8 = |p1-p0| vs thresh (hev candidate); r6 kept
@ as |p1-p0| for the caller.
yading@10 485 .macro filter_mask_p
yading@10 486 uqsub8 r6, r9, r10 @ p3 - p2
yading@10 487 uqsub8 r7, r10, r9 @ p2 - p3
yading@10 488 uqsub8 r8, r10, r11 @ p2 - p1
yading@10 489 uqsub8 r10, r11, r10 @ p1 - p2
yading@10 490 orr r6, r6, r7 @ abs(p3-p2)
yading@10 491 orr r8, r8, r10 @ abs(p2-p1)
yading@10 492 uqsub8 lr, r6, r2 @ compare to limit
yading@10 493 uqsub8 r8, r8, r2 @ compare to limit
yading@10 494 uqsub8 r6, r11, r12 @ p1 - p0
yading@10 495 orr lr, lr, r8
yading@10 496 uqsub8 r7, r12, r11 @ p0 - p1
yading@10 497 orr r6, r6, r7 @ abs(p1-p0)
yading@10 498 uqsub8 r7, r6, r2 @ compare to limit
yading@10 499 uqsub8 r8, r6, r3 @ compare to thresh
yading@10 500 orr lr, lr, r7
yading@10 501 .endm
yading@10 502
@ Edge-strength term shared by the filter masks:
@ r12 = |p0-q0|*2 + |p1-q1|/2 (per byte), r6 = |p1-q1|/2.
@ In: r11=p1 r10=q1 r12=p0 r9=q0.  The 0x7f mask clears the bits
@ shifted across byte-lane boundaries by the word-wide lsr #1.
yading@10 503 .macro filter_mask_pq
yading@10 504 uqsub8 r6, r11, r10 @ p1 - q1
yading@10 505 uqsub8 r7, r10, r11 @ q1 - p1
yading@10 506 uqsub8 r11, r12, r9 @ p0 - q0
yading@10 507 uqsub8 r12, r9, r12 @ q0 - p0
yading@10 508 orr r6, r6, r7 @ abs(p1-q1)
yading@10 509 orr r12, r11, r12 @ abs(p0-q0)
yading@10 510 mov32 r7, 0x7f7f7f7f
yading@10 511 uqadd8 r12, r12, r12 @ abs(p0-q0) * 2
yading@10 512 and r6, r7, r6, lsr #1 @ abs(p1-q1) / 2
yading@10 513 uqadd8 r12, r12, r6 @ abs(p0-q0) * 2 + abs(p1-q1)/2
yading@10 514 .endm
yading@10 515
@ Full filter mask for a vertical (horizontal-edge) filter pass.
@ Uses filter_mask_p for the p side, loads q0..q3 around r0 as it goes,
@ folds in the q-side |..| vs limit terms and the flimit test (r4),
@ and produces lr = per-byte all-ones/zero filter mask via usub8+sel.
@ On exit r0 has been rewound to point at q0's row minus 2*stride.
yading@10 516 .macro filter_mask_v
yading@10 517 filter_mask_p
yading@10 518
yading@10 519 ldr r10, [r0, r1] @ q1
yading@10 520 ldr_post r9, r0, r1, lsl #1 @ q0
yading@10 521
yading@10 522 filter_mask_pq
yading@10 523
yading@10 524 ldr r11, [r0] @ q2
yading@10 525
yading@10 526 uqsub8 r7, r9, r10 @ q0 - q1
yading@10 527 uqsub8 r6, r10, r9 @ q1 - q0
yading@10 528 uqsub8 r12, r12, r4 @ compare to flimit
yading@10 529 uqsub8 r9, r11, r10 @ q2 - q1
yading@10 530 uqsub8 r10, r10, r11 @ q1 - q2
yading@10 531 orr lr, lr, r12
yading@10 532 ldr r12, [r0, r1] @ q3
yading@10 533 orr r6, r7, r6 @ abs(q1-q0)
yading@10 534 orr r10, r9, r10 @ abs(q2-q1)
yading@10 535 uqsub8 r9, r12, r11 @ q3 - q2
yading@10 536 uqsub8 r11, r11, r12 @ q2 - q3
yading@10 537 uqsub8 r7, r6, r2 @ compare to limit
yading@10 538 uqsub8 r10, r10, r2 @ compare to limit
yading@10 539 uqsub8 r6, r6, r3 @ compare to thresh
yading@10 540 orr r9, r9, r11 @ abs(q3-q2)
yading@10 541 orr lr, lr, r7
yading@10 542 orr lr, lr, r10
yading@10 543 uqsub8 r9, r9, r2 @ compare to limit
yading@10 544 orr lr, lr, r9
yading@10 545
@ lr != 0 in a byte means "exceeded": invert into a keep/filter mask.
yading@10 546 mov r12, #0
yading@10 547 usub8 lr, r12, lr
yading@10 548 mvn r11, #0
yading@10 549 sel lr, r11, r12 @ filter mask
yading@10 550 sub r0, r0, r1, lsl #1
yading@10 551 .endm
yading@10 552
@ Full filter mask for a horizontal (vertical-edge) filter pass.
@ Transposes the 4x4 pixel tiles on either side of the edge first,
@ spills intermediates to sp (p1/p0 and the limit accumulator are
@ reloaded from [sp, #8]/[sp, #4]/[sp, #12]), then combines p-side,
@ q-side and flimit tests into lr as a per-byte filter mask.
yading@10 553 .macro filter_mask_h
yading@10 554 transpose r12, r11, r10, r9, r6, r7, r8, lr
yading@10 555
yading@10 556 filter_mask_p
yading@10 557
yading@10 558 stm sp, {r8, r11, r12, lr} @ save thresh-cand, p0, p1, accum
yading@10 559 sub r0, r0, r1, lsl #2
yading@10 560 add r0, r0, #4 @ step to the q-side 4x4 tile
yading@10 561
yading@10 562 ldr r7, [r0, r1]
yading@10 563 ldr_post r6, r0, r1, lsl #1
yading@10 564 ldr lr, [r0, r1]
yading@10 565 ldr r8, [r0]
yading@10 566
yading@10 567 transpose r12, r11, r10, r9, r6, r7, r8, lr
yading@10 568
yading@10 569 uqsub8 r8, r12, r11 @ q3 - q2
yading@10 570 uqsub8 lr, r11, r12 @ q2 - q3
yading@10 571 uqsub8 r7, r9, r10 @ q0 - q1
yading@10 572 uqsub8 r6, r10, r9 @ q1 - q0
yading@10 573 uqsub8 r12, r11, r10 @ q2 - q1
yading@10 574 uqsub8 r11, r10, r11 @ q1 - q2
yading@10 575 orr r8, r8, lr @ abs(q3-q2)
yading@10 576 orr r6, r7, r6 @ abs(q1-q0)
yading@10 577 orr r11, r12, r11 @ abs(q2-q1)
yading@10 578 ldr lr, [sp, #12] @ load back (f)limit accumulator
yading@10 579 uqsub8 r8, r8, r2 @ compare to limit
yading@10 580 uqsub8 r7, r6, r2 @ compare to limit
yading@10 581 uqsub8 r11, r11, r2 @ compare to limit
yading@10 582 orr lr, lr, r8
yading@10 583 uqsub8 r8, r6, r3 @ compare to thresh
yading@10 584 orr lr, lr, r7
yading@10 585 ldr r12, [sp, #8] @ p1
yading@10 586 orr lr, lr, r11
yading@10 587
yading@10 588 ldr r11, [sp, #4] @ p0
yading@10 589
yading@10 590 filter_mask_pq
yading@10 591
yading@10 592 mov r10, #0
yading@10 593 uqsub8 r12, r12, r4 @ compare to flimit
yading@10 594 mvn r11, #0
yading@10 595 orr lr, lr, r12
yading@10 596 usub8 lr, r10, lr
yading@10 597 sel lr, r11, r10 @ filter mask
yading@10 598 .endm
yading@10 599
@ Core of the normal loop filter, shared by inner and macroblock edges.
@ In: r7=p1 r8=p0 r9=q0 r10=q1, r6=hev mask, lr=filter mask.
@ Converts to signed domain, computes vp8_filter = clamp(ps1-qs1)
@ [&hev if \inner] + 3*clamp(qs0-ps0), masks it, and derives
@ Filter1/Filter2 (>>3 via shadd8 with 0).  Saves ps0..qs1 to sp for
@ the follow-up macros.  Out: lr=Filter2, r7=Filter1, r12=0x80808080
@ (and, for \inner=0, r12 held the unmasked filter before the shifts).
yading@10 600 .macro filter inner
yading@10 601 mov32 r12, 0x80808080
yading@10 602 eor r11, r7, r12 @ ps1
yading@10 603 eor r8, r8, r12 @ ps0
yading@10 604 eor r9, r9, r12 @ qs0
yading@10 605 eor r10, r10, r12 @ qs1
yading@10 606
yading@10 607 stm sp, {r8-r11} @ ps0, qs0, qs1, ps1 for later macros
yading@10 608
yading@10 609 qsub8 r7, r11, r10 @ vp8_signed_char_clamp(ps1-qs1)
yading@10 610 qsub8 r8, r9, r8 @ vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
yading@10 611 .if \inner
yading@10 612 and r7, r7, r6 @ vp8_filter &= hev
yading@10 613 .endif
yading@10 614 qadd8 r7, r7, r8
yading@10 615 lsr r10, r12, #5 @ 0x04040404
yading@10 616 qadd8 r7, r7, r8
yading@10 617 sub r9, r10, r12, lsr #7 @ 0x03030303
yading@10 618 qadd8 r7, r7, r8
yading@10 619
yading@10 620 and r7, r7, lr @ vp8_filter &= mask
yading@10 621 .if !\inner
yading@10 622 mov r12, r7 @ Filter2
yading@10 623 and r7, r7, r6 @ Filter2 &= hev
yading@10 624 .endif
yading@10 625 qadd8 lr, r7, r9 @ Filter2 = vp8_signed_char_clamp(vp8_filter+3)
yading@10 626 qadd8 r7, r7, r10 @ Filter1 = vp8_signed_char_clamp(vp8_filter+4)
yading@10 627
yading@10 628 mov r9, #0
yading@10 629 shadd8 lr, lr, r9 @ Filter2 >>= 3
yading@10 630 shadd8 r7, r7, r9 @ Filter1 >>= 3
yading@10 631 shadd8 lr, lr, r9
yading@10 632 shadd8 r7, r7, r9
yading@10 633 shadd8 lr, lr, r9 @ Filter2
yading@10 634 shadd8 r7, r7, r9 @ Filter1
yading@10 635 .endm
yading@10 636
@ Vertical-pass wrapper: derive the hev mask from the thresh results
@ (r6/r8 from filter_mask_v, r12=0, r11=~0), reload p1/p0/q0/q1 around
@ r0, then run the shared "filter" core.
yading@10 637 .macro filter_v inner
yading@10 638 orr r10, r6, r8 @ calculate vp8_hevmask
yading@10 639 ldr_nreg r7, r0, r1, lsl #1 @ p1
yading@10 640 usub8 r10, r12, r10
yading@10 641 ldr_nreg r8, r0, r1 @ p0
yading@10 642 sel r6, r12, r11 @ obtain vp8_hevmask
yading@10 643 ldr r9, [r0] @ q0
yading@10 644 ldr r10, [r0, r1] @ q1
yading@10 645 filter \inner
yading@10 646 .endm
yading@10 647
@ Horizontal-pass wrapper: derive the hev mask, stash it with the
@ filter mask on sp across the transpose (which needs the registers),
@ reload and transpose the 4x4 tile straddling the edge into
@ p1/p0/q0/q1 (r7-r10), then run the shared "filter" core.
yading@10 648 .macro filter_h inner
yading@10 649 orr r9, r6, r8
yading@10 650 usub8 r9, r12, r9
yading@10 651 sel r6, r12, r11 @ hev mask
yading@10 652
yading@10 653 stm sp, {r6, lr} @ preserve hev + filter masks
yading@10 654
yading@10 655 ldr_nreg r12, r0, r1, lsl #1
yading@10 656 ldr_nreg r11, r0, r1
yading@10 657 ldr r6, [r0]
yading@10 658 ldr lr, [r0, r1]
yading@10 659
yading@10 660 transpose r10, r9, r8, r7, r12, r11, r6, lr
yading@10 661
yading@10 662 ldm sp, {r6, lr} @ restore hev + filter masks
yading@10 663 filter \inner
yading@10 664 .endm
yading@10 665
@ Inner-edge epilogue: apply Filter1/Filter2 (from "filter") to
@ ps0/qs0 reloaded from sp, then the halved (vp8_filter+1)>>1, &~hev
@ adjustment to ps1/qs1.  Results converted back from the signed
@ domain: r11=op1 r8=op0 r9=oq0 r10=oq1.
yading@10 666 .macro filter_inner
yading@10 667 ldm sp, {r8, r9} @ ps0, qs0
yading@10 668 lsr r10, r10, #2 @ 0x01010101
yading@10 669 qadd8 r8, r8, lr @ u = vp8_signed_char_clamp(ps0 + Filter2)
yading@10 670 mov lr, #0
yading@10 671 qsub8 r9, r9, r7 @ u = vp8_signed_char_clamp(qs0 - Filter1)
yading@10 672 sadd8 r7, r7, r10 @ vp8_filter += 1
yading@10 673 ldr r10, [sp, #8] @ qs1
yading@10 674 shadd8 r7, r7, lr @ vp8_filter >>= 1
yading@10 675 eor r8, r8, r12 @ *op0 = u ^ 0x80
yading@10 676 bic r7, r7, r6 @ vp8_filter &= ~hev
yading@10 677 qadd8 r11, r11, r7 @ u = vp8_signed_char_clamp(ps1 + vp8_filter)
yading@10 678 eor r9, r9, r12 @ *oq0 = u ^ 0x80
yading@10 679 qsub8 r10, r10, r7 @ u = vp8_signed_char_clamp(qs1 - vp8_filter)
yading@10 680 eor r11, r11, r12 @ *op1 = u ^ 0x80
yading@10 681 eor r10, r10, r12 @ *oq1 = u ^ 0x80
yading@10 682 .endm
yading@10 683
@ Macroblock-edge weighted update: u = clamp((63 + Filter2 * \c0) >> 7)
@ computed per byte via sxtb16 lane splits and smla*/ssat, then
@ oq = clamp(qs - u), op = clamp(ps + u), converted back with ^0x80.
@ In: r12 = Filter2 (masked), r9 = qs, r11 = ps.
@ Out: r8 = new q pixel word, r10 = new p pixel word, lr = 0x80808080.
yading@10 684 .macro filter_x c0
yading@10 685 mov lr, \c0
yading@10 686 mov r7, #63 @ rounding term for the >>7
yading@10 687
yading@10 688 sxtb16 r6, r12 @ lanes 0,2 of Filter2
yading@10 689 sxtb16 r10, r12, ror #8 @ lanes 1,3 of Filter2
yading@10 690 smlabb r8, r6, lr, r7
yading@10 691 smlatb r6, r6, lr, r7
yading@10 692 smlabb r7, r10, lr, r7
yading@10 693 smultb r10, r10, lr
yading@10 694 ssat r8, #8, r8, asr #7
yading@10 695 ssat r6, #8, r6, asr #7
yading@10 696 add r10, r10, #63 @ r7 was consumed; add rounding here
yading@10 697 ssat r7, #8, r7, asr #7
yading@10 698 ssat r10, #8, r10, asr #7
yading@10 699
yading@10 700 pkhbt r6, r8, r6, lsl #16
yading@10 701 pkhbt r10, r7, r10, lsl #16
yading@10 702 uxtb16 r6, r6
yading@10 703 uxtb16 r10, r10
yading@10 704
yading@10 705 mov32 lr, 0x80808080
yading@10 706
yading@10 707 orr r10, r6, r10, lsl #8 @ u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
yading@10 708 qsub8 r8, r9, r10 @ s = vp8_signed_char_clamp(qs0 - u)
yading@10 709 qadd8 r10, r11, r10 @ s = vp8_signed_char_clamp(ps0 + u)
yading@10 710 eor r8, r8, lr @ *oq0 = s ^ 0x80
yading@10 711 eor r10, r10, lr @ *op0 = s ^ 0x80
yading@10 712 .endm
yading@10 713
@ Macroblock-edge step 1 (weight 27, applied to p0/q0):
@ reload ps0/qs0 from sp, pre-apply Filter2/Filter1 (lr/r7), strip hev
@ bytes from the saved filter value, then run filter_x #27.
yading@10 714 .macro filter_1
yading@10 715 ldm sp, {r8, r9} @ ps0, qs0
yading@10 716 qadd8 r11, r8, lr @ ps0 + Filter2
yading@10 717 qsub8 r9, r9, r7 @ qs0 - Filter1
yading@10 718 bic r12, r12, r6 @ vp8_filter &= ~hev
yading@10 719 filter_x #27
yading@10 720 .endm
yading@10 721
@ Macroblock-edge step 2 (weight 18, applied to p1/q1):
@ reload qs1/ps1 from the sp scratch area, then filter_x #18.
yading@10 722 .macro filter_2
yading@10 723 ldr r9, [sp, #8] @ qs1
yading@10 724 ldr r11, [sp, #12] @ ps1
yading@10 725 filter_x #18
yading@10 726 .endm
yading@10 727
@ Macroblock-edge step 3 (weight 9, applied to p2/q2):
@ caller loads the raw p2/q2 into r11/r9; xor 0x80 (lr) converts them
@ to the signed domain before filter_x #9.
yading@10 728 .macro filter_3
yading@10 729 eor r9, r9, lr @ qs2
yading@10 730 eor r11, r11, lr @ ps2
yading@10 731 filter_x #9
yading@10 732 .endm
yading@10 733
@ Common body for the vertical inner-edge filters (16-wide luma and
@ 8-wide chroma pair).  Entered by tail-branch from the exported stubs,
@ which push a second column pointer that is reloaded from [sp, #16]
@ after the 3rd iteration (r5 counts 4 4-pixel columns).
@ In: r0=dst r1=stride, r2/r3/r6 = I/E/hev-thresh (byte-replicated
@ below into r4=flimE, r2=flimI, r3=thresh).  16 bytes of sp scratch
@ are used by the filter macros.
yading@10 734 function vp8_v_loop_filter_inner_armv6
yading@10 735 mov r5, #4
yading@10 736 sub sp, sp, #16
yading@10 737
yading@10 738 orr r2, r2, r2, lsl #16
yading@10 739 orr r3, r3, r3, lsl #16
yading@10 740 orr r6, r6, r6, lsl #16
yading@10 741 orr r4, r2, r2, lsl #8 @ flimE
yading@10 742 orr r2, r3, r3, lsl #8 @ flimI
yading@10 743 orr r3, r6, r6, lsl #8 @ thresh
yading@10 744 1:
@ Load p3..p0 above the edge (ldr_post steps r0 down as it loads).
yading@10 745 sub r0, r0, r1, lsl #2
yading@10 746 ldr r10, [r0, r1] @ p2
yading@10 747 ldr_post r9, r0, r1, lsl #1 @ p3
yading@10 748 ldr r12, [r0, r1] @ p0
yading@10 749 ldr_post r11, r0, r1, lsl #1 @ p1
yading@10 750
yading@10 751 filter_mask_v
yading@10 752 cmp lr, #0
yading@10 753 beq 2f @ whole word passes: skip filtering
yading@10 754 filter_v inner=1
yading@10 755 filter_inner
yading@10 756
@ Store op1/op0 (ARM vs Thumb addressing variants), then oq0/oq1.
yading@10 757 A str r11, [r0, -r1, lsl #1] @ op1
yading@10 758 A str r8, [r0, -r1] @ op0
yading@10 759 T sub r0, r0, r1, lsl #1
yading@10 760 T str r8, [r0, r1]
yading@10 761 T str_post r11, r0, r1, lsl #1
yading@10 762 str r9, [r0] @ oq0
yading@10 763 str r10, [r0, r1] @ oq1
yading@10 764 2:
yading@10 765 add r0, r0, #4
yading@10 766 cmp r5, #3
yading@10 767 it eq
yading@10 768 ldreq r0, [sp, #16] @ switch to 2nd half/plane pointer
yading@10 769 subs r5, r5, #1
yading@10 770 bne 1b
yading@10 771
yading@10 772 add sp, sp, #16
yading@10 773 pop {r0, r4-r11, pc} @ pops the stub-pushed pointer too
yading@10 774 endfunc
yading@10 775
@ 16-wide luma inner-edge entry: pushes dst+8 as the second column
@ pointer (consumed at [sp, #16] / final pop in the common body),
@ fetches the hev threshold from the stack argument, and tail-branches.
yading@10 776 function ff_vp8_v_loop_filter16_inner_armv6, export=1
yading@10 777 push {r4-r11, lr}
yading@10 778 add r12, r0, #8
yading@10 779 push {r12} @ second half of the 16-pixel row
yading@10 780 ldr r6, [sp, #40] @ hev thresh (stack arg)
yading@10 781 orr r2, r2, r2, lsl #16
yading@10 782 b vp8_v_loop_filter_inner_armv6
yading@10 783 endfunc
yading@10 784
@ 8-wide chroma inner-edge entry: r1 (the V-plane pointer) is pushed as
@ the second pointer, arguments are shuffled so the common body sees
@ (dst, stride, flimE, flimI, thresh), then tail-branch.
yading@10 785 function ff_vp8_v_loop_filter8uv_inner_armv6, export=1
yading@10 786 push {r1, r4-r11, lr} @ r1 = V-plane dst, picked up at [sp, #16]
yading@10 787 mov r1, r2 @ stride
yading@10 788 orr r2, r3, r3, lsl #16 @ flimE
yading@10 789 ldr r3, [sp, #40] @ flimI (stack arg)
yading@10 790 ldr r6, [sp, #44] @ hev thresh (stack arg)
yading@10 791 b vp8_v_loop_filter_inner_armv6
yading@10 792 endfunc
yading@10 793
@ Common body for the vertical macroblock-edge filters.  Same framing
@ and loop structure as vp8_v_loop_filter_inner_armv6 (stub pushes a
@ second pointer, r5 counts 4 columns, 16 bytes sp scratch), but runs
@ the three-step strong filter: filter_1 (w27, p0/q0), filter_2
@ (w18, p1/q1), filter_3 (w9, p2/q2).
yading@10 794 function vp8_v_loop_filter_armv6
yading@10 795 mov r5, #4
yading@10 796 sub sp, sp, #16
yading@10 797
yading@10 798 orr r3, r3, r3, lsl #16
yading@10 799 orr r6, r6, r6, lsl #16
yading@10 800 orr r4, r2, r2, lsl #8 @ flimE
yading@10 801 orr r2, r3, r3, lsl #8 @ flimI
yading@10 802 orr r3, r6, r6, lsl #8 @ thresh
yading@10 803 1:
yading@10 804 sub r0, r0, r1, lsl #2
yading@10 805 ldr r10, [r0, r1] @ p2
yading@10 806 ldr_post r9, r0, r1, lsl #1 @ p3
yading@10 807 ldr r12, [r0, r1] @ p0
yading@10 808 ldr_post r11, r0, r1, lsl #1 @ p1
yading@10 809
yading@10 810 filter_mask_v
yading@10 811 cmp lr, #0
yading@10 812 beq 2f @ whole word passes: skip filtering
yading@10 813
yading@10 814 filter_v inner=0
yading@10 815 filter_1
yading@10 816
yading@10 817 str r8, [r0] @ *oq0
yading@10 818 A str r10, [r0, -r1] @ *op0
yading@10 819 T sub r0, r0, r1, lsl #1
yading@10 820 T str r10, [r0, r1]
yading@10 821
yading@10 822 filter_2
yading@10 823
yading@10 824 A str r10, [r0, -r1, lsl #1] @ *op1
yading@10 825 T str_post r10, r0, r1, lsl #1
yading@10 826 str r8, [r0, r1] @ *oq1
yading@10 827
@ Step 3 needs raw p2/q2 pixels reloaded (converted inside filter_3).
yading@10 828 ldr r9, [r0, r1, lsl #1] @ q2
yading@10 829 add r0, r0, r1
yading@10 830 A ldr r11, [r0, -r1, lsl #2] @ p2
yading@10 831 T ldr_dpre r11, r0, r1, lsl #2
yading@10 832
yading@10 833 filter_3
yading@10 834
yading@10 835 A str r10, [r0, -r1, lsl #2] @ *op2
yading@10 836 T str_post r10, r0, r1, lsl #2
yading@10 837 str r8, [r0, r1] @ *oq2
yading@10 838 sub r0, r0, r1
yading@10 839 2:
yading@10 840 add r0, r0, #4
yading@10 841 cmp r5, #3
yading@10 842 it eq
yading@10 843 ldreq r0, [sp, #16] @ switch to 2nd half/plane pointer
yading@10 844 subs r5, r5, #1
yading@10 845 bne 1b
yading@10 846
yading@10 847 add sp, sp, #16
yading@10 848 pop {r0, r4-r11, pc} @ pops the stub-pushed pointer too
yading@10 849 endfunc
yading@10 850
@ Full vertical-edge loop filter for a 16-wide luma edge: pushes dst+8 so
@ the shared routine can filter the second 8-pixel half, loads hev_thresh
@ from the stack, duplicates flim_E into both halfwords, then branches to
@ the shared implementation above.
yading@10 851 function ff_vp8_v_loop_filter16_armv6, export=1
yading@10 852 push {r4-r11, lr}
yading@10 853 add r12, r0, #8 @ pointer to second half of the edge
yading@10 854 push {r12}
yading@10 855 ldr r6, [sp, #40] @ r6 = hev_thresh (stack argument)
yading@10 856 orr r2, r2, r2, lsl #16 @ r2 = flim_E in both halfwords
yading@10 857 b vp8_v_loop_filter_armv6
yading@10 858 endfunc
yading@10 859
@ Full vertical-edge loop filter for the two 8-wide chroma planes.  Same
@ argument shuffle as the 8uv_inner entry above: pushes the V-plane pointer
@ for the shared loop's half-way switch, moves stride into r1, splats
@ flim_E, and loads flim_I / hev_thresh from the stack.
yading@10 860 function ff_vp8_v_loop_filter8uv_armv6, export=1
yading@10 861 push {r1, r4-r11, lr} @ save V-plane pointer for second half
yading@10 862 mov r1, r2 @ r1 = stride
yading@10 863 orr r2, r3, r3, lsl #16 @ r2 = flim_E in both halfwords
yading@10 864 ldr r3, [sp, #40] @ r3 = flim_I (stack argument)
yading@10 865 ldr r6, [sp, #44] @ r6 = hev_thresh (stack argument)
yading@10 866 b vp8_v_loop_filter_armv6
yading@10 867 endfunc
yading@10 868
yading@10 869 @ void vp8_h_loop_filter16_simple(uint8_t *dst, int stride, int flim)
@ Simple-filter variant for a horizontal (column) edge, 16 rows.
@ Per iteration: load 4 rows of 4 bytes straddling the edge (dst-2..dst+1),
@ transpose them so each register holds one column, run simple_filter on
@ p1 p0 | q0 q1, then re-interleave the filtered p0/q0 bytes and store
@ them back two bytes (one p0/q0 pair) per row.  r12 = flim splatted to
@ all 4 byte lanes, r2 = 0x80808080 sign-flip constant used by the macro.
yading@10 870 function ff_vp8_h_loop_filter16_simple_armv6, export=1
yading@10 871 push {r4-r11, lr}
yading@10 872 orr r12, r2, r2, lsl #16
yading@10 873 mov32 r2, 0x80808080
yading@10 874 orr r12, r12, r12, lsl #8 @ flim in all 4 byte lanes
yading@10 875
yading@10 876 mov lr, #0
yading@10 877 mov r11, #4 @ 4 groups of 4 rows = 16 rows
yading@10 878 1:
yading@10 879 sub r0, r0, #2 @ step left of the edge
yading@10 880 ldr r8, [r0, r1]
yading@10 881 ldr_post r7, r0, r1, lsl #1
yading@10 882 ldr r10, [r0, r1]
yading@10 883 ldr_post r9, r0, r1, lsl #1
yading@10 884 add r0, r0, #2
yading@10 885 transpose r6, r5, r4, r3, r7, r8, r9, r10
yading@10 886 simple_filter
yading@10 887 sub r0, r0, r1, lsl #2 @ back up 4 rows
yading@10 888 sub r0, r0, #1
yading@10 889
@ Re-interleave the 4 filtered p0 bytes (r4) and q0 bytes (r5) into
@ per-row halfwords: r6/r7/r4/r5 = rows 0..3.
yading@10 890 uxtb16 r6, r4
yading@10 891 uxtb16 r8, r5
yading@10 892 uxtb16 r7, r4, ror #8
yading@10 893 uxtb16 r9, r5, ror #8
yading@10 894 orr r6, r6, r8, lsl #8
yading@10 895 orr r7, r7, r9, lsl #8
yading@10 896 lsr r4, r6, #16
yading@10 897 lsr r5, r7, #16
yading@10 898
yading@10 899 strh_post r6, r0, r1
yading@10 900 strh_post r7, r0, r1
yading@10 901 strh_post r4, r0, r1
yading@10 902 strh_post r5, r0, r1
yading@10 903 add r0, r0, #1
yading@10 904 2:
yading@10 905 subs r11, r11, #1
yading@10 906 bne 1b
yading@10 907
yading@10 908 pop {r4-r11, pc}
yading@10 909 endfunc
yading@10 910
@ Shared inner loop filter for a horizontal (column) edge.  On entry:
@   r0 = dst, r1 = stride, r2 = flim_E duplicated in both halfwords,
@   r3 = flim_I, r9 = hev thresh, [sp, #0] = second pointer pushed by the
@   caller (second 8 rows, or the V plane).
@ Each iteration loads 4 rows of 8 pixels around the edge, builds the
@ filter mask (filter_mask_h also transposes into column registers),
@ filters, transposes back and stores the 4 modified columns.  When
@ r5 == 3 at the loop bottom, r0 is reloaded from the pushed pointer
@ (at [sp, #16] after the local sub sp, #16).
yading@10 911 function vp8_h_loop_filter_inner_armv6
yading@10 912 mov r5, #4 @ row-group counter
yading@10 913 sub sp, sp, #16 @ scratch for the filter macros
yading@10 914
@ Splat thresholds to all four byte lanes.
yading@10 915 orr r3, r3, r3, lsl #16
yading@10 916 orr r9, r9, r9, lsl #16
yading@10 917 orr r4, r2, r2, lsl #8 @ flimE
yading@10 918 orr r2, r3, r3, lsl #8 @ flimI
yading@10 919 orr r3, r9, r9, lsl #8 @ thresh
yading@10 920 sub r0, r0, #4 @ start 4 bytes left of the edge
yading@10 921 1:
yading@10 922 ldr r7, [r0, r1]
yading@10 923 ldr_post r6, r0, r1, lsl #1
yading@10 924 ldr lr, [r0, r1]
yading@10 925 ldr_post r8, r0, r1, lsl #1
yading@10 926
yading@10 927 filter_mask_h
yading@10 928
yading@10 929 cmp lr, #0 @ lr == 0: whole group unfiltered
yading@10 930 sub r0, r0, #2
yading@10 931 beq 2f
yading@10 932
yading@10 933 ldr r6, [sp] @ scratch saved by filter_mask_h
yading@10 934
yading@10 935 filter_h inner=1
yading@10 936 filter_inner
yading@10 937
yading@10 938 transpose lr, r12, r7, r6, r11, r8, r9, r10
yading@10 939
@ Store the 4 filtered rows (A = ARM-mode, T = Thumb-mode variants).
yading@10 940 A str r6, [r0, -r1, lsl #1]
yading@10 941 A str r7, [r0, -r1]
yading@10 942 T sub r0, r0, r1, lsl #1
yading@10 943 T str r7, [r0, r1]
yading@10 944 T str_post r6, r0, r1, lsl #1
yading@10 945 str r12, [r0]
yading@10 946 str lr, [r0, r1]
yading@10 947 2:
yading@10 948 sub r0, r0, #2
yading@10 949 add r0, r0, r1, lsl #1 @ advance 2 rows (4 total per iter)
yading@10 950 cmp r5, #3 @ after 1st group ...
yading@10 951 it eq
yading@10 952 ldreq r0, [sp, #16] @ ... switch to the pushed 2nd pointer
yading@10 953 subs r5, r5, #1
yading@10 954 bne 1b
yading@10 955
yading@10 956 add sp, sp, #16
yading@10 957 pop {r0, r4-r11, pc} @ r0 slot holds the pushed pointer
yading@10 958 endfunc
yading@10 959
@ Inner horizontal-edge filter for 16 rows of luma: pushes a pointer to
@ the second 8 rows (dst + 8*stride, pre-adjusted by the -4 offset the
@ shared routine applies on entry), loads hev_thresh into r9, splats
@ flim_E, and branches to the shared implementation.
yading@10 960 function ff_vp8_h_loop_filter16_inner_armv6, export=1
yading@10 961 push {r4-r11, lr}
yading@10 962 add r12, r0, r1, lsl #3 @ dst + 8*stride
yading@10 963 sub r12, r12, #4 @ pre-apply the shared code's -4 offset
yading@10 964 push {r12}
yading@10 965 ldr r9, [sp, #40] @ r9 = hev_thresh (stack argument)
yading@10 966 orr r2, r2, r2, lsl #16 @ r2 = flim_E in both halfwords
yading@10 967 b vp8_h_loop_filter_inner_armv6
yading@10 968 endfunc
yading@10 969
@ Inner horizontal-edge filter for the two 8-row chroma planes.  The
@ V-plane pointer is pre-adjusted by -4 (the offset the shared routine
@ applies to r0 on entry) before being pushed for the half-way switch.
yading@10 970 function ff_vp8_h_loop_filter8uv_inner_armv6, export=1
yading@10 971 sub r1, r1, #4 @ pre-apply the shared code's -4 offset to dstV
yading@10 972 push {r1, r4-r11, lr} @ save adjusted V-plane pointer
yading@10 973 mov r1, r2 @ r1 = stride
yading@10 974 orr r2, r3, r3, lsl #16 @ r2 = flim_E in both halfwords
yading@10 975 ldr r3, [sp, #40] @ r3 = flim_I (stack argument)
yading@10 976 ldr r9, [sp, #44] @ r9 = hev_thresh (stack argument)
yading@10 977 b vp8_h_loop_filter_inner_armv6
yading@10 978 endfunc
yading@10 979
@ Shared full (non-inner) loop filter for a horizontal (column) edge.
@ Register contract on entry matches vp8_h_loop_filter_inner_armv6:
@   r0 = dst, r1 = stride, r2 = flim_E (both halfwords), r3 = flim_I,
@   r9 = hev thresh, [sp, #0] = pushed second pointer.
@ The full filter writes three pixels on each side of the edge, so the
@ transposed results are stored back with byte stores (strb) around the
@ edge rather than a register-wide transpose/store.
yading@10 980 function vp8_h_loop_filter_armv6
yading@10 981 mov r5, #4 @ row-group counter
yading@10 982 sub sp, sp, #16 @ scratch for the filter macros
yading@10 983
@ Splat thresholds to all four byte lanes.
yading@10 984 orr r3, r3, r3, lsl #16
yading@10 985 orr r9, r9, r9, lsl #16
yading@10 986 orr r4, r2, r2, lsl #8 @ flimE
yading@10 987 orr r2, r3, r3, lsl #8 @ flimI
yading@10 988 orr r3, r9, r9, lsl #8 @ thresh
yading@10 989 1:
yading@10 990 sub r0, r0, #4 @ 4 bytes left of the edge
yading@10 991 ldr r7, [r0, r1]
yading@10 992 ldr_post r6, r0, r1, lsl #1
yading@10 993 ldr lr, [r0, r1]
yading@10 994 ldr_post r8, r0, r1, lsl #1
yading@10 995
yading@10 996 filter_mask_h
yading@10 997 cmp lr, #0 @ lr == 0: whole group unfiltered
yading@10 998 it eq
yading@10 999 addeq r0, r0, r1, lsl #1 @ skip forward the remaining 2 rows
yading@10 1000 beq 2f
yading@10 1001
yading@10 1002 ldr r6, [sp]
yading@10 1003 sub r0, r0, #2
yading@10 1004
yading@10 1005 filter_h inner=0
yading@10 1006 filter_1
yading@10 1007
@ Store op0/oq0 for the 4 rows as interleaved halfwords.
yading@10 1008 sub r0, r0, r1, lsl #1
yading@10 1009 uxtb16 r6, r10
yading@10 1010 uxtb16 r7, r8
yading@10 1011 uxtb16 r10, r10, ror #8
yading@10 1012 uxtb16 r8, r8, ror #8
yading@10 1013 orr r6, r6, r7, lsl #8
yading@10 1014 orr r10, r10, r8, lsl #8
yading@10 1015 lsr r7, r6, #16
yading@10 1016 lsr r8, r10, #16
yading@10 1017
yading@10 1018 add r0, r0, #1
yading@10 1019 strh_post r6, r0, r1
yading@10 1020 strh_post r10, r0, r1
yading@10 1021 strh_post r7, r0, r1
yading@10 1022 strh_post r8, r0, r1
yading@10 1023
yading@10 1024 filter_2
yading@10 1025
yading@10 1026 sub r0, r0, r1, lsl #2 @ back to the first row
yading@10 1027 add r0, r0, #3
yading@10 1028
@ Per row: store op1/oq1 bytes while gathering p2/q2 bytes (packed four
@ per register) for filter_3 below.
yading@10 1029 ldrb r11, [r0, #-5] @ p2 for 1/7th difference
yading@10 1030 strb r10, [r0, #-4] @ op1
yading@10 1031 strb r8, [r0, #-1] @ oq1
yading@10 1032 ldrb_post r9, r0, r1 @ q2 for 1/7th difference
yading@10 1033
yading@10 1034 lsr r10, r10, #8
yading@10 1035 lsr r8, r8, #8
yading@10 1036
yading@10 1037 ldrb r6, [r0, #-5]
yading@10 1038 strb r10, [r0, #-4]
yading@10 1039 strb r8, [r0, #-1]
yading@10 1040 ldrb_post r7, r0, r1
yading@10 1041
yading@10 1042 lsr r10, r10, #8
yading@10 1043 lsr r8, r8, #8
yading@10 1044 orr r11, r11, r6, lsl #8
yading@10 1045 orr r9, r9, r7, lsl #8
yading@10 1046
yading@10 1047 ldrb r6, [r0, #-5]
yading@10 1048 strb r10, [r0, #-4]
yading@10 1049 strb r8, [r0, #-1]
yading@10 1050 ldrb_post r7, r0, r1
yading@10 1051
yading@10 1052 lsr r10, r10, #8
yading@10 1053 lsr r8, r8, #8
yading@10 1054 orr r11, r11, r6, lsl #16
yading@10 1055 orr r9, r9, r7, lsl #16
yading@10 1056
yading@10 1057 ldrb r6, [r0, #-5]
yading@10 1058 strb r10, [r0, #-4]
yading@10 1059 strb r8, [r0, #-1]
yading@10 1060 ldrb_post r7, r0, r1
yading@10 1061 orr r11, r11, r6, lsl #24
yading@10 1062 orr r9, r9, r7, lsl #24
yading@10 1063
yading@10 1064 filter_3
yading@10 1065
@ Store op2/oq2 bytes, one row at a time.
yading@10 1066 sub r0, r0, r1, lsl #2
yading@10 1067 strb r10, [r0, #-5]
yading@10 1068 strb_post r8, r0, r1
yading@10 1069 lsr r10, r10, #8
yading@10 1070 lsr r8, r8, #8
yading@10 1071 strb r10, [r0, #-5]
yading@10 1072 strb_post r8, r0, r1
yading@10 1073 lsr r10, r10, #8
yading@10 1074 lsr r8, r8, #8
yading@10 1075 strb r10, [r0, #-5]
yading@10 1076 strb_post r8, r0, r1
yading@10 1077 lsr r10, r10, #8
yading@10 1078 lsr r8, r8, #8
yading@10 1079 strb r10, [r0, #-5]
yading@10 1080 strb_post r8, r0, r1
yading@10 1081
yading@10 1082 sub r0, r0, #2
yading@10 1083 2:
yading@10 1084 cmp r5, #3 @ after 1st group ...
yading@10 1085 it eq
yading@10 1086 ldreq r0, [sp, #16] @ ... switch to the pushed 2nd pointer
yading@10 1087 subs r5, r5, #1
yading@10 1088 bne 1b
yading@10 1089
yading@10 1090 add sp, sp, #16
yading@10 1091 pop {r0, r4-r11, pc} @ r0 slot holds the pushed pointer
yading@10 1092 endfunc
yading@10 1093
@ Full horizontal-edge filter for 16 rows of luma: pushes dst + 8*stride
@ (second 8 rows) for the shared loop's half-way switch, loads hev_thresh
@ into r9, splats flim_E, and branches to the shared implementation.
yading@10 1094 function ff_vp8_h_loop_filter16_armv6, export=1
yading@10 1095 push {r4-r11, lr}
yading@10 1096 add r12, r0, r1, lsl #3 @ dst + 8*stride
yading@10 1097 push {r12}
yading@10 1098 ldr r9, [sp, #40] @ r9 = hev_thresh (stack argument)
yading@10 1099 orr r2, r2, r2, lsl #16 @ r2 = flim_E in both halfwords
yading@10 1100 b vp8_h_loop_filter_armv6
yading@10 1101 endfunc
yading@10 1102
yading@10 1103 function ff_vp8_h_loop_filter8uv_armv6, export=1
yading@10 1104 push {r1, r4-r11, lr}
yading@10 1105 mov r1, r2
yading@10 1106 orr r2, r3, r3, lsl #16
yading@10 1107 ldr r3, [sp, #40]
yading@10 1108 ldr r9, [sp, #44]
yading@10 1109 b vp8_h_loop_filter_armv6
yading@10 1110 endfunc
yading@10 1111
yading@10 1112 .ltorg
yading@10 1113
yading@10 1114 @ MC
yading@10 1115
yading@10 1116 @ void put_vp8_pixels16(uint8_t *dst, int dststride, uint8_t *src,
yading@10 1117 @ int srcstride, int h, int mx, int my)
@ Full-pel 16-wide copy: two rows of 16 bytes per iteration using word
@ loads and doubleword stores.  h (from the stack) is assumed even.
@ mx/my are unused (full-pel position).
yading@10 1118 function ff_put_vp8_pixels16_armv6, export=1
yading@10 1119 push {r4-r11}
yading@10 1120 ldr r12, [sp, #32] @ h
yading@10 1121 1:
yading@10 1122 subs r12, r12, #2 @ two rows per iteration
yading@10 1123 ldr r5, [r2, #4]
yading@10 1124 ldr r6, [r2, #8]
yading@10 1125 ldr r7, [r2, #12]
yading@10 1126 ldr_post r4, r2, r3 @ row 0, advance src by srcstride
yading@10 1127 ldr r9, [r2, #4]
yading@10 1128 ldr r10, [r2, #8]
yading@10 1129 ldr r11, [r2, #12]
yading@10 1130 ldr_post r8, r2, r3 @ row 1
yading@10 1131 strd r6, r7, [r0, #8]
yading@10 1132 strd_post r4, r5, r0, r1 @ store row 0, advance dst
yading@10 1133 strd r10, r11, [r0, #8]
yading@10 1134 strd_post r8, r9, r0, r1 @ store row 1
yading@10 1135 bgt 1b
yading@10 1136 pop {r4-r11}
yading@10 1137 bx lr
yading@10 1138 endfunc
yading@10 1139
yading@10 1140 @ void put_vp8_pixels8(uint8_t *dst, int dststride, uint8_t *src,
yading@10 1141 @ int srcstride, int h, int mx, int my)
@ Full-pel 8-wide copy: four rows of 8 bytes per iteration.  h (from the
@ stack) is assumed to be a multiple of 4.  mx/my are unused.
yading@10 1142 function ff_put_vp8_pixels8_armv6, export=1
yading@10 1143 push {r4-r11}
yading@10 1144 ldr r12, [sp, #32] @ h
yading@10 1145 1:
yading@10 1146 subs r12, r12, #4 @ four rows per iteration
yading@10 1147 ldr r5, [r2, #4]
yading@10 1148 ldr_post r4, r2, r3
yading@10 1149 ldr r7, [r2, #4]
yading@10 1150 ldr_post r6, r2, r3
yading@10 1151 ldr r9, [r2, #4]
yading@10 1152 ldr_post r8, r2, r3
yading@10 1153 ldr r11, [r2, #4]
yading@10 1154 ldr_post r10, r2, r3
yading@10 1155 strd_post r4, r5, r0, r1
yading@10 1156 strd_post r6, r7, r0, r1
yading@10 1157 strd_post r8, r9, r0, r1
yading@10 1158 strd_post r10, r11, r0, r1
yading@10 1159 bgt 1b
yading@10 1160 pop {r4-r11}
yading@10 1161 bx lr
yading@10 1162 endfunc
yading@10 1163
yading@10 1164 @ void put_vp8_pixels4(uint8_t *dst, int dststride, uint8_t *src,
yading@10 1165 @ int srcstride, int h, int mx, int my)
@ Full-pel 4-wide copy: four rows of 4 bytes per iteration with plain
@ word loads/stores.  h is assumed to be a multiple of 4; mx/my unused.
yading@10 1166 function ff_put_vp8_pixels4_armv6, export=1
yading@10 1167 ldr r12, [sp, #0] @ h
yading@10 1168 push {r4-r6,lr}
yading@10 1169 1:
yading@10 1170 subs r12, r12, #4 @ four rows per iteration
yading@10 1171 ldr_post r4, r2, r3
yading@10 1172 ldr_post r5, r2, r3
yading@10 1173 ldr_post r6, r2, r3
yading@10 1174 ldr_post lr, r2, r3
yading@10 1175 str_post r4, r0, r1
yading@10 1176 str_post r5, r0, r1
yading@10 1177 str_post r6, r0, r1
yading@10 1178 str_post lr, r0, r1
yading@10 1179 bgt 1b
yading@10 1180 pop {r4-r6,pc}
yading@10 1181 endfunc
yading@10 1182
yading@10 1183 @ note: worst case sum of all 6-tap filter values * 255 is 0x7f80 so 16 bit
yading@10 1184 @ arithmetic can be used to apply filters
@ VP8 six-tap subpel coefficients for the even fractional positions
@ (2/8, 4/8, 6/8).  The name encodes the storage order: taps 1,3,2,4,5,6
@ of the spec's {t1..t6}, padded with two zeros to 16 bytes per row, so
@ smlad can consume them as signed halfword pairs.
yading@10 1185 const sixtap_filters_13245600, align=4
yading@10 1186 .short 2, 108, -11, 36, -8, 1, 0, 0
yading@10 1187 .short 3, 77, -16, 77, -16, 3, 0, 0
yading@10 1188 .short 1, 36, -8, 108, -11, 2, 0, 0
yading@10 1189 endconst
yading@10 1190
@ VP8 four-tap subpel coefficients for the odd fractional positions
@ (1/8, 3/8, 5/8, 7/8); the spec's outer zero taps are dropped.  As with
@ the six-tap table, the name gives the storage order: taps 1,3,2,4.
yading@10 1191 const fourtap_filters_1324, align=4
yading@10 1192 .short -6, 12, 123, -1
yading@10 1193 .short -9, 50, 93, -6
yading@10 1194 .short -6, 93, 50, -9
yading@10 1195 .short -1, 123, 12, -6
yading@10 1196 endconst
yading@10 1197
@ Emit a public single-pass MC entry point (horizontal-only or
@ vertical-only) that adapts the C call (dst, dststride, src, srcstride,
@ h, mx, my) to the inner convention documented below, then jumps into
@ the inner function at +4 — past its initial push, since an identical
@ register set is already pushed here.
yading@10 1198 .macro vp8_mc_1 name, size, hv
yading@10 1199 function ff_put_vp8_\name\size\()_\hv\()_armv6, export=1
yading@10 1200 sub r1, r1, #\size @ r1 = dststride - block_width
yading@10 1201 mov r12, sp @ r12 -> stack args {h, mx, my}
yading@10 1202 push {r1, r4-r11, lr} @ same push as the inner function
yading@10 1203 ldm r12, {r5-r7} @ r5 = h, r6 = mx, r7 = my
yading@10 1204 mov r4, #\size
yading@10 1205 stm r12, {r4, r5} @ caller stack area := {block_width, h}
yading@10 1206 orr r12, r6, r7 @ filter index (the unused one is 0)
yading@10 1207 b vp8_put_\name\()_\hv\()_armv6 + 4 @ skip inner push
yading@10 1208 endfunc
yading@10 1209 .endm
yading@10 1210
@ Instantiate the single-pass (h-only / v-only) subpel and bilinear
@ MC entry points for every block width.
yading@10 1211 vp8_mc_1 epel, 16, h6
yading@10 1212 vp8_mc_1 epel, 16, v6
yading@10 1213 vp8_mc_1 epel, 8, h6
yading@10 1214 vp8_mc_1 epel, 8, v6
yading@10 1215 vp8_mc_1 epel, 8, h4
yading@10 1216 vp8_mc_1 epel, 8, v4
yading@10 1217 vp8_mc_1 epel, 4, h6
yading@10 1218 vp8_mc_1 epel, 4, v6
yading@10 1219 vp8_mc_1 epel, 4, h4
yading@10 1220 vp8_mc_1 epel, 4, v4
yading@10 1221
yading@10 1222 vp8_mc_1 bilin, 16, h
yading@10 1223 vp8_mc_1 bilin, 16, v
yading@10 1224 vp8_mc_1 bilin, 8, h
yading@10 1225 vp8_mc_1 bilin, 8, v
yading@10 1226 vp8_mc_1 bilin, 4, h
yading@10 1227 vp8_mc_1 bilin, 4, v
yading@10 1228
yading@10 1229 /* True relational expressions have the value -1 in the GNU assembler,
yading@10 1230 +1 in Apple's. */
/* TMPSIZE: bytes of intermediate buffer for the horizontal pass of a
   two-pass filter: \size bytes per row times (8 rows, plus 8 more when
   \size > 4, plus the \ytaps - 1 extra rows the vertical pass needs).
   The sign of the 8*(\size > 4) term is flipped between the variants to
   cancel the assemblers' differing truth values. */
yading@10 1231 #ifdef __APPLE__
yading@10 1232 # define TMPSIZE \size * (8 + 8*(\size > 4) + \ytaps - 1)
yading@10 1233 #else
yading@10 1234 # define TMPSIZE \size * (8 - 8*(\size > 4) + \ytaps - 1)
yading@10 1235 #endif
yading@10 1236
@ Emit a public two-pass MC entry point: run the horizontal filter into a
@ TMPSIZE temp buffer on the stack (starting \ytaps/2 - 1 rows above the
@ requested block so the vertical taps have context), then run the
@ vertical filter from the temp buffer to dst.
yading@10 1237 .macro vp8_mc_hv name, size, h, v, ytaps
yading@10 1238 function ff_put_vp8_\name\size\()_\h\v\()_armv6, export=1
yading@10 1239 push {r0, r1, r4, lr} @ save dst, dststride for pass 2
yading@10 1240 add r0, sp, #16 @ -> caller stack args {h, mx, my}
yading@10 1241 sub sp, sp, #TMPSIZE+16 @ temp buffer + inner arg area
yading@10 1242 ldm r0, {r0, r12} @ r0 = h, r12 = mx (pass-1 filter index)
yading@10 1243 mov r4, #\size
yading@10 1244 add lr, r0, #\ytaps-1 @ rows pass 1 must produce
yading@10 1245 .if \ytaps > 2
yading@10 1246 sub r2, r2, r3, lsl #\ytaps >> 1 & 1 @ back src up for top context
yading@10 1247 .endif
yading@10 1248 stm sp, {r4, lr} @ [sp] = block_width, [sp+4] = rows
yading@10 1249 add r0, sp, #16 @ dst = temp buffer
yading@10 1250 mov r1, #0 @ packed rows: dststride - width = 0
yading@10 1251 bl vp8_put_\name\()_\h\()_armv6
@ Pass 2: temp buffer -> real dst with the vertical filter.
yading@10 1252 add r0, sp, #TMPSIZE+16 @ -> saved {dst, dststride}
yading@10 1253 ldr lr, [sp, #TMPSIZE+16+16] @ h (caller stack)
yading@10 1254 ldm r0, {r0, r1} @ r0 = dst, r1 = dststride
yading@10 1255 mov r3, #\size @ srcstride = temp row width
yading@10 1256 ldr r12, [sp, #TMPSIZE+16+16+8] @ my (pass-2 filter index)
yading@10 1257 str lr, [sp, #4] @ [sp+4] = height for pass 2
yading@10 1258 add r2, sp, #16 + \size * (\ytaps / 2 - 1) @ src = centred temp rows
yading@10 1259 sub r1, r1, #\size @ dststride - block_width
yading@10 1260 bl vp8_put_\name\()_\v\()_armv6
yading@10 1261 add sp, sp, #TMPSIZE+16+8
yading@10 1262 pop {r4, pc}
yading@10 1263 endfunc
yading@10 1264 .endm
yading@10 1265
@ Instantiate the two-pass (horizontal + vertical) MC entry points.
yading@10 1266 vp8_mc_hv epel, 16, h6, v6, 6
yading@10 1267 vp8_mc_hv epel, 8, h6, v6, 6
yading@10 1268 vp8_mc_hv epel, 8, h4, v6, 6
yading@10 1269 vp8_mc_hv epel, 8, h6, v4, 4
yading@10 1270 vp8_mc_hv epel, 8, h4, v4, 4
yading@10 1271 vp8_mc_hv epel, 4, h6, v6, 6
yading@10 1272 vp8_mc_hv epel, 4, h4, v6, 6
yading@10 1273 vp8_mc_hv epel, 4, h6, v4, 4
yading@10 1274 vp8_mc_hv epel, 4, h4, v4, 4
yading@10 1275
yading@10 1276 vp8_mc_hv bilin, 16, h, v, 2
yading@10 1277 vp8_mc_hv bilin, 8, h, v, 2
yading@10 1278 vp8_mc_hv bilin, 4, h, v, 2
yading@10 1279
@ Narrow four 32-bit filter accumulators (each already includes the +0x40
@ rounding term) to pixels: shift right by 7, clamp to [0,255] with
@ usat16, and pack the four bytes into \r0 (\r0 lowest byte .. \r3
@ highest).  pkhbt ... lsl #9 places \r2/\r3 >> 7 in the top halfword
@ (<<16 and >>7 combined).  \r1-\r3 are clobbered.
yading@10 1280 .macro sat4 r0, r1, r2, r3
yading@10 1281 asr \r0, \r0, #7
yading@10 1282 asr \r1, \r1, #7
yading@10 1283 pkhbt \r0, \r0, \r2, lsl #9
yading@10 1284 pkhbt \r1, \r1, \r3, lsl #9
yading@10 1285 usat16 \r0, #8, \r0
yading@10 1286 usat16 \r1, #8, \r1
yading@10 1287 orr \r0, \r0, \r1, lsl #8
yading@10 1288 .endm
yading@10 1289
yading@10 1290 @ Calling convention for the inner MC functions:
yading@10 1291 @ r0 dst
yading@10 1292 @ r1 dst_stride - block_width
yading@10 1293 @ r2 src
yading@10 1294 @ r3 src_stride
yading@10 1295 @ r4 block_width
yading@10 1296 @ r12 filter_index
yading@10 1297 @ [sp] block_width
yading@10 1298 @ [sp+4] height
yading@10 1299 @ [sp+8] scratch
yading@10 1300
@ Horizontal six-tap subpel filter (see calling convention above).
@ Produces 4 output pixels per iteration: unpacks 9 source bytes into
@ even/odd halfword pairs and accumulates three smlad (2 taps each) per
@ output with 0x40 rounding, then sat4 packs the results.
@ r1/r3/lr hold the three coefficient pairs for the whole row loop, so
@ dststride-width and the loop counters live on the stack.
yading@10 1301 function vp8_put_epel_h6_armv6
yading@10 1302 push {r1, r4-r11, lr}
yading@10 1303 sub r2, r2, #2 @ six-tap window starts at src - 2
yading@10 1304 movrel lr, sixtap_filters_13245600 - 16
yading@10 1305 add lr, lr, r12, lsl #3 @ 16 bytes per filter row; mx is even
yading@10 1306 sub r3, r3, r4 @ srcstride - block_width
yading@10 1307 str r3, [sp, #48]
yading@10 1308 ldm lr, {r1, r3, lr} @ three packed coefficient pairs
yading@10 1309 1:
yading@10 1310 ldr r7, [r2, #5] @ src[5-8]
yading@10 1311 ldr r6, [r2, #2] @ src[2-5]
yading@10 1312 ldr r5, [r2], #4 @ src[0-3]
yading@10 1313
yading@10 1314 pkhtb r7, r7, r7, asr #8 @ src[8,7,7,6]
yading@10 1315 uxtb16 r9, r6, ror #8 @ src[5] | src[3]
yading@10 1316 uxtb16 r6, r6 @ src[4] | src[2]
yading@10 1317 uxtb16 r8, r5, ror #8 @ src[3] | src[1]
yading@10 1318 uxtb16 r11, r7, ror #8 @ src[8] | src[7]
yading@10 1319 uxtb16 r7, r7 @ src[7] | src[6]
yading@10 1320 uxtb16 r5, r5 @ src[2] | src[0]
yading@10 1321
yading@10 1322 mov r10, #0x40 @ rounding for the >>7 in sat4
yading@10 1323 smlad r5, r5, r1, r10 @ filter[0][0]
yading@10 1324 smlad r11, r11, lr, r10 @ filter[3][2]
yading@10 1325 smlad r12, r7, lr, r10 @ filter[2][2]
yading@10 1326 smlad r10, r8, r1, r10 @ filter[1][0]
yading@10 1327 smlad r5, r8, r3, r5 @ filter[0][1]
yading@10 1328 smlad r11, r9, r1, r11 @ filter[3][0]
yading@10 1329 smlad r12, r9, r3, r12 @ filter[2][1]
yading@10 1330 pkhtb r9, r9, r6, asr #16 @ src[5] | src[4]
yading@10 1331 smlad r10, r6, r3, r10 @ filter[1][1]
yading@10 1332 pkhbt r7, r9, r7, lsl #16 @ src[6] | src[4]
yading@10 1333 smlad r5, r9, lr, r5 @ filter[0][2]
yading@10 1334 pkhtb r8, r7, r9, asr #16 @ src[6] | src[5]
yading@10 1335 smlad r11, r7, r3, r11 @ filter[3][1]
yading@10 1336 smlad r9, r8, lr, r10 @ filter[1][2]
yading@10 1337 smlad r7, r6, r1, r12 @ filter[2][0]
yading@10 1338
yading@10 1339 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1340
yading@10 1341 sat4 r5, r9, r7, r11
yading@10 1342 str r5, [r0], #4
yading@10 1343
yading@10 1344 bne 1b
yading@10 1345
@ End of row: reload width, decrement height, advance src/dst pointers.
yading@10 1346 add r4, sp, #40
yading@10 1347 ldm r4, {r4, r5, r12} @ width, rows left, srcstride - width
yading@10 1348 ldr r6, [sp] @ dststride - width (pushed r1)
yading@10 1349 subs r5, r5, #1
yading@10 1350 add r2, r2, r12
yading@10 1351 str r5, [sp, #44]
yading@10 1352 add r0, r0, r6
yading@10 1353
yading@10 1354 bne 1b
yading@10 1355
yading@10 1356 pop {r1, r4-r11, pc}
yading@10 1357 endfunc
yading@10 1358
@ Vertical six-tap subpel filter (see calling convention above).
@ Produces a 4x1 output per iteration from six input rows (stride*0 ..
@ stride*5, i.e. src-2*stride .. src+3*stride).  Bytes are unpacked and
@ repacked into column-pairs so each smlad applies one coefficient pair
@ to two vertically adjacent samples of one column.
yading@10 1359 function vp8_put_epel_v6_armv6
yading@10 1360 push {r1, r4-r11, lr}
yading@10 1361 movrel lr, sixtap_filters_13245600 - 16
yading@10 1362 add lr, lr, r12, lsl #3 @ 16 bytes per filter row; my is even
yading@10 1363 str r3, [sp, #48] @ keep srcstride; r3 is reused below
yading@10 1364 1:
yading@10 1365 add r1, r3, r3, lsl #1 @ stride * 3
yading@10 1366 ldr_nreg r5, r2, r3 @ src[0,1,2,3 + stride * 1]
yading@10 1367 ldr r6, [r2, r3] @ src[0,1,2,3 + stride * 3]
yading@10 1368 ldr r7, [r2, r3, lsl #1] @ src[0,1,2,3 + stride * 4]
yading@10 1369 ldr r8, [r2, r1] @ src[0,1,2,3 + stride * 5]
yading@10 1370
yading@10 1371 uxtb16 r9, r5, ror #8 @ src[3 + s*1] | src[1 + s*1]
yading@10 1372 uxtb16 r10, r6, ror #8 @ src[3 + s*3] | src[1 + s*3]
yading@10 1373 uxtb16 r11, r7, ror #8 @ src[3 + s*4] | src[1 + s*4]
yading@10 1374 uxtb16 r12, r8, ror #8 @ src[3 + s*5] | src[1 + s*5]
yading@10 1375 uxtb16 r5, r5 @ src[2 + s*1] | src[0 + s*1]
yading@10 1376 uxtb16 r6, r6 @ src[2 + s*3] | src[0 + s*3]
yading@10 1377 uxtb16 r7, r7 @ src[2 + s*4] | src[0 + s*4]
yading@10 1378 uxtb16 r8, r8 @ src[2 + s*5] | src[0 + s*5]
yading@10 1379 pkhbt r1, r9, r10, lsl #16 @ src[1 + s*3] | src[1 + s*1]
yading@10 1380 pkhtb r9, r10, r9, asr #16 @ src[3 + s*3] | src[3 + s*1]
yading@10 1381 pkhbt r10, r11, r12, lsl #16 @ src[1 + s*5] | src[1 + s*4]
yading@10 1382 pkhtb r11, r12, r11, asr #16 @ src[3 + s*5] | src[3 + s*4]
yading@10 1383 pkhbt r12, r5, r6, lsl #16 @ src[0 + s*3] | src[0 + s*1]
yading@10 1384 pkhtb r5, r6, r5, asr #16 @ src[2 + s*3] | src[2 + s*1]
yading@10 1385 pkhbt r6, r7, r8, lsl #16 @ src[0 + s*5] | src[0 + s*4]
yading@10 1386 pkhtb r7, r8, r7, asr #16 @ src[2 + s*5] | src[2 + s*4]
yading@10 1387
yading@10 1388 ldr r8, [lr, #4] @ middle coefficient pair
yading@10 1389 mov r3, #0x40 @ rounding for the >>7 in sat4
yading@10 1390 smlad r12, r12, r8, r3 @ filter[0][1]
yading@10 1391 smlad r1, r1, r8, r3 @ filter[1][1]
yading@10 1392 smlad r5, r5, r8, r3 @ filter[2][1]
yading@10 1393 smlad r9, r9, r8, r3 @ filter[3][1]
yading@10 1394 ldr r8, [lr, #8] @ last coefficient pair
yading@10 1395 ldr r3, [sp, #48] @ restore srcstride
yading@10 1396 smlad r12, r6, r8, r12 @ filter[0][2]
yading@10 1397 smlad r1, r10, r8, r1 @ filter[1][2]
yading@10 1398 ldr_nreg r6, r2, r3, lsl #1 @ src[0,1,2,3 + stride * 0]
yading@10 1399 ldr r10, [r2], #4 @ src[0,1,2,3 + stride * 2]
yading@10 1400 smlad r5, r7, r8, r5 @ filter[2][2]
yading@10 1401 smlad r9, r11, r8, r9 @ filter[3][2]
yading@10 1402
yading@10 1403 uxtb16 r7, r6, ror #8 @ src[3 + s*0] | src[1 + s*0]
yading@10 1404 uxtb16 r11, r10, ror #8 @ src[3 + s*2] | src[1 + s*2]
yading@10 1405 uxtb16 r6, r6 @ src[2 + s*0] | src[0 + s*0]
yading@10 1406 uxtb16 r10, r10 @ src[2 + s*2] | src[0 + s*2]
yading@10 1407
yading@10 1408 pkhbt r8, r7, r11, lsl #16 @ src[1 + s*2] | src[1 + s*0]
yading@10 1409 pkhtb r7, r11, r7, asr #16 @ src[3 + s*2] | src[3 + s*0]
yading@10 1410 pkhbt r11, r6, r10, lsl #16 @ src[0 + s*2] | src[0 + s*0]
yading@10 1411 pkhtb r6, r10, r6, asr #16 @ src[2 + s*2] | src[2 + s*0]
yading@10 1412
yading@10 1413 ldr r10, [lr] @ first coefficient pair
yading@10 1414 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1415 smlad r12, r11, r10, r12 @ filter[0][0]
yading@10 1416 smlad r1, r8, r10, r1 @ filter[1][0]
yading@10 1417 smlad r5, r6, r10, r5 @ filter[2][0]
yading@10 1418 smlad r9, r7, r10, r9 @ filter[3][0]
yading@10 1419
yading@10 1420 sat4 r12, r1, r5, r9
yading@10 1421 str r12, [r0], #4
yading@10 1422
yading@10 1423 bne 1b
yading@10 1424
@ End of row: rewind src by the width, advance one row, count down.
yading@10 1425 ldrd r4, r5, [sp, #40] @ width, rows left
yading@10 1426 ldr r6, [sp] @ dststride - width (pushed r1)
yading@10 1427 subs r5, r5, #1
yading@10 1428 sub r2, r2, r4
yading@10 1429 str r5, [sp, #44]
yading@10 1430 add r0, r0, r6
yading@10 1431 add r2, r2, r3
yading@10 1432
yading@10 1433 bne 1b
yading@10 1434
yading@10 1435 pop {r1, r4-r11, pc}
yading@10 1436 endfunc
yading@10 1437
@ Horizontal four-tap subpel filter (see calling convention above).
@ Four output pixels per iteration from 7 source bytes; two smlad
@ (one coefficient pair each) per output, +0x40 rounding, sat4 pack.
@ The four-tap window starts at src - 1.
yading@10 1438 function vp8_put_epel_h4_armv6
yading@10 1439 push {r1, r4-r11, lr}
yading@10 1440 subs r2, r2, #1 @ four-tap window starts at src - 1
yading@10 1441 movrel lr, fourtap_filters_1324 - 4
yading@10 1442 add lr, lr, r12, lsl #2 @ 8 bytes per filter row; mx is odd
yading@10 1443 sub r3, r3, r4 @ srcstride - block_width
yading@10 1444 ldm lr, {r5, r6} @ two packed coefficient pairs
yading@10 1445 ldr lr, [sp, #44] @ lr = height counter
yading@10 1446 1:
yading@10 1447 ldr r9, [r2, #3]
yading@10 1448 ldr r8, [r2, #2]
yading@10 1449 ldr r7, [r2], #4
yading@10 1450
yading@10 1451 uxtb16 r9, r9, ror #8 @ src[6] | src[4]
yading@10 1452 uxtb16 r10, r8, ror #8 @ src[5] | src[3]
yading@10 1453 uxtb16 r8, r8 @ src[4] | src[2]
yading@10 1454 uxtb16 r11, r7, ror #8 @ src[3] | src[1]
yading@10 1455 uxtb16 r7, r7 @ src[2] | src[0]
yading@10 1456
yading@10 1457 mov r12, #0x40 @ rounding for the >>7 in sat4
yading@10 1458 smlad r9, r9, r6, r12 @ filter[3][1]
yading@10 1459 smlad r7, r7, r5, r12 @ filter[0][0]
yading@10 1460 smlad r9, r10, r5, r9 @ filter[3][0]
yading@10 1461 smlad r10, r10, r6, r12 @ filter[2][1]
yading@10 1462 smlad r12, r11, r5, r12 @ filter[1][0]
yading@10 1463 smlad r7, r11, r6, r7 @ filter[0][1]
yading@10 1464 smlad r10, r8, r5, r10 @ filter[2][0]
yading@10 1465 smlad r12, r8, r6, r12 @ filter[1][1]
yading@10 1466
yading@10 1467 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1468
yading@10 1469 sat4 r7, r12, r10, r9
yading@10 1470 str r7, [r0], #4
yading@10 1471
yading@10 1472 bne 1b
yading@10 1473
@ End of row: reload width, advance src/dst by their row remainders.
yading@10 1474 subs lr, lr, #1
yading@10 1475 ldr r4, [sp, #40]
yading@10 1476 add r2, r2, r3
yading@10 1477 add r0, r0, r1
yading@10 1478
yading@10 1479 bne 1b
yading@10 1480
yading@10 1481 pop {r1, r4-r11, pc}
yading@10 1482 endfunc
yading@10 1483
@ Vertical four-tap subpel filter (see calling convention above).
@ Four output pixels per iteration from four rows (src - stride ..
@ src + 2*stride), repacked into column pairs for smlad as in the
@ six-tap vertical filter.  r3 (srcstride) is spilled to [sp, #48]
@ because it is temporarily reused as a data register.
yading@10 1484 function vp8_put_epel_v4_armv6
yading@10 1485 push {r1, r4-r11, lr}
yading@10 1486 movrel lr, fourtap_filters_1324 - 4
yading@10 1487 add lr, lr, r12, lsl #2 @ 8 bytes per filter row; my is odd
yading@10 1488 ldm lr, {r5, r6} @ two packed coefficient pairs
yading@10 1489 str r3, [sp, #48] @ keep srcstride; r3 is reused below
yading@10 1490 1:
yading@10 1491 ldr lr, [r2, r3, lsl #1]
yading@10 1492 ldr r12, [r2, r3]
yading@10 1493 ldr_nreg r7, r2, r3
yading@10 1494 ldr r11, [r2], #4
yading@10 1495
yading@10 1496 uxtb16 r8, lr, ror #8 @ src[3 + s*3] | src[1 + s*3]
yading@10 1497 uxtb16 r9, r12, ror #8 @ src[3 + s*2] | src[1 + s*2]
yading@10 1498 uxtb16 r3, r7, ror #8 @ src[3 + s*0] | src[1 + s*0]
yading@10 1499 uxtb16 r1, r11, ror #8 @ src[3 + s*1] | src[1 + s*1]
yading@10 1500 uxtb16 lr, lr @ src[2 + s*3] | src[0 + s*3]
yading@10 1501 uxtb16 r12, r12 @ src[2 + s*2] | src[0 + s*2]
yading@10 1502 uxtb16 r7, r7 @ src[2 + s*0] | src[0 + s*0]
yading@10 1503 uxtb16 r11, r11 @ src[2 + s*1] | src[0 + s*1]
yading@10 1504 pkhbt r10, r1, r8, lsl #16 @ src[1 + s*3] | src[1 + s*1]
yading@10 1505 pkhtb r1, r8, r1, asr #16 @ src[3 + s*3] | src[3 + s*1]
yading@10 1506 pkhbt r8, r3, r9, lsl #16 @ src[1 + s*2] | src[1 + s*0]
yading@10 1507 pkhtb r3, r9, r3, asr #16 @ src[3 + s*2] | src[3 + s*0]
yading@10 1508 pkhbt r9, r11, lr, lsl #16 @ src[0 + s*3] | src[0 + s*1]
yading@10 1509 pkhtb r11, lr, r11, asr #16 @ src[2 + s*3] | src[2 + s*1]
yading@10 1510 pkhbt lr, r7, r12, lsl #16 @ src[0 + s*2] | src[0 + s*0]
yading@10 1511 pkhtb r7, r12, r7, asr #16 @ src[2 + s*2] | src[2 + s*0]
yading@10 1512
yading@10 1513 mov r12, #0x40 @ rounding for the >>7 in sat4
yading@10 1514 smlad r9, r9, r6, r12 @ filter[0][1]
yading@10 1515 smlad r10, r10, r6, r12 @ filter[1][1]
yading@10 1516 smlad r11, r11, r6, r12 @ filter[2][1]
yading@10 1517 smlad r1, r1, r6, r12 @ filter[3][1]
yading@10 1518 smlad r9, lr, r5, r9 @ filter[0][0]
yading@10 1519 smlad r10, r8, r5, r10 @ filter[1][0]
yading@10 1520 smlad r11, r7, r5, r11 @ filter[2][0]
yading@10 1521 smlad r1, r3, r5, r1 @ filter[3][0]
yading@10 1522
yading@10 1523 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1524 ldr r3, [sp, #48] @ restore srcstride
yading@10 1525
yading@10 1526 sat4 r9, r10, r11, r1
yading@10 1527 str r9, [r0], #4
yading@10 1528
yading@10 1529 bne 1b
yading@10 1530
@ End of row: rewind src by the width, advance one row, count down.
yading@10 1531 ldr r4, [sp, #40] @ width
yading@10 1532 ldr r12, [sp, #44] @ rows left
yading@10 1533 add r2, r2, r3
yading@10 1534 ldr r9, [sp, #0] @ dststride - width (pushed r1)
yading@10 1535 subs r12, r12, #1
yading@10 1536 sub r2, r2, r4
yading@10 1537 str r12, [sp, #44]
yading@10 1538 add r0, r0, r9
yading@10 1539
yading@10 1540 bne 1b
yading@10 1541
yading@10 1542 pop {r1, r4-r11, pc}
yading@10 1543 endfunc
yading@10 1544
@ Horizontal bilinear filter (see calling convention above).
@ r5 packs the two weights (8 - mx) | (mx << 16) so a single smlad
@ computes a*(8-mx) + b*mx per output; +4 rounding then >>3.
@ Four output pixels per iteration.
yading@10 1545 function vp8_put_bilin_h_armv6
yading@10 1546 push {r1, r4-r11, lr}
yading@10 1547 rsb r5, r12, r12, lsl #16 @ (mx << 16) - mx
yading@10 1548 ldr r12, [sp, #44] @ r12 = height counter
yading@10 1549 sub r3, r3, r4 @ srcstride - block_width
yading@10 1550 add r5, r5, #8 @ -> (mx << 16) | (8 - mx)
yading@10 1551 1:
yading@10 1552 ldrb r6, [r2], #1
yading@10 1553 ldrb r7, [r2], #1
yading@10 1554 ldrb r8, [r2], #1
yading@10 1555 ldrb r9, [r2], #1
yading@10 1556 ldrb lr, [r2]
yading@10 1557
yading@10 1558 pkhbt r6, r6, r7, lsl #16 @ src[1] | src[0]
yading@10 1559 pkhbt r7, r7, r8, lsl #16 @ src[2] | src[1]
yading@10 1560 pkhbt r8, r8, r9, lsl #16 @ src[3] | src[2]
yading@10 1561 pkhbt r9, r9, lr, lsl #16 @ src[4] | src[3]
yading@10 1562
yading@10 1563 mov r10, #4 @ rounding for the >>3
yading@10 1564 smlad r6, r6, r5, r10
yading@10 1565 smlad r7, r7, r5, r10
yading@10 1566 smlad r8, r8, r5, r10
yading@10 1567 smlad r9, r9, r5, r10
yading@10 1568
yading@10 1569 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1570
@ >>3 and pack the four results into one word (lsl #13 = lsl 16, asr 3).
yading@10 1571 asr r6, #3
yading@10 1572 asr r7, #3
yading@10 1573 pkhbt r6, r6, r8, lsl #13
yading@10 1574 pkhbt r7, r7, r9, lsl #13
yading@10 1575 orr r6, r6, r7, lsl #8
yading@10 1576 str r6, [r0], #4
yading@10 1577
yading@10 1578 bne 1b
yading@10 1579
@ End of row: reload width, advance src/dst by their row remainders.
yading@10 1580 ldr r4, [sp, #40]
yading@10 1581 subs r12, r12, #1
yading@10 1582 add r2, r2, r3
yading@10 1583 add r0, r0, r1
yading@10 1584
yading@10 1585 bne 1b
yading@10 1586
yading@10 1587 pop {r1, r4-r11, pc}
yading@10 1588 endfunc
yading@10 1589
@ Vertical bilinear filter (see calling convention above).
@ Same weighting scheme as the horizontal version — r5 packs
@ (my << 16) | (8 - my) — but each halfword pair holds two vertically
@ adjacent samples of one column.  Four output pixels per iteration.
yading@10 1590 function vp8_put_bilin_v_armv6
yading@10 1591 push {r1, r4-r11, lr}
yading@10 1592 rsb r5, r12, r12, lsl #16 @ (my << 16) - my
yading@10 1593 ldr r12, [sp, #44] @ r12 = height counter
yading@10 1594 add r5, r5, #8 @ -> (my << 16) | (8 - my)
yading@10 1595 1:
yading@10 1596 ldrb r10, [r2, r3] @ next-row byte for column 0
yading@10 1597 ldrb r6, [r2], #1
yading@10 1598 ldrb r11, [r2, r3]
yading@10 1599 ldrb r7, [r2], #1
yading@10 1600 ldrb lr, [r2, r3]
yading@10 1601 ldrb r8, [r2], #1
yading@10 1602 ldrb r9, [r2, r3]
yading@10 1603 pkhbt r6, r6, r10, lsl #16 @ next | current, column 0
yading@10 1604 ldrb r10, [r2], #1
yading@10 1605 pkhbt r7, r7, r11, lsl #16
yading@10 1606 pkhbt r8, r8, lr, lsl #16
yading@10 1607 pkhbt r9, r10, r9, lsl #16
yading@10 1608
yading@10 1609 mov r10, #4 @ rounding for the >>3
yading@10 1610 smlad r6, r6, r5, r10
yading@10 1611 smlad r7, r7, r5, r10
yading@10 1612 smlad r8, r8, r5, r10
yading@10 1613 smlad r9, r9, r5, r10
yading@10 1614
yading@10 1615 subs r4, r4, #4 @ 4 output pixels per iteration
yading@10 1616
@ >>3 and pack the four results into one word (lsl #13 = lsl 16, asr 3).
yading@10 1617 asr r6, #3
yading@10 1618 asr r7, #3
yading@10 1619 pkhbt r6, r6, r8, lsl #13
yading@10 1620 pkhbt r7, r7, r9, lsl #13
yading@10 1621 orr r6, r6, r7, lsl #8
yading@10 1622 str r6, [r0], #4
yading@10 1623
yading@10 1624 bne 1b
yading@10 1625
@ End of row: rewind src by the width, advance one row, count down.
yading@10 1626 ldr r4, [sp, #40]
yading@10 1627 subs r12, r12, #1
yading@10 1628 add r2, r2, r3
yading@10 1629 add r0, r0, r1
yading@10 1630 sub r2, r2, r4
yading@10 1631
yading@10 1632 bne 1b
yading@10 1633 pop {r1, r4-r11, pc}
yading@10 1634 endfunc