annotate ffmpeg/libavcodec/arm/mpegaudiodsp_fixed_armv6.S @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
rev   line source
yading@10 1 /*
yading@10 2 * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
yading@10 3 *
yading@10 4 * This file is part of Libav.
yading@10 5 *
yading@10 6 * Libav is free software; you can redistribute it and/or
yading@10 7 * modify it under the terms of the GNU Lesser General Public
yading@10 8 * License as published by the Free Software Foundation; either
yading@10 9 * version 2.1 of the License, or (at your option) any later version.
yading@10 10 *
yading@10 11 * Libav is distributed in the hope that it will be useful,
yading@10 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
yading@10 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
yading@10 14 * Lesser General Public License for more details.
yading@10 15 *
yading@10 16 * You should have received a copy of the GNU Lesser General Public
yading@10 17 * License along with Libav; if not, write to the Free Software
yading@10 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
yading@10 19 */
yading@10 20
yading@10 21 #include "libavutil/arm/asm.S"
yading@10 22
@ No-op macro: accepts and discards any arguments. It is passed as the
@ \rsb parameter of sum8 below, so that the "\rsb rX, rX, #0" lines in
@ sum8 expand to nothing when the coefficients must NOT be negated.
yading@10 23 .macro skip args:vararg
yading@10 24 .endm
yading@10 25
@ sum8: accumulate eight signed 32x32->64-bit products into \lo:\hi.
@   \lo,\hi - 64-bit accumulator (low/high word), updated via smlal
@   \w      - window coefficient base; taps read at \w + 4*\offs + 4*64*i
@             for i = 0..7 (stride of 64 words between taps)
@   \p      - synth-buffer pointer, pre-incremented by 4 on entry; taps
@             read at \p + 4*64*i for i = 0..7
@   \t1-\t4 - scratch registers
@   \rsb    - "rsb" to negate each window coefficient ("rsb rX, rX, #0")
@             before the multiply, i.e. subtract the products; or "skip"
@             (default) so the rsb lines vanish and the products are added
@   \offs   - extra word offset applied to the window pointer only
@ Loads are interleaved with the smlal multiplies (hand scheduling); the
@ .irpc over "135" unrolls tap pairs (1,2), (3,4), (5,6), with tap 0
@ handled before the loop and tap 7 after it.
yading@10 26 .macro sum8 lo, hi, w, p, t1, t2, t3, t4, rsb=skip, offs=0
yading@10 27 ldr \t1, [\w, #4*\offs]
yading@10 28 ldr \t2, [\p, #4]!
yading@10 29 \rsb \t1, \t1, #0
yading@10 30 .irpc i, 135
yading@10 31 ldr \t3, [\w, #4*64*\i+4*\offs]
yading@10 32 ldr \t4, [\p, #4*64*\i]
yading@10 33 smlal \lo, \hi, \t1, \t2
yading@10 34 \rsb \t3, \t3, #0
yading@10 35 ldr \t1, [\w, #4*64*(\i+1)+4*\offs]
yading@10 36 ldr \t2, [\p, #4*64*(\i+1)]
yading@10 37 smlal \lo, \hi, \t3, \t4
yading@10 38 \rsb \t1, \t1, #0
yading@10 39 .endr
yading@10 40 ldr \t3, [\w, #4*64*7+4*\offs]
yading@10 41 ldr \t4, [\p, #4*64*7]
yading@10 42 smlal \lo, \hi, \t1, \t2
yading@10 43 \rsb \t3, \t3, #0
yading@10 44 smlal \lo, \hi, \t3, \t4
yading@10 45 .endm
yading@10 46
@ round: extract one output sample from the 64-bit accumulator \lo:\hi.
@   \rd = ssat16((\hi:\lo) >> 24)   (signed saturation to 16 bits)
@ The low 24 bits of \lo (the sub-sample fractional residue) are
@ deliberately KEPT in \lo (bic clears only the consumed top byte) and
@ \hi is reset to 0, so the accumulator pair can continue accumulating
@ with the residue carried into the next sample.
yading@10 47 .macro round rd, lo, hi
yading@10 48 lsr \rd, \lo, #24
yading@10 49 bic \lo, \lo, #0xff000000
yading@10 50 orr \rd, \rd, \hi, lsl #8
yading@10 51 mov \hi, #0
yading@10 52 ssat \rd, #16, \rd
yading@10 53 .endm
yading@10 54
@-----------------------------------------------------------------------
@ ff_mpadsp_apply_window_fixed_armv6
@ Applies the synthesis window to synth_buf and emits 32 saturated
@ 16-bit output samples with an element stride of "incr".
@ In:  r0 = synth_buf (int32_t *; the first 32 words are also copied to
@           synth_buf+512 below, so 512+32 words must be writable)
@      r1 = window    (int32_t *; taps read with a 64-word stride)
@      r2 = pointer to the 32-bit running sum / dither state (low word
@           read at entry, 24-bit residue written back before return)
@      r3 = samples   (int16_t *, output)
@      [sp, #40] after the push below = incr (output element stride)
@ NOTE(review): argument roles inferred from the inline comments
@ ("sum", "incr", "samples2") - confirm against the C prototype of
@ ff_mpadsp_apply_window_fixed().
@-----------------------------------------------------------------------
yading@10 55 function ff_mpadsp_apply_window_fixed_armv6, export=1
yading@10 56 push {r2,r4-r11,lr} @ r2 saved first so it can be popped as r4 at the end
yading@10 57
@ Mirror the first 32 words of synth_buf past its end so the windowing
@ loads below never need to wrap their indices.
yading@10 58 add r4, r0, #4*512 @ synth_buf + 512
yading@10 59 .rept 4
yading@10 60 ldm r0!, {r5-r12}
yading@10 61 stm r4!, {r5-r12}
yading@10 62 .endr
yading@10 63
yading@10 64 ldr r4, [sp, #40] @ incr
yading@10 65 sub r0, r0, #4*17 @ synth_buf + 16
yading@10 66 ldr r8, [r2] @ sum:low
yading@10 67 add r2, r0, #4*32 @ synth_buf + 48
yading@10 68 rsb r5, r4, r4, lsl #5 @ 31 * incr
yading@10 69 lsl r4, r4, #1 @ 2*incr = output byte stride
yading@10 70 asr r9, r8, #31 @ sum:high
yading@10 71 add r5, r3, r5, lsl #1 @ samples2
yading@10 72 add r6, r1, #4*32 @ w2
yading@10 73 str r4, [sp, #40] @ spill 2*incr; reloaded inside the loop
yading@10 74
@ Sample 0: eight taps added from window, eight taps subtracted (rsb)
@ from window+32 against synth_buf+48.
yading@10 75 sum8 r8, r9, r1, r0, r10, r11, r12, lr
yading@10 76 sum8 r8, r9, r1, r2, r10, r11, r12, lr, rsb, 32
yading@10 77 round r10, r8, r9
yading@10 78 strh_post r10, r3, r4 @ store sample, advance r3 by 2*incr (strh_post from asm.S)
yading@10 79
yading@10 80 mov lr, #15 @ 15 loop iterations, two output samples each
yading@10 81 1:
@ First half: walk r0/r1/r6 forward one word, then accumulate taps with
@ a 64-word stride into two 64-bit sums in parallel:
@   r8:r9 - "forward" sample (window taps from r1)
@   r4:r7 - mirrored sample (negated w2 taps from r6)
yading@10 82 ldr r12, [r0, #4]!
yading@10 83 ldr r11, [r6, #-4]!
yading@10 84 ldr r10, [r1, #4]!
yading@10 85 .irpc i, 0246
yading@10 86 .if \i
yading@10 87 ldr r11, [r6, #4*64*\i]
yading@10 88 ldr r10, [r1, #4*64*\i]
yading@10 89 .endif
yading@10 90 rsb r11, r11, #0
yading@10 91 smlal r8, r9, r10, r12
yading@10 92 ldr r10, [r0, #4*64*(\i+1)]
yading@10 93 .ifeq \i
yading@10 94 smull r4, r7, r11, r12 @ first product: smull initialises r4:r7
yading@10 95 .else
yading@10 96 smlal r4, r7, r11, r12
yading@10 97 .endif
yading@10 98 ldr r11, [r6, #4*64*(\i+1)]
yading@10 99 ldr r12, [r1, #4*64*(\i+1)]
yading@10 100 rsb r11, r11, #0
yading@10 101 smlal r8, r9, r12, r10
yading@10 102 .iflt \i-6
yading@10 103 ldr r12, [r0, #4*64*(\i+2)]
yading@10 104 .else
yading@10 105 ldr r12, [r2, #-4]! @ last i: start r2 walking backwards for the 2nd pass
yading@10 106 .endif
yading@10 107 smlal r4, r7, r11, r10
yading@10 108 .endr
@ Second half: same two accumulators, window taps offset by 32 words,
@ with the synth values from r2 negated (rsb) before the multiplies.
yading@10 109 .irpc i, 0246
yading@10 110 ldr r10, [r1, #4*64*\i+4*32]
yading@10 111 rsb r12, r12, #0
yading@10 112 ldr r11, [r6, #4*64*\i+4*32]
yading@10 113 smlal r8, r9, r10, r12
yading@10 114 ldr r10, [r2, #4*64*(\i+1)]
yading@10 115 smlal r4, r7, r11, r12
yading@10 116 ldr r12, [r1, #4*64*(\i+1)+4*32]
yading@10 117 rsb r10, r10, #0
yading@10 118 ldr r11, [r6, #4*64*(\i+1)+4*32]
yading@10 119 smlal r8, r9, r12, r10
yading@10 120 .iflt \i-6
yading@10 121 ldr r12, [r2, #4*64*(\i+2)]
yading@10 122 .else
yading@10 123 ldr r12, [sp, #40] @ reload 2*incr for the stores below
yading@10 124 .endif
yading@10 125 smlal r4, r7, r11, r10
yading@10 126 .endr
yading@10 127 round r10, r8, r9 @ forward sample from r8:r9 alone
@ round left the 24-bit residue in r8, so the mirrored sample below
@ includes it: r8:r9 += r4:r7, then round again.
yading@10 128 adds r8, r8, r4
yading@10 129 adc r9, r9, r7
yading@10 130 strh_post r10, r3, r12 @ *samples = r10; samples advances by incr elems
yading@10 131 round r11, r8, r9
yading@10 132 subs lr, lr, #1
yading@10 133 strh_dpost r11, r5, r12 @ *samples2 = r11; presumably steps r5 backwards - see asm.S
yading@10 134 bgt 1b
yading@10 135
@ Final sample: eight negated taps at window word offset 33.
yading@10 136 sum8 r8, r9, r1, r0, r10, r11, r12, lr, rsb, 33
yading@10 137 pop {r4} @ restore the r2 pushed at entry (sum/dither pointer)
yading@10 138 round r10, r8, r9
yading@10 139 str r8, [r4] @ write the 24-bit residue back for the next call
yading@10 140 strh r10, [r3]
yading@10 141
yading@10 142 pop {r4-r11,pc}
yading@10 143 endfunc