/*
 * Copyright (c) 2012
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Authors:  Darko Laus    (darko@mips.com)
 *           Djordje Pesut (djordje@mips.com)
 *           Mirjana Vulin (mvulin@mips.com)
 *
 * AAC decoding functions optimized for MIPS
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Reference: libavcodec/aacdec.c
 */

#ifndef AVCODEC_MIPS_AACDEC_FLOAT_H
#define AVCODEC_MIPS_AACDEC_FLOAT_H

#include "libavcodec/aac.h"

#if HAVE_INLINE_ASM && HAVE_MIPSFPU
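/**
 * Scale two codebook entries: the low two 4-bit fields of idx select entries
 * from the codebook vector v, both are multiplied by *scale, stored at dst,
 * and the advanced pointer (dst + 2) is returned.
 *
 * Editor's sketch of the equivalent portable C, mirroring what the asm below
 * computes (VMUL2_ref is an illustrative name, not part of FFmpeg):
 * @code
 * static inline float *VMUL2_ref(float *dst, const float *v, unsigned idx,
 *                                const float *scale)
 * {
 *     float s = *scale;
 *     *dst++ = v[ idx       & 15] * s;
 *     *dst++ = v[(idx >> 4) & 15] * s;
 *     return dst;
 * }
 * @endcode
 */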
static inline float *VMUL2_mips(float *dst, const float *v, unsigned idx,
                                const float *scale)
{
    float temp0, temp1, temp2;
    int temp3, temp4;
    float *ret;

    __asm__ volatile(
        "andi   %[temp3],  %[idx],      15        \n\t"
        "ext    %[temp4],  %[idx],      4,  4     \n\t"
        "sll    %[temp3],  %[temp3],    2         \n\t"
        "sll    %[temp4],  %[temp4],    2         \n\t"
        "lwc1   %[temp2],  0(%[scale])            \n\t"
        "lwxc1  %[temp0],  %[temp3](%[v])         \n\t"
        "lwxc1  %[temp1],  %[temp4](%[v])         \n\t"
        "mul.s  %[temp0],  %[temp0],    %[temp2]  \n\t"
        "mul.s  %[temp1],  %[temp1],    %[temp2]  \n\t"
        "addiu  %[ret],    %[dst],      8         \n\t"
        "swc1   %[temp0],  0(%[dst])              \n\t"
        "swc1   %[temp1],  4(%[dst])              \n\t"

        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [temp3]"=&r"(temp3),
          [temp4]"=&r"(temp4), [ret]"=&r"(ret)
        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
          [dst]"r"(dst)
        : "memory"
    );
    return ret;
}

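/**
 * Same idea as VMUL2_mips but for four outputs: the four 2-bit fields of idx
 * select codebook entries, each is multiplied by *scale, and dst + 4 is
 * returned.
 *
 * Editor's sketch of the equivalent portable C (VMUL4_ref is an illustrative
 * name, not part of FFmpeg):
 * @code
 * static inline float *VMUL4_ref(float *dst, const float *v, unsigned idx,
 *                                const float *scale)
 * {
 *     float s = *scale;
 *     *dst++ = v[ idx       & 3] * s;
 *     *dst++ = v[(idx >> 2) & 3] * s;
 *     *dst++ = v[(idx >> 4) & 3] * s;
 *     *dst++ = v[(idx >> 6) & 3] * s;
 *     return dst;
 * }
 * @endcode
 */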
static inline float *VMUL4_mips(float *dst, const float *v, unsigned idx,
                                const float *scale)
{
    int temp0, temp1, temp2, temp3;
    float temp4, temp5, temp6, temp7, temp8;
    float *ret;

    __asm__ volatile(
        "andi   %[temp0],  %[idx],      3         \n\t"
        "ext    %[temp1],  %[idx],      2,  2     \n\t"
        "ext    %[temp2],  %[idx],      4,  2     \n\t"
        "ext    %[temp3],  %[idx],      6,  2     \n\t"
        "sll    %[temp0],  %[temp0],    2         \n\t"
        "sll    %[temp1],  %[temp1],    2         \n\t"
        "sll    %[temp2],  %[temp2],    2         \n\t"
        "sll    %[temp3],  %[temp3],    2         \n\t"
        "lwc1   %[temp4],  0(%[scale])            \n\t"
        "lwxc1  %[temp5],  %[temp0](%[v])         \n\t"
        "lwxc1  %[temp6],  %[temp1](%[v])         \n\t"
        "lwxc1  %[temp7],  %[temp2](%[v])         \n\t"
        "lwxc1  %[temp8],  %[temp3](%[v])         \n\t"
        "mul.s  %[temp5],  %[temp5],    %[temp4]  \n\t"
        "mul.s  %[temp6],  %[temp6],    %[temp4]  \n\t"
        "mul.s  %[temp7],  %[temp7],    %[temp4]  \n\t"
        "mul.s  %[temp8],  %[temp8],    %[temp4]  \n\t"
        "addiu  %[ret],    %[dst],      16        \n\t"
        "swc1   %[temp5],  0(%[dst])              \n\t"
        "swc1   %[temp6],  4(%[dst])              \n\t"
        "swc1   %[temp7],  8(%[dst])              \n\t"
        "swc1   %[temp8],  12(%[dst])             \n\t"

        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
          [temp8]"=&f"(temp8), [ret]"=&r"(ret)
        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
          [dst]"r"(dst)
        : "memory"
    );
    return ret;
}

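/**
 * Signed variant of VMUL2_mips: bit 1 of sign flips the sign of the first
 * output and bit 0 flips the second, by XORing the sign bit directly into the
 * IEEE-754 representation of the scale factor.
 *
 * Editor's sketch of the equivalent portable C (VMUL2S_ref is an illustrative
 * name; union av_intfloat32 is from libavutil/intfloat.h):
 * @code
 * #include "libavutil/intfloat.h"
 *
 * static inline float *VMUL2S_ref(float *dst, const float *v, unsigned idx,
 *                                 unsigned sign, const float *scale)
 * {
 *     union av_intfloat32 s0, s1;
 *
 *     s0.f  = s1.f = *scale;
 *     s0.i ^= (sign >> 1) << 31;   // bit 1 of sign negates the first scale
 *     s1.i ^=  sign       << 31;   // bit 0 of sign negates the second
 *     *dst++ = v[ idx       & 15] * s0.f;
 *     *dst++ = v[(idx >> 4) & 15] * s1.f;
 *     return dst;
 * }
 * @endcode
 */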
static inline float *VMUL2S_mips(float *dst, const float *v, unsigned idx,
                                 unsigned sign, const float *scale)
{
    int temp0, temp1, temp2, temp3, temp4, temp5;
    float temp6, temp7, temp8, temp9;
    float *ret;

    __asm__ volatile(
        "andi   %[temp0],  %[idx],      15        \n\t"
        "ext    %[temp1],  %[idx],      4,  4     \n\t"
        "lw     %[temp4],  0(%[scale])            \n\t"
        "srl    %[temp2],  %[sign],     1         \n\t"
        "sll    %[temp3],  %[sign],     31        \n\t"
        "sll    %[temp2],  %[temp2],    31        \n\t"
        "sll    %[temp0],  %[temp0],    2         \n\t"
        "sll    %[temp1],  %[temp1],    2         \n\t"
        "lwxc1  %[temp8],  %[temp0](%[v])         \n\t"
        "lwxc1  %[temp9],  %[temp1](%[v])         \n\t"
        "xor    %[temp5],  %[temp4],    %[temp2]  \n\t"
        "xor    %[temp4],  %[temp4],    %[temp3]  \n\t"
        "mtc1   %[temp5],  %[temp6]               \n\t"
        "mtc1   %[temp4],  %[temp7]               \n\t"
        "mul.s  %[temp8],  %[temp8],    %[temp6]  \n\t"
        "mul.s  %[temp9],  %[temp9],    %[temp7]  \n\t"
        "addiu  %[ret],    %[dst],      8         \n\t"
        "swc1   %[temp8],  0(%[dst])              \n\t"
        "swc1   %[temp9],  4(%[dst])              \n\t"

        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
          [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
          [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
          [ret]"=&r"(ret)
        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
          [dst]"r"(dst), [sign]"r"(sign)
        : "memory"
    );
    return ret;
}

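/**
 * Signed variant of VMUL4_mips: the sign bits are packed at the top of
 * 'sign', and bits 12-14 of idx record whether each of the first three
 * outputs consumed a sign bit, i.e. whether 'sign' must be shifted up before
 * applying the next one.
 *
 * Editor's sketch of the equivalent portable C (VMUL4S_ref is an illustrative
 * name; union av_intfloat32 is from libavutil/intfloat.h):
 * @code
 * #include "libavutil/intfloat.h"
 *
 * static inline float *VMUL4S_ref(float *dst, const float *v, unsigned idx,
 *                                 unsigned sign, const float *scale)
 * {
 *     union av_intfloat32 s = { .f = *scale }, t0, t1, t2, t3;
 *
 *     t0.i = s.i ^ (sign & 1U << 31);
 *     sign <<= (idx >> 12) & 1;          // advance if output 0 used a sign bit
 *     t1.i = s.i ^ (sign & 1U << 31);
 *     sign <<= (idx >> 13) & 1;          // advance if output 1 used a sign bit
 *     t2.i = s.i ^ (sign & 1U << 31);
 *     sign <<= (idx >> 14) & 1;          // advance if output 2 used a sign bit
 *     t3.i = s.i ^ (sign & 1U << 31);
 *
 *     *dst++ = v[ idx       & 3] * t0.f;
 *     *dst++ = v[(idx >> 2) & 3] * t1.f;
 *     *dst++ = v[(idx >> 4) & 3] * t2.f;
 *     *dst++ = v[(idx >> 6) & 3] * t3.f;
 *     return dst;
 * }
 * @endcode
 */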
static inline float *VMUL4S_mips(float *dst, const float *v, unsigned idx,
                                 unsigned sign, const float *scale)
{
    int temp0, temp1, temp2, temp3, temp4;
    float temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17;
    float *ret;
    unsigned int mask = 1U << 31;

    __asm__ volatile(
        "lw     %[temp0],   0(%[scale])             \n\t"
        "and    %[temp1],   %[idx],      3          \n\t"
        "ext    %[temp2],   %[idx],      2,  2      \n\t"
        "ext    %[temp3],   %[idx],      4,  2      \n\t"
        "ext    %[temp4],   %[idx],      6,  2      \n\t"
        "sll    %[temp1],   %[temp1],    2          \n\t"
        "sll    %[temp2],   %[temp2],    2          \n\t"
        "sll    %[temp3],   %[temp3],    2          \n\t"
        "sll    %[temp4],   %[temp4],    2          \n\t"
        "lwxc1  %[temp10],  %[temp1](%[v])          \n\t"
        "lwxc1  %[temp11],  %[temp2](%[v])          \n\t"
        "lwxc1  %[temp12],  %[temp3](%[v])          \n\t"
        "lwxc1  %[temp13],  %[temp4](%[v])          \n\t"
        "and    %[temp1],   %[sign],     %[mask]    \n\t"
        "ext    %[temp2],   %[idx],      12, 1      \n\t"
        "ext    %[temp3],   %[idx],      13, 1      \n\t"
        "ext    %[temp4],   %[idx],      14, 1      \n\t"
        "sllv   %[sign],    %[sign],     %[temp2]   \n\t"
        "xor    %[temp1],   %[temp0],    %[temp1]   \n\t"
        "and    %[temp2],   %[sign],     %[mask]    \n\t"
        "mtc1   %[temp1],   %[temp14]               \n\t"
        "xor    %[temp2],   %[temp0],    %[temp2]   \n\t"
        "sllv   %[sign],    %[sign],     %[temp3]   \n\t"
        "mtc1   %[temp2],   %[temp15]               \n\t"
        "and    %[temp3],   %[sign],     %[mask]    \n\t"
        "sllv   %[sign],    %[sign],     %[temp4]   \n\t"
        "xor    %[temp3],   %[temp0],    %[temp3]   \n\t"
        "and    %[temp4],   %[sign],     %[mask]    \n\t"
        "mtc1   %[temp3],   %[temp16]               \n\t"
        "xor    %[temp4],   %[temp0],    %[temp4]   \n\t"
        "mtc1   %[temp4],   %[temp17]               \n\t"
        "mul.s  %[temp10],  %[temp10],   %[temp14]  \n\t"
        "mul.s  %[temp11],  %[temp11],   %[temp15]  \n\t"
        "mul.s  %[temp12],  %[temp12],   %[temp16]  \n\t"
        "mul.s  %[temp13],  %[temp13],   %[temp17]  \n\t"
        "addiu  %[ret],     %[dst],      16         \n\t"
        "swc1   %[temp10],  0(%[dst])               \n\t"
        "swc1   %[temp11],  4(%[dst])               \n\t"
        "swc1   %[temp12],  8(%[dst])               \n\t"
        "swc1   %[temp13],  12(%[dst])              \n\t"

        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
          [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
          [temp4]"=&r"(temp4), [temp10]"=&f"(temp10),
          [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
          [temp15]"=&f"(temp15), [temp16]"=&f"(temp16),
          [temp17]"=&f"(temp17), [ret]"=&r"(ret),
          [sign]"+r"(sign)
        : [idx]"r"(idx), [scale]"r"(scale), [v]"r"(v),
          [dst]"r"(dst), [mask]"r"(mask)
        : "memory"
    );
    return ret;
}

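/*
 * Override the generic VMUL* helpers so that the AAC decoder
 * (libavcodec/aacdec.c) uses the MIPS-optimized versions above when inline
 * asm and the MIPS FPU are available.
 */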
#define VMUL2 VMUL2_mips
#define VMUL4 VMUL4_mips
#define VMUL2S VMUL2S_mips
#define VMUL4S VMUL4S_mips
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */

#endif /* AVCODEC_MIPS_AACDEC_FLOAT_H */