yading@10: /* yading@10: * Copyright (c) 2011 Mans Rullgard yading@10: * yading@10: * This file is part of Libav. yading@10: * yading@10: * Libav is free software; you can redistribute it and/or yading@10: * modify it under the terms of the GNU Lesser General Public yading@10: * License as published by the Free Software Foundation; either yading@10: * version 2.1 of the License, or (at your option) any later version. yading@10: * yading@10: * Libav is distributed in the hope that it will be useful, yading@10: * but WITHOUT ANY WARRANTY; without even the implied warranty of yading@10: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU yading@10: * Lesser General Public License for more details. yading@10: * yading@10: * You should have received a copy of the GNU Lesser General Public yading@10: * License along with Libav; if not, write to the Free Software yading@10: * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA yading@10: */ yading@10: yading@10: #ifndef AVCODEC_ARM_DCA_H yading@10: #define AVCODEC_ARM_DCA_H yading@10: yading@10: #include yading@10: yading@10: #include "config.h" yading@10: #include "libavcodec/mathops.h" yading@10: yading@10: #if HAVE_ARMV6_INLINE && AV_GCC_VERSION_AT_LEAST(4,4) && !CONFIG_THUMB yading@10: yading@10: #define decode_blockcodes decode_blockcodes yading@10: static inline int decode_blockcodes(int code1, int code2, int levels, yading@10: int *values) yading@10: { yading@10: int v0, v1, v2, v3, v4, v5; yading@10: yading@10: __asm__ ("smmul %0, %6, %10 \n" yading@10: "smmul %3, %7, %10 \n" yading@10: "smlabb %6, %0, %9, %6 \n" yading@10: "smlabb %7, %3, %9, %7 \n" yading@10: "smmul %1, %0, %10 \n" yading@10: "smmul %4, %3, %10 \n" yading@10: "sub %6, %6, %8, lsr #1 \n" yading@10: "sub %7, %7, %8, lsr #1 \n" yading@10: "smlabb %0, %1, %9, %0 \n" yading@10: "smlabb %3, %4, %9, %3 \n" yading@10: "smmul %2, %1, %10 \n" yading@10: "smmul %5, %4, %10 \n" yading@10: "str %6, [%11, #0] \n" yading@10: "str %7, [%11, #16] \n" yading@10: "sub %0, %0, %8, lsr #1 \n" yading@10: "sub %3, %3, %8, lsr #1 \n" yading@10: "smlabb %1, %2, %9, %1 \n" yading@10: "smlabb %4, %5, %9, %4 \n" yading@10: "smmul %6, %2, %10 \n" yading@10: "smmul %7, %5, %10 \n" yading@10: "str %0, [%11, #4] \n" yading@10: "str %3, [%11, #20] \n" yading@10: "sub %1, %1, %8, lsr #1 \n" yading@10: "sub %4, %4, %8, lsr #1 \n" yading@10: "smlabb %2, %6, %9, %2 \n" yading@10: "smlabb %5, %7, %9, %5 \n" yading@10: "str %1, [%11, #8] \n" yading@10: "str %4, [%11, #24] \n" yading@10: "sub %2, %2, %8, lsr #1 \n" yading@10: "sub %5, %5, %8, lsr #1 \n" yading@10: "str %2, [%11, #12] \n" yading@10: "str %5, [%11, #28] \n" yading@10: : "=&r"(v0), "=&r"(v1), "=&r"(v2), yading@10: "=&r"(v3), "=&r"(v4), "=&r"(v5), yading@10: "+&r"(code1), "+&r"(code2) yading@10: : "r"(levels - 1), "r"(-levels), yading@10: "r"(ff_inverse[levels]), "r"(values) yading@10: : "memory"); yading@10: yading@10: return code1 | code2; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: #if HAVE_NEON_INLINE && HAVE_ASM_MOD_Y yading@10: yading@10: #define int8x8_fmul_int32 int8x8_fmul_int32 yading@10: static inline void int8x8_fmul_int32(float *dst, const int8_t *src, int scale) yading@10: { yading@10: __asm__ ("vcvt.f32.s32 %2, %2, #4 \n" yading@10: "vld1.8 {d0}, [%1,:64] \n" yading@10: "vmovl.s8 q0, d0 \n" yading@10: "vmovl.s16 q1, d1 \n" yading@10: "vmovl.s16 q0, d0 \n" yading@10: "vcvt.f32.s32 q0, q0 \n" yading@10: "vcvt.f32.s32 q1, q1 \n" yading@10: "vmul.f32 q0, q0, %y2 \n" yading@10: "vmul.f32 q1, q1, %y2 \n" yading@10: "vst1.32 {q0-q1}, [%m0,:128] \n" yading@10: : "=Um"(*(float (*)[8])dst) yading@10: : "r"(src), "x"(scale) yading@10: : "d0", "d1", "d2", "d3"); yading@10: } yading@10: yading@10: #endif yading@10: yading@10: #endif /* AVCODEC_ARM_DCA_H */