/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if HAVE_INLINE_ASM

#if ARCH_X86_32

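/*
 * MULL(a, b, shift): signed 32x32 -> 64 multiply, then an arithmetic
 * right shift of the 64-bit product, i.e. (int)(((int64_t)a * b) >> shift).
 * imull leaves the full product in edx:eax; shrdl shifts that register
 * pair right by `shift` into eax.
 */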
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}

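/*
 * MULH(a, b): high 32 bits of the signed 64-bit product a * b,
 * i.e. (int)(((int64_t)a * b) >> 32); imull leaves them in edx.
 */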
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}

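/*
 * MUL64(a, b): full signed 64-bit product of two 32-bit operands; the
 * "=A" constraint binds the result to the edx:eax register pair.
 */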
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}

#endif /* ARCH_X86_32 */

#if HAVE_CMOV
/* median of 3 */
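/* Branchless via cmov: the first two conditional moves leave
 * i = max(a, b) and a = min(a, b); the remaining compares then
 * select the middle value of the three inputs into i. */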
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i = b;
    __asm__ volatile (
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
#endif

#if HAVE_CMOV
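/*
 * COPY3_IF_LT(x, y, a, b, c, d): branch-free form of
 *     if (y < x) { x = y; a = b; c = d; }
 * a single compare drives all three conditional moves.
 */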
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3 \n\t"\
    "cmovl %3, %0 \n\t"\
    "cmovl %4, %1 \n\t"\
    "cmovl %5, %2 \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif

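/*
 * MASK_ABS(mask, level): sets mask = level >> 31 (all-ones if level is
 * negative, zero otherwise) and level = FFABS(level) without branching:
 * cltd sign-extends eax into edx, then the xor/sub pair negates level
 * when it is negative.
 */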
#define MASK_ABS(mask, level)               \
    __asm__ ("cltd          \n\t"           \
             "xorl %1, %0   \n\t"           \
             "subl %1, %0   \n\t"           \
             : "+a"(level), "=&d"(mask))

// avoid +32 for shift optimization (gcc should do that ...)
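// NEG_SSR32(a, s) = (int32_t)a >> (32 - s): the count is passed as -s
// because x86 masks shift counts to 5 bits, so -s and 32 - s coincide.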
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s)
{
    __asm__ ("sarl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
    );
    return a;
}

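// NEG_USR32(a, s) = (uint32_t)a >> (32 - s), the logical (unsigned)
// counterpart of NEG_SSR32 above.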
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s)
{
    __asm__ ("shrl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
    );
    return a;
}

#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_MATHOPS_H */