x86/cpu.c
/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"

#if HAVE_YASM

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"REG_b", %%"REG_S" \n\t"                      \
        "cpuid                       \n\t"                      \
        "xchg   %%"REG_b", %%"REG_S                             \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index))

#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl \n"               \
                      "pop %0 \n"               \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push %0 \n"              \
                      "popfl \n"                \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */

#if ARCH_X86_64

#define cpuid_test() 1

#elif HAVE_YASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register. */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif

/* Function to test if multimedia instructions are supported... */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support */
            xgetbv(0, eax, edx);
            if ((eax & 0x6) == 0x6)
                rval |= AV_CPU_FLAG_AVX;
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }

    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        /* Allow for selectively disabling SSE2 functions on AMD processors
           with SSE2 support but not SSE4a. This includes Athlon64, some
           Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
           than SSE2 often enough to utilize this special-case flag.
           AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
           so that SSE2 is used unless explicitly disabled by checking
           AV_CPU_FLAG_SSE2SLOW. */
        if (!strncmp(vendor.c, "AuthenticAMD", 12) &&
            rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040)) {
            rval |= AV_CPU_FLAG_SSE2SLOW;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but sometimes the SSSE3 version is slower than the SSE2 equivalent
         * on the Atom, but is generally faster on other processors supporting
         * SSSE3. This flag allows for selectively disabling certain SSSE3
         * functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;
    }

#endif /* cpuid */

    return rval;
}
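
For context, here is a minimal caller sketch (not part of cpu.c) showing how the flags computed above are usually consumed through the public libavutil API, i.e. av_get_cpu_flags() and the AV_CPU_FLAG_* bits declared in libavutil/cpu.h. The AV_CPU_FLAG_SSE2SLOW test mirrors the special-casing documented in the comments above; the program name and printed strings are illustrative only, and it assumes a build linked against libavutil.

#include <stdio.h>
#include "libavutil/cpu.h"

int main(void)
{
    /* On x86 these flags include the bits set by ff_get_cpu_flags_x86(). */
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX)
        printf("AVX usable (CPU and OS support present)\n");
    else if ((flags & AV_CPU_FLAG_SSE2) && !(flags & AV_CPU_FLAG_SSE2SLOW))
        printf("SSE2 usable and not flagged as slow\n");
    else if (flags & AV_CPU_FLAG_MMX)
        printf("MMX usable\n");
    else
        printf("no x86 SIMD detected\n");

    return 0;
}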