xref: /openbmc/qemu/util/cpuinfo-i386.c (revision b18236897ca15c3db1506d8edb9a191dfe51429c)
1 /*
2  * SPDX-License-Identifier: GPL-2.0-or-later
3  * Host specific cpu identification for x86.
4  */
5 
6 #include "qemu/osdep.h"
7 #include "host/cpuinfo.h"
8 #ifdef CONFIG_CPUID_H
9 # include "qemu/cpuid.h"
10 #endif
11 
/*
 * Bitmask of CPUINFO_* features, filled in by cpuinfo_init().
 * Zero means "not yet initialized"; after init it always contains
 * at least CPUINFO_ALWAYS, so the cached value is distinguishable.
 */
unsigned cpuinfo;
13 
14 /* Called both as constructor and (possibly) via other constructors. */
15 unsigned __attribute__((constructor)) cpuinfo_init(void)
16 {
17     unsigned info = cpuinfo;
18 
19     if (info) {
20         return info;
21     }
22 
23 #ifdef CONFIG_CPUID_H
24     unsigned max, a, b, c, d, b7 = 0, c7 = 0;
25 
26     max = __get_cpuid_max(0, 0);
27 
28     if (max >= 7) {
29         __cpuid_count(7, 0, a, b7, c7, d);
30         info |= (b7 & bit_BMI ? CPUINFO_BMI1 : 0);
31         info |= (b7 & bit_BMI2 ? CPUINFO_BMI2 : 0);
32     }
33 
34     if (max >= 1) {
35         __cpuid(1, a, b, c, d);
36 
37         info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
38         info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);
39         info |= (c & bit_PCLMUL ? CPUINFO_PCLMUL : 0);
40 
41         /* Our AES support requires PSHUFB as well. */
42         info |= ((c & bit_AES) && (c & bit_SSSE3) ? CPUINFO_AES : 0);
43 
44         /* For AVX features, we must check available and usable. */
45         if ((c & bit_AVX) && (c & bit_OSXSAVE)) {
46             unsigned bv = xgetbv_low(0);
47 
48             if ((bv & 6) == 6) {
49                 info |= CPUINFO_AVX1;
50                 info |= (b7 & bit_AVX2 ? CPUINFO_AVX2 : 0);
51 
52                 if ((bv & 0xe0) == 0xe0) {
53                     info |= (b7 & bit_AVX512F ? CPUINFO_AVX512F : 0);
54                     info |= (b7 & bit_AVX512VL ? CPUINFO_AVX512VL : 0);
55                     info |= (b7 & bit_AVX512BW ? CPUINFO_AVX512BW : 0);
56                     info |= (b7 & bit_AVX512DQ ? CPUINFO_AVX512DQ : 0);
57                     info |= (c7 & bit_AVX512VBMI2 ? CPUINFO_AVX512VBMI2 : 0);
58                 }
59 
60                 /*
61                  * The Intel SDM has added:
62                  *   Processors that enumerate support for Intel® AVX
63                  *   (by setting the feature flag CPUID.01H:ECX.AVX[bit 28])
64                  *   guarantee that the 16-byte memory operations performed
65                  *   by the following instructions will always be carried
66                  *   out atomically:
67                  *   - MOVAPD, MOVAPS, and MOVDQA.
68                  *   - VMOVAPD, VMOVAPS, and VMOVDQA when encoded with VEX.128.
69                  *   - VMOVAPD, VMOVAPS, VMOVDQA32, and VMOVDQA64 when encoded
70                  *     with EVEX.128 and k0 (masking disabled).
71                  * Note that these instructions require the linear addresses
72                  * of their memory operands to be 16-byte aligned.
73                  *
74                  * AMD has provided an even stronger guarantee that processors
75                  * with AVX provide 16-byte atomicity for all cacheable,
76                  * naturally aligned single loads and stores, e.g. MOVDQU.
77                  *
78                  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104688
79                  */
80                 __cpuid(0, a, b, c, d);
81                 if (c == signature_INTEL_ecx) {
82                     info |= CPUINFO_ATOMIC_VMOVDQA;
83                 } else if (c == signature_AMD_ecx) {
84                     info |= CPUINFO_ATOMIC_VMOVDQA | CPUINFO_ATOMIC_VMOVDQU;
85                 }
86             }
87         }
88     }
89 
90     max = __get_cpuid_max(0x8000000, 0);
91     if (max >= 1) {
92         __cpuid(0x80000001, a, b, c, d);
93         info |= (c & bit_LZCNT ? CPUINFO_LZCNT : 0);
94     }
95 #endif
96 
97     info |= CPUINFO_ALWAYS;
98     cpuinfo = info;
99     return info;
100 }
101