/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
#define ARCH_X86_KVM_REVERSE_CPUID_H

#include <uapi/asm/kvm.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>

/*
 * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
 * be directly used by KVM.  Note, these word values conflict with the kernel's
 * "bug" caps, but KVM doesn't use those.
 */
enum kvm_only_cpuid_leafs {
	CPUID_12_EAX	 = NCAPINTS,
	NR_KVM_CPU_CAPS,

	NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
};

#define KVM_X86_FEATURE(w, f)		((w)*32 + (f))

/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
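
/*
 * Illustrative expansion, derived from the definitions above: with
 * CPUID_12_EAX == NCAPINTS, the KVM-only SGX features land in the first
 * word immediately after the kernel's NCAPINTS feature words:
 *
 *	KVM_X86_FEATURE_SGX1 == KVM_X86_FEATURE(CPUID_12_EAX, 0)
 *	                     == NCAPINTS * 32 + 0
 *	KVM_X86_FEATURE_SGX2 == NCAPINTS * 32 + 1
 */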

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
	[CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
};
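
/*
 * Illustrative example of the table above: the KVM-only SGX word resolves to
 * CPUID leaf 0x12, sub-leaf (index) 0, register EAX:
 *
 *	const struct cpuid_reg cpuid = reverse_cpuid[CPUID_12_EAX];
 *
 * yields cpuid.function == 0x12, cpuid.index == 0, cpuid.reg == CPUID_EAX.
 */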

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
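
/*
 * Illustrative sketch: because reverse_cpuid_check() is __always_inline and
 * uses BUILD_BUG_ON(), passing a Linux-defined word fails at compile time
 * rather than at run time.  A hypothetical call such as
 *
 *	reverse_cpuid_check(CPUID_LNX_1);
 *
 * trips the first BUILD_BUG_ON() above, whereas
 *
 *	reverse_cpuid_check(CPUID_7_0_EBX);
 *
 * compiles because CPUID_7_0_EBX has a populated reverse_cpuid[] entry.
 */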

/*
 * Translate feature bits that are scattered in the kernel's cpufeatures word
 * into KVM feature words that align with hardware's definitions.
 */
static __always_inline u32 __feature_translate(int x86_feature)
{
	if (x86_feature == X86_FEATURE_SGX1)
		return KVM_X86_FEATURE_SGX1;
	else if (x86_feature == X86_FEATURE_SGX2)
		return KVM_X86_FEATURE_SGX2;

	return x86_feature;
}
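
/*
 * Illustrative example: SGX1 and SGX2 are the only scattered features handled
 * here, so __feature_translate() remaps them onto the KVM-only CPUID_12_EAX
 * word while every other feature passes through unchanged, e.g.
 *
 *	__feature_translate(X86_FEATURE_SGX2) == KVM_X86_FEATURE_SGX2
 *	__feature_translate(X86_FEATURE_AVX2) == X86_FEATURE_AVX2
 */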

static __always_inline u32 __feature_leaf(int x86_feature)
{
	return __feature_translate(x86_feature) / 32;
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	x86_feature = __feature_translate(x86_feature);

	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}
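
/*
 * Illustrative example of the encoding above: for X86_FEATURE_SGX2,
 * __feature_translate() yields KVM_X86_FEATURE_SGX2 == NCAPINTS * 32 + 1,
 * so __feature_leaf() returns CPUID_12_EAX and __feature_bit() returns
 * 1 << 1, i.e. the SGX2 bit in CPUID.0x12.0:EAX.
 */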

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}
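
/*
 * Illustrative example: for a hardware-defined feature such as
 * X86_FEATURE_AVX2 (assuming its usual home in the CPUID_7_0_EBX word,
 * CPUID.7.0:EBX bit 5), x86_feature_cpuid() returns the reverse_cpuid[]
 * entry {7, 0, CPUID_EBX}, i.e. the leaf/index/register KVM must look up
 * in the guest's CPUID entries to query or control the bit.
 */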

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}
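
/*
 * Note: the BUILD_BUG() in the default case expects every caller to pass a
 * compile-time-constant register.  Because the helpers below are
 * __always_inline and take constant X86_FEATURE_* values, the switch is
 * resolved at compile time and the default branch is eliminated; a
 * non-constant 'reg' would fail the build instead of returning NULL.
 */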

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}
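
/*
 * Illustrative usage sketch (hypothetical caller, assuming 'entry' points at
 * the guest's CPUID.0x12.0 entry obtained elsewhere):
 *
 *	if (cpuid_entry_has(entry, X86_FEATURE_SGX2))
 *		... guest CPUID advertises SGX2 ...
 *
 * cpuid_entry_get() returns the raw masked value; cpuid_entry_has() simply
 * converts it to a boolean.
 */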

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}
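
/*
 * Illustrative usage sketch (hypothetical): cpuid_entry_change() lets a caller
 * mirror a host capability into a guest entry with a single call, letting the
 * compiler emit CMOV where possible, e.g.
 *
 *	cpuid_entry_change(entry, X86_FEATURE_SGX2,
 *			   boot_cpu_has(X86_FEATURE_SGX2));
 *
 * which sets or clears the SGX2 bit in the guest's CPUID.0x12.0:EAX entry
 * based on whether the host reports SGX2.
 */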

#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */