/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

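/*
 * A gpa is illegal if it has address bits at or above the guest's advertised
 * MAXPHYADDR, i.e. if hardware would treat it as a reserved-bit violation.
 */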
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
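
/*
 * Illustrative note: reverse_cpuid_check() compiles to nothing for a valid
 * word and fails the build otherwise.  E.g. assuming CPUID_LNX_2 is one of
 * the Linux-defined words from cpufeature.h:
 *
 *	reverse_cpuid_check(CPUID_7_ECX);	// OK, hardware-defined word
 *	reverse_cpuid_check(CPUID_LNX_2);	// BUILD_BUG_ON() fires
 */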

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
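
/*
 * Worked example (illustrative): assuming the cpufeatures.h encoding where
 * X86_FEATURE_XSAVE is defined as (4*32 + 26), i.e. word 4 (CPUID_1_ECX),
 * bit 26:
 *
 *	X86_FEATURE_XSAVE / 32	== 4		-> word CPUID_1_ECX
 *	X86_FEATURE_XSAVE & 31	== 26
 *	feature_bit(XSAVE)	== 1 << 26	-> mask for CPUID.1:ECX[26]
 */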

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

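/*
 * The accessors below map an X86_FEATURE_* constant to the matching register
 * of a CPUID entry via the reverse_cpuid table, then test, set or clear the
 * feature's bit within that register.
 */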
static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}
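
/*
 * Usage sketch (illustrative, modeled on how cpuid.c refreshes runtime
 * bits): mirror CR4.OSXSAVE into the guest's CPUID so the guest observes
 * CPUID.1:ECX.OSXSAVE tracking its own CR4 writes:
 *
 *	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
 *
 *	if (best)
 *		cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
 *				   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
 */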

static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 enum cpuid_leafs leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}
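
/*
 * Usage sketch (illustrative): when cpuid.c builds the entries reported to
 * userspace, hardware-reported words are overridden with KVM's supported
 * set, e.g. for CPUID.7.0:EBX:
 *
 *	cpuid_entry_override(entry, CPUID_7_0_EBX);
 */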

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}
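
/*
 * Usage sketch (illustrative): emulation gates on what the guest's CPUID
 * advertises, not on host capabilities, e.g. rejecting a guest WRMSR to
 * MSR_TSC_AUX when RDTSCP isn't enumerated:
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 *		return 1;	// #GP the WRMSR
 */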

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

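/*
 * kvm_cpu_caps caches the feature words KVM can expose to a guest (seeded
 * from boot_cpu_data in kvm_set_cpu_caps()); the helpers below adjust and
 * query KVM's view of a feature, again keyed by X86_FEATURE_* constants.
 */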
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}
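
/*
 * Usage sketch (illustrative, not actual vendor code): VMX/SVM setup trims
 * kvm_cpu_caps for features that need extra enabling, e.g.:
 *
 *	if (nested)				// hypothetical module param
 *		kvm_cpu_cap_set(X86_FEATURE_SVM);
 *	kvm_cpu_cap_check_and_set(X86_FEATURE_PAUSEFILTER);
 */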

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

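/*
 * PV feature filtering applies only if userspace opted in (e.g. via the
 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID capability, which sets pv_cpuid.enforce);
 * otherwise every PV feature is reported as available.
 */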
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}
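
/*
 * Usage sketch (illustrative): hypercall emulation checks the PV feature
 * before honoring the request, e.g. for the PV IPI hypercall:
 *
 *	if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
 *		break;	// treat the hypercall as unsupported
 */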

#endif