xref: /openbmc/linux/arch/x86/kvm/cpuid.c (revision 7a6ee0bb)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			/* ECX[1]: 64B alignment in compacted form */
			if (compacted)
				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
			else
				offset = ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

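/*
 * Worked example (illustrative; the numbers below are typical of Intel parts,
 * not taken from this file): for xstate_bv = XFEATURE_MASK_YMM (bit 2), with
 * CPUID.0xD.2 reporting eax (size) = 256 and ebx (offset) = 576:
 *
 *   - standard format:  offset = ebx = 576, so ret = max(576, 576 + 256) = 832
 *   - compacted format: the first extended area starts right after the XSAVE
 *     header (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET = 576), and with ECX[1] clear
 *     ret = 576 + 256 = 832 as well.
 *
 * The two layouts diverge once the enabled features leave holes in the
 * standard map or require 64-byte alignment in the compacted one.
 */
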
/*
 * This one is tied to SSB in the user API, and not
 * visible in /proc/cpuinfo.
 */
#define KVM_X86_FEATURE_PSFD		(13*32+28) /* Predictive Store Forwarding Disable */

#define F feature_bit
#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)


static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function == function &&
		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
			return e;
	}

	return NULL;
}

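/*
 * Lookup semantics, by example: for an entry without
 * KVM_CPUID_FLAG_SIGNIFCANT_INDEX (the "SIGNIFCANT" spelling is historical
 * and baked into the uAPI), any index matches, e.g. a query for
 * (function=1, index=99) still returns the function 0x1 entry.  For an
 * indexed leaf such as 0x7, only an exact (function, index) pair matches.
 */
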
static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
			   struct kvm_cpuid_entry2 *entries,
			   int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}

static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
{
	u32 function;
	struct kvm_cpuid_entry2 *entry;

	vcpu->arch.kvm_cpuid_base = 0;

	for_each_possible_hypervisor_cpuid_base(function) {
		entry = kvm_find_cpuid_entry(vcpu, function, 0);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			BUILD_BUG_ON(sizeof(signature) > sizeof(KVM_SIGNATURE));
			if (!memcmp(signature, KVM_SIGNATURE, sizeof(signature))) {
				vcpu->arch.kvm_cpuid_base = function;
				break;
			}
		}
	}
}

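/*
 * Example: a VMM advertising both Hyper-V and KVM typically puts the Hyper-V
 * leaves at 0x40000000 and the KVM signature ("KVMKVMKVM\0\0\0") at the next
 * 0x100-aligned base, 0x40000100; the probe loop above walks those candidate
 * bases until the KVM signature is found.
 */
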
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
					      struct kvm_cpuid_entry2 *entries, int nent)
{
	u32 base = vcpu->arch.kvm_cpuid_base;

	if (!base)
		return NULL;

	return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES, 0);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * Cache the feature bitmap to avoid a CPUID lookup on every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
}

static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
				       int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);

	best = cpuid_entry2_find(entries, nent, 1, 0);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
			   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, 0);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}

	/*
	 * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
	 * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
	 * requested XCR0 value.  The enclave's XFRM must be a subset of XCR0
	 * at the time of EENTER, thus adjust the allowed XFRM by the guest's
	 * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
	 * '1' even on CPUs that don't support XSAVE.
	 */
	best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
	if (best) {
		best->ecx &= guest_supported_xcr0 & 0xffffffff;
		best->edx &= guest_supported_xcr0 >> 32;
		best->ecx |= XFEATURE_MASK_FPSSE;
	}
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);

static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;
	u64 guest_supported_xcr0;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

	vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
	    __cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu);

	/* Invoke the vendor callback only after the above state is updated. */
	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

	/*
	 * Refresh the MMU last: it must run after the vendor callback so that
	 * it picks up any vendor-specific adjustments to the reserved GPA
	 * bits.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

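/*
 * For example, a guest whose CPUID advertises 0x80000008.EAX[7:0] = 46 gets
 * MAXPHYADDR = 46; a guest without leaf 0x80000008 falls back to the legacy
 * 36-bit default.
 */
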
/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}

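/*
 * E.g. with MAXPHYADDR = 48 this is rsvd_bits(48, 63), i.e. GPA bits 63:48
 * are reserved before any encryption-bit (C-bit style) adjustments.
 */
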
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	__kvm_update_cpuid_runtime(vcpu, e2, nent);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
	 * the core vCPU model on the fly. It would've been better to forbid any
	 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
	 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
	 * whether the supplied CPUID data is equal to what's already set.
	 */
	if (vcpu->arch.last_vmentry_cpu != -1) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;

		kvfree(e2);
		return 0;
	}

	r = kvm_check_cpuid(vcpu, e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	kvm_update_kvm_cpuid_base(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}

/*
 * Legacy path: old userspace passes an array of kvm_cpuid_entry (which lacks
 * the index, flags and padding fields) to a new kernel; convert it to the
 * kvm_cpuid_entry2 format.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}

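/*
 * Userspace usage sketch (illustrative only; error handling elided, and the
 * 256-entry bound merely mirrors the kernel's KVM_MAX_CPUID_ENTRIES, which is
 * not part of the uAPI):
 *
 *	struct {
 *		struct kvm_cpuid2 hdr;
 *		struct kvm_cpuid_entry2 ents[256];
 *	} c = { .hdr.nent = 256 };
 *
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c);    (system ioctl)
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, &c);            (per-vCPU ioctl)
 */
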
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for non-scattered leafs. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_scattered for scattered leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}

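/*
 * Net effect, by example: after kvm_set_cpu_caps() below runs, a bit in
 * kvm_cpu_caps[CPUID_1_ECX] survives only if (a) the boot CPU's cached
 * x86_capability has it, (b) the F(...) mask passed to kvm_cpu_cap_mask()
 * includes it, and (c) raw CPUID on this CPU reports it.
 */
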
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
	unsigned int f_xfd = F(XFD);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
	unsigned int f_xfd = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
		F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
		F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
		F(AVX512VL));

	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU is not yet implemented for shadow paging and requires OSPKE to
	 * be set on the host.  Clear the cap if either condition doesn't hold.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

	kvm_cpu_cap_init_scattered(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | 0 /* PERFCTR_CORE */
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		__feature_bit(KVM_X86_FEATURE_PSFD)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);

	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance of
	 * KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

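/*
 * Vendor modules refine these caps further at load time; a minimal sketch
 * (the INVPCID pairing here is purely illustrative, not lifted from
 * vmx.c/svm.c):
 *
 *	if (cpu_has_vmx_invpcid())
 *		kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
 *	else
 *		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
 *
 * kvm_cpu_cap_check_and_set() (see cpuid.h) additionally requires the boot
 * CPU to report the feature before setting the cap.
 */
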
struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return NULL;

	entry = &array->entries[array->nent++];

	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1d:
	case 0x1e:
	case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}

	return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}

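/*
 * This is why KVM_GET_EMULATED_CPUID is so small: it reports only features
 * KVM can emulate in software even when the host CPU lacks them, e.g. MOVBE
 * via the instruction emulator and RDPID via RDTSCP; a VMM may OR these into
 * the set obtained from KVM_GET_SUPPORTED_CPUID.
 */
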
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * The guest architectural PMU is only supported if an
		 * architectural PMU exists on the host and the module
		 * parameters allow it.
		 */
		if (!cap.version || !enable_pmu)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		if (cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb, thus they can
	 * be handled by common code.
	 */
	case 0x1f:
	case 0xb:
		/*
		 * Populate entries until the level type (ECX[15:8]) of the
		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
		 * the starting entry, filled by the primary do_host_cpuid().
		 */
		for (i = 1; entry->ecx & 0xff00; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 0xd: {
		u64 permitted_xcr0 = supported_xcr0 & xstate_get_guest_group_perm();
		u64 permitted_xss = supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC))) {
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		} else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
			      SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
			      SGX_ATTR_KSS;
		entry->ebx &= 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	/* Intel AMX TILE */
	case 0x1d:
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
		 * provided, use the raw bare metal MAXPHYADDR as reductions to
		 * the HPAs do not affect GPAs.
		 */
		if (!tdp_enabled)
			g_phys_as = boot_cpu_data.x86_phys_bits;
		else if (!g_phys_as)
			g_phys_as = phys_as;

		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);

			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to content ourselves with checking only the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	vfree(array.entries);
	return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}

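/*
 * Worked example: for an Intel-flavored guest whose max basic leaf
 * (CPUID.0H:EAX) is 0xd, a query for CPUID.0x15 or CPUID.0x3f is out of
 * range and is redirected to CPUID.0xd with the original index preserved.
 * For an AMD or Hygon guest the function bails out above and the caller
 * reports all zeroes instead.
 */
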
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves. Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists. EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);