// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
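/*
 * Worked example for xstate_required_size(), with hypothetical CPUID.0xD
 * values: if xstate_bv has only the AVX bit (2) set and CPUID(0xD, 2)
 * reports eax = 256 (size) and ebx = 576 (offset), the standard format
 * yields max(576, 576 + 256) = 832 bytes, where 576 is the legacy region
 * plus header (XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE).  The compacted format
 * packs each enabled area directly after the previous one, which also
 * yields 832 here but comes out smaller whenever intermediate features are
 * absent from xstate_bv.
 */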
#define F feature_bit
#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)

static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function == function && (e->index == index ||
		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
			return e;
	}

	return NULL;
}

static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	/*
	 * The existing code assumes the virtual address width is 48 or 57
	 * bits in the canonical address checks; reject anything else in case
	 * that ever changes.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	return 0;
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);

	/*
	 * Save the feature bitmap to avoid a CPUID lookup on every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
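/*
 * Note on the split between the helper above and the one below:
 * kvm_update_cpuid_runtime() only refreshes CPUID bits that mirror mutable
 * guest state, e.g. CR4.OSXSAVE, the APIC global enable, and
 * MISC_ENABLE.MWAIT, so it can be invoked on the relevant register writes;
 * the heavyweight recalculation of derived state lives in
 * kvm_vcpu_after_set_cpuid().
 */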
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best)
		vcpu->arch.guest_supported_xcr0 = 0;
	else
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

	/*
	 * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
	 * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
	 * requested XCR0 value.  The enclave's XFRM must be a subset of XCR0
	 * at the time of EENTER, thus adjust the allowed XFRM by the guest's
	 * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
	 * '1' even on CPUs that don't support XSAVE.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
	if (best) {
		best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
		best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
		best->ecx |= XFEATURE_MASK_FPSSE;
	}

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu);

	/* Invoke the vendor callback only after the above state is updated. */
	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

	/*
	 * Except for the MMU, which needs to be reset after any vendor
	 * specific adjustments to the reserved GPA bits.
	 */
	kvm_mmu_reset_context(vcpu);
}

static int is_efer_nx(void)
{
	return host_efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
		cpuid_entry_clear(entry, X86_FEATURE_NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
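/*
 * Example for cpuid_query_maxphyaddr(): a guest whose CPUID.0x80000008.EAX
 * is 0x3028 reports a 40-bit MAXPHYADDR (bits 7:0 = 0x28) and a 48-bit
 * virtual address width (bits 15:8 = 0x30).  Absent leaf 0x80000008, the
 * 36-bit fallback matches the architectural default for CPUs that lack the
 * extended leaf.
 */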
/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}

/* Legacy KVM_SET_CPUID: an old userspace fills the new kernel's format. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_check_cpuid(e2, cpuid->nent);
	if (r) {
		kvfree(e2);
		goto out_free_cpuid;
	}

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = cpuid->nent;

	cpuid_fix_nx_cap(vcpu);
	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

out_free_cpuid:
	kvfree(e);

	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_check_cpuid(e2, cpuid->nent);
	if (r) {
		kvfree(e2);
		return r;
	}

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = cpuid->nent;

	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}
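/*
 * Example of the reverse-CPUID lookup above: for leaf CPUID_7_0_EBX,
 * x86_feature_cpuid(leaf * 32) resolves to function 0x7, index 0, register
 * EBX, so the mask drops any kvm_cpu_caps bit that this CPU's raw CPUID
 * output does not report.
 */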
static __always_inline
void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for non-scattered leafs. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_scattered for scattered leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
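/*
 * A minimal sketch of the flow that kvm_set_cpu_caps() below drives for a
 * regular feature word such as CPUID_1_ECX: the memcpy() seeds kvm_cpu_caps
 * from boot_cpu_data.x86_capability, kvm_cpu_cap_mask() ANDs in KVM's
 * allowlist, and __kvm_cpu_cap_mask() ANDs in the raw CPUID output, so a
 * feature is advertised only if the kernel, KVM, and the CPU all agree.
 */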
void kvm_set_cpu_caps(void)
{
	unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /* MPX */ | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /* INTEL_PT */
	);

	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
	);

	kvm_cpu_cap_init_scattered(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC_CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
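	/*
	 * Example of the cross-vendor synthesis above: a host that enumerates
	 * only Intel's SPEC_CTRL (CPUID.7.EDX) still lets KVM advertise
	 * AMD_IBPB/AMD_IBRS, because both vendors' interfaces are backed by
	 * the same IA32_SPEC_CTRL and IA32_PRED_CMD MSRs that KVM emulates
	 * for guests of either flavor.
	 */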
	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);

	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance
	 * of KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return NULL;

	entry = &array->entries[array->nent++];

	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}

	return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}
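/*
 * Example: KVM_GET_EMULATED_CPUID reports MOVBE in leaf 0x1 even on hosts
 * without the instruction, because KVM's emulator can handle it in
 * software; likewise RDPID is reported in leaf 0x7 when RDTSCP is available
 * to supply MSR_TSC_AUX.
 */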
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
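	/*
	 * Example encoding for leaf 0xa above (hypothetical host): with
	 * cap.version = 4, 8 GP counters of width 48 and an 8-bit event mask,
	 * the guest sees EAX = 0x08300802, i.e. version_id capped at 2 in
	 * bits 7:0, num_counters = 8 in bits 15:8, bit_width = 0x30 in bits
	 * 23:16 and mask_length = 8 in bits 31:24.
	 */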
	/*
	 * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb:
		/*
		 * Populate entries until the level type (ECX[15:8]) of the
		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
		 * the starting entry, filled by the primary do_host_cpuid().
		 */
		for (i = 1; entry->ecx & 0xff00; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 0xd:
		entry->eax &= supported_xcr0;
		entry->ebx = xstate_required_size(supported_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported_xcr0 >> 32;
		if (!supported_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES) | F(XSAVEC)))
			entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
							  true);
		else {
			WARN_ON_ONCE(supported_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= supported_xss;
		entry->edx &= supported_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (supported_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (supported_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with supported_xcr0/supported_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}
			entry->edx = 0;
		}
		break;
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
			      SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
			      SGX_ATTR_KSS;
		entry->ebx &= 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
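	/*
	 * Example: a guest probing for KVM executes CPUID(0x40000000) and
	 * compares EBX/ECX/EDX against "KVMKVMKVM\0\0\0"; the EAX value set
	 * above (KVM_CPUID_FEATURES, 0x40000001) then tells it where the PV
	 * feature leaf handled below lives.
	 */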
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Support memory encryption cpuid if host supports it */
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV))
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		else
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 for now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}
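/*
 * Example: get_cpuid_func(array, 0x80000000, ...) first fills the base
 * leaf, whose EAX reports the maximum extended leaf (capped at 0x8000001f
 * above), and the loop then emits every leaf from 0x80000001 up to and
 * including that limit.
 */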
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to settle for enforcing it on the emulated side only.  /me sheds a
	 * tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	vfree(array.entries);
	return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
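/*
 * Typical userspace flow (a sketch of the documented KVM API): invoke
 * KVM_GET_SUPPORTED_CPUID with a generously sized nent, let the kernel trim
 * cpuid->nent to what it actually filled in, optionally adjust the returned
 * entries, and feed them back through KVM_SET_CPUID2 on each vCPU.
 */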
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A
 * primary class exists if a guest CPUID entry for its <base> leaf exists.
 * For a given class, CPUID.<base>.EAX contains the max supported leaf for
 * the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if
 * QEMU is advertising support for both HyperV and KVM, the resulting
 * Hypervisor CPUID sub-classes are:
 *
 *  - HyperV: 0x40000000 - 0x400000ff
 *  - KVM:    0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
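/*
 * Worked example: an Intel guest whose max basic leaf (CPUID.0.EAX) is 0xd
 * executes CPUID(0x3fffffff).  The function falls in the Basic class and
 * exceeds class->eax (0xd), so the query is redirected to CPUID(0xd) with
 * the original ECX index preserved, matching Intel's out-of-range behavior.
 */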
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);