1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Kernel-based Virtual Machine driver for Linux 4 * cpuid support routines 5 * 6 * derived from arch/x86/kvm/x86.c 7 * 8 * Copyright 2011 Red Hat, Inc. and/or its affiliates. 9 * Copyright IBM Corporation, 2008 10 */ 11 12 #include <linux/kvm_host.h> 13 #include <linux/export.h> 14 #include <linux/vmalloc.h> 15 #include <linux/uaccess.h> 16 #include <linux/sched/stat.h> 17 18 #include <asm/processor.h> 19 #include <asm/user.h> 20 #include <asm/fpu/xstate.h> 21 #include "cpuid.h" 22 #include "lapic.h" 23 #include "mmu.h" 24 #include "trace.h" 25 #include "pmu.h" 26 27 static u32 xstate_required_size(u64 xstate_bv, bool compacted) 28 { 29 int feature_bit = 0; 30 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 31 32 xstate_bv &= XFEATURE_MASK_EXTEND; 33 while (xstate_bv) { 34 if (xstate_bv & 0x1) { 35 u32 eax, ebx, ecx, edx, offset; 36 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); 37 offset = compacted ? ret : ebx; 38 ret = max(ret, offset + eax); 39 } 40 41 xstate_bv >>= 1; 42 feature_bit++; 43 } 44 45 return ret; 46 } 47 48 bool kvm_mpx_supported(void) 49 { 50 return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) 51 && kvm_x86_ops->mpx_supported()); 52 } 53 EXPORT_SYMBOL_GPL(kvm_mpx_supported); 54 55 u64 kvm_supported_xcr0(void) 56 { 57 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; 58 59 if (!kvm_mpx_supported()) 60 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); 61 62 return xcr0; 63 } 64 65 #define F(x) bit(X86_FEATURE_##x) 66 67 int kvm_update_cpuid(struct kvm_vcpu *vcpu) 68 { 69 struct kvm_cpuid_entry2 *best; 70 struct kvm_lapic *apic = vcpu->arch.apic; 71 72 best = kvm_find_cpuid_entry(vcpu, 1, 0); 73 if (!best) 74 return 0; 75 76 /* Update OSXSAVE bit */ 77 if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) { 78 best->ecx &= ~F(OSXSAVE); 79 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) 80 best->ecx |= F(OSXSAVE); 81 } 82 83 best->edx &= ~F(APIC); 84 if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE) 85 best->edx |= F(APIC); 86 87 if (apic) { 88 if (best->ecx & F(TSC_DEADLINE_TIMER)) 89 apic->lapic_timer.timer_mode_mask = 3 << 17; 90 else 91 apic->lapic_timer.timer_mode_mask = 1 << 17; 92 } 93 94 best = kvm_find_cpuid_entry(vcpu, 7, 0); 95 if (best) { 96 /* Update OSPKE bit */ 97 if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) { 98 best->ecx &= ~F(OSPKE); 99 if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) 100 best->ecx |= F(OSPKE); 101 } 102 } 103 104 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); 105 if (!best) { 106 vcpu->arch.guest_supported_xcr0 = 0; 107 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; 108 } else { 109 vcpu->arch.guest_supported_xcr0 = 110 (best->eax | ((u64)best->edx << 32)) & 111 kvm_supported_xcr0(); 112 vcpu->arch.guest_xstate_size = best->ebx = 113 xstate_required_size(vcpu->arch.xcr0, false); 114 } 115 116 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); 117 if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) 118 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 119 120 /* 121 * The existing code assumes virtual address is 48-bit or 57-bit in the 122 * canonical address checks; exit if it is ever changed. 123 */ 124 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); 125 if (best) { 126 int vaddr_bits = (best->eax & 0xff00) >> 8; 127 128 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) 129 return -EINVAL; 130 } 131 132 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0); 133 if (kvm_hlt_in_guest(vcpu->kvm) && best && 134 (best->eax & (1 << KVM_FEATURE_PV_UNHALT))) 135 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); 136 137 /* Update physical-address width */ 138 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); 139 kvm_mmu_reset_context(vcpu); 140 141 kvm_pmu_refresh(vcpu); 142 return 0; 143 } 144 145 static int is_efer_nx(void) 146 { 147 unsigned long long efer = 0; 148 149 rdmsrl_safe(MSR_EFER, &efer); 150 return efer & EFER_NX; 151 } 152 153 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) 154 { 155 int i; 156 struct kvm_cpuid_entry2 *e, *entry; 157 158 entry = NULL; 159 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { 160 e = &vcpu->arch.cpuid_entries[i]; 161 if (e->function == 0x80000001) { 162 entry = e; 163 break; 164 } 165 } 166 if (entry && (entry->edx & F(NX)) && !is_efer_nx()) { 167 entry->edx &= ~F(NX); 168 printk(KERN_INFO "kvm: guest NX capability removed\n"); 169 } 170 } 171 172 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) 173 { 174 struct kvm_cpuid_entry2 *best; 175 176 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); 177 if (!best || best->eax < 0x80000008) 178 goto not_found; 179 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); 180 if (best) 181 return best->eax & 0xff; 182 not_found: 183 return 36; 184 } 185 EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr); 186 187 /* when an old userspace process fills a new kernel module */ 188 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, 189 struct kvm_cpuid *cpuid, 190 struct kvm_cpuid_entry __user *entries) 191 { 192 int r, i; 193 struct kvm_cpuid_entry *cpuid_entries = NULL; 194 195 r = -E2BIG; 196 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 197 goto out; 198 r = -ENOMEM; 199 if (cpuid->nent) { 200 cpuid_entries = 201 vmalloc(array_size(sizeof(struct kvm_cpuid_entry), 202 cpuid->nent)); 203 if (!cpuid_entries) 204 goto out; 205 r = -EFAULT; 206 if (copy_from_user(cpuid_entries, entries, 207 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 208 goto out; 209 } 210 for (i = 0; i < cpuid->nent; i++) { 211 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; 212 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; 213 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; 214 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; 215 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx; 216 vcpu->arch.cpuid_entries[i].index = 0; 217 vcpu->arch.cpuid_entries[i].flags = 0; 218 vcpu->arch.cpuid_entries[i].padding[0] = 0; 219 vcpu->arch.cpuid_entries[i].padding[1] = 0; 220 vcpu->arch.cpuid_entries[i].padding[2] = 0; 221 } 222 vcpu->arch.cpuid_nent = cpuid->nent; 223 cpuid_fix_nx_cap(vcpu); 224 kvm_apic_set_version(vcpu); 225 kvm_x86_ops->cpuid_update(vcpu); 226 r = kvm_update_cpuid(vcpu); 227 228 out: 229 vfree(cpuid_entries); 230 return r; 231 } 232 233 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, 234 struct kvm_cpuid2 *cpuid, 235 struct kvm_cpuid_entry2 __user *entries) 236 { 237 int r; 238 239 r = -E2BIG; 240 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 241 goto out; 242 r = -EFAULT; 243 if (copy_from_user(&vcpu->arch.cpuid_entries, entries, 244 cpuid->nent * sizeof(struct kvm_cpuid_entry2))) 245 goto out; 246 vcpu->arch.cpuid_nent = cpuid->nent; 247 kvm_apic_set_version(vcpu); 248 kvm_x86_ops->cpuid_update(vcpu); 249 r = kvm_update_cpuid(vcpu); 250 out: 251 return r; 252 } 253 254 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, 255 struct kvm_cpuid2 *cpuid, 256 struct kvm_cpuid_entry2 __user *entries) 257 { 258 int r; 259 260 r = -E2BIG; 261 if (cpuid->nent < vcpu->arch.cpuid_nent) 262 goto out; 263 r = -EFAULT; 264 if (copy_to_user(entries, &vcpu->arch.cpuid_entries, 265 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) 266 goto out; 267 return 0; 268 269 out: 270 cpuid->nent = vcpu->arch.cpuid_nent; 271 return r; 272 } 273 274 static void cpuid_mask(u32 *word, int wordnum) 275 { 276 *word &= boot_cpu_data.x86_capability[wordnum]; 277 } 278 279 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function, 280 u32 index) 281 { 282 entry->function = function; 283 entry->index = index; 284 cpuid_count(entry->function, entry->index, 285 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); 286 entry->flags = 0; 287 } 288 289 static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, 290 u32 func, u32 index, int *nent, int maxnent) 291 { 292 switch (func) { 293 case 0: 294 entry->eax = 7; 295 ++*nent; 296 break; 297 case 1: 298 entry->ecx = F(MOVBE); 299 ++*nent; 300 break; 301 case 7: 302 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 303 if (index == 0) 304 entry->ecx = F(RDPID); 305 ++*nent; 306 default: 307 break; 308 } 309 310 entry->function = func; 311 entry->index = index; 312 313 return 0; 314 } 315 316 static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 317 u32 index, int *nent, int maxnent) 318 { 319 int r; 320 unsigned f_nx = is_efer_nx() ? F(NX) : 0; 321 #ifdef CONFIG_X86_64 322 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL) 323 ? F(GBPAGES) : 0; 324 unsigned f_lm = F(LM); 325 #else 326 unsigned f_gbpages = 0; 327 unsigned f_lm = 0; 328 #endif 329 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; 330 unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; 331 unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; 332 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; 333 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; 334 unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; 335 unsigned f_la57 = 0; 336 337 /* cpuid 1.edx */ 338 const u32 kvm_cpuid_1_edx_x86_features = 339 F(FPU) | F(VME) | F(DE) | F(PSE) | 340 F(TSC) | F(MSR) | F(PAE) | F(MCE) | 341 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) | 342 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | 343 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) | 344 0 /* Reserved, DS, ACPI */ | F(MMX) | 345 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) | 346 0 /* HTT, TM, Reserved, PBE */; 347 /* cpuid 0x80000001.edx */ 348 const u32 kvm_cpuid_8000_0001_edx_x86_features = 349 F(FPU) | F(VME) | F(DE) | F(PSE) | 350 F(TSC) | F(MSR) | F(PAE) | F(MCE) | 351 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) | 352 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | 353 F(PAT) | F(PSE36) | 0 /* Reserved */ | 354 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | 355 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp | 356 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); 357 /* cpuid 1.ecx */ 358 const u32 kvm_cpuid_1_ecx_x86_features = 359 /* NOTE: MONITOR (and MWAIT) are emulated as NOP, 360 * but *not* advertised to guests via CPUID ! */ 361 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ | 362 0 /* DS-CPL, VMX, SMX, EST */ | 363 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | 364 F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ | 365 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | 366 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | 367 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | 368 F(F16C) | F(RDRAND); 369 /* cpuid 0x80000001.ecx */ 370 const u32 kvm_cpuid_8000_0001_ecx_x86_features = 371 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | 372 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | 373 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 374 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | 375 F(TOPOEXT) | F(PERFCTR_CORE); 376 377 /* cpuid 0x80000008.ebx */ 378 const u32 kvm_cpuid_8000_0008_ebx_x86_features = 379 F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | 380 F(AMD_SSB_NO) | F(AMD_STIBP); 381 382 /* cpuid 0xC0000001.edx */ 383 const u32 kvm_cpuid_C000_0001_edx_x86_features = 384 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | 385 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | 386 F(PMM) | F(PMM_EN); 387 388 /* cpuid 7.0.ebx */ 389 const u32 kvm_cpuid_7_0_ebx_x86_features = 390 F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | 391 F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | 392 F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | 393 F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | 394 F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt; 395 396 /* cpuid 0xD.1.eax */ 397 const u32 kvm_cpuid_D_1_eax_x86_features = 398 F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves; 399 400 /* cpuid 7.0.ecx*/ 401 const u32 kvm_cpuid_7_0_ecx_x86_features = 402 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | 403 F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | 404 F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | 405 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B); 406 407 /* cpuid 7.0.edx*/ 408 const u32 kvm_cpuid_7_0_edx_x86_features = 409 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | 410 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | 411 F(MD_CLEAR); 412 413 /* all calls to cpuid_count() should be made on the same cpu */ 414 get_cpu(); 415 416 r = -E2BIG; 417 418 if (*nent >= maxnent) 419 goto out; 420 421 do_cpuid_1_ent(entry, function, index); 422 ++*nent; 423 424 switch (function) { 425 case 0: 426 entry->eax = min(entry->eax, (u32)(f_intel_pt ? 0x14 : 0xd)); 427 break; 428 case 1: 429 entry->edx &= kvm_cpuid_1_edx_x86_features; 430 cpuid_mask(&entry->edx, CPUID_1_EDX); 431 entry->ecx &= kvm_cpuid_1_ecx_x86_features; 432 cpuid_mask(&entry->ecx, CPUID_1_ECX); 433 /* we support x2apic emulation even if host does not support 434 * it since we emulate x2apic in software */ 435 entry->ecx |= F(X2APIC); 436 break; 437 /* function 2 entries are STATEFUL. That is, repeated cpuid commands 438 * may return different values. This forces us to get_cpu() before 439 * issuing the first command, and also to emulate this annoying behavior 440 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */ 441 case 2: { 442 int t, times = entry->eax & 0xff; 443 444 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; 445 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 446 for (t = 1; t < times; ++t) { 447 if (*nent >= maxnent) 448 goto out; 449 450 do_cpuid_1_ent(&entry[t], function, 0); 451 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; 452 ++*nent; 453 } 454 break; 455 } 456 /* functions 4 and 0x8000001d have additional index. */ 457 case 4: 458 case 0x8000001d: { 459 int i, cache_type; 460 461 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 462 /* read more entries until cache_type is zero */ 463 for (i = 1; ; ++i) { 464 if (*nent >= maxnent) 465 goto out; 466 467 cache_type = entry[i - 1].eax & 0x1f; 468 if (!cache_type) 469 break; 470 do_cpuid_1_ent(&entry[i], function, i); 471 entry[i].flags |= 472 KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 473 ++*nent; 474 } 475 break; 476 } 477 case 6: /* Thermal management */ 478 entry->eax = 0x4; /* allow ARAT */ 479 entry->ebx = 0; 480 entry->ecx = 0; 481 entry->edx = 0; 482 break; 483 case 7: { 484 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 485 /* Mask ebx against host capability word 9 */ 486 if (index == 0) { 487 entry->ebx &= kvm_cpuid_7_0_ebx_x86_features; 488 cpuid_mask(&entry->ebx, CPUID_7_0_EBX); 489 // TSC_ADJUST is emulated 490 entry->ebx |= F(TSC_ADJUST); 491 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; 492 f_la57 = entry->ecx & F(LA57); 493 cpuid_mask(&entry->ecx, CPUID_7_ECX); 494 /* Set LA57 based on hardware capability. */ 495 entry->ecx |= f_la57; 496 entry->ecx |= f_umip; 497 /* PKU is not yet implemented for shadow paging. */ 498 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) 499 entry->ecx &= ~F(PKU); 500 entry->edx &= kvm_cpuid_7_0_edx_x86_features; 501 cpuid_mask(&entry->edx, CPUID_7_EDX); 502 /* 503 * We emulate ARCH_CAPABILITIES in software even 504 * if the host doesn't support it. 505 */ 506 entry->edx |= F(ARCH_CAPABILITIES); 507 } else { 508 entry->ebx = 0; 509 entry->ecx = 0; 510 entry->edx = 0; 511 } 512 entry->eax = 0; 513 break; 514 } 515 case 9: 516 break; 517 case 0xa: { /* Architectural Performance Monitoring */ 518 struct x86_pmu_capability cap; 519 union cpuid10_eax eax; 520 union cpuid10_edx edx; 521 522 perf_get_x86_pmu_capability(&cap); 523 524 /* 525 * Only support guest architectural pmu on a host 526 * with architectural pmu. 527 */ 528 if (!cap.version) 529 memset(&cap, 0, sizeof(cap)); 530 531 eax.split.version_id = min(cap.version, 2); 532 eax.split.num_counters = cap.num_counters_gp; 533 eax.split.bit_width = cap.bit_width_gp; 534 eax.split.mask_length = cap.events_mask_len; 535 536 edx.split.num_counters_fixed = cap.num_counters_fixed; 537 edx.split.bit_width_fixed = cap.bit_width_fixed; 538 edx.split.reserved = 0; 539 540 entry->eax = eax.full; 541 entry->ebx = cap.events_mask; 542 entry->ecx = 0; 543 entry->edx = edx.full; 544 break; 545 } 546 /* function 0xb has additional index. */ 547 case 0xb: { 548 int i, level_type; 549 550 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 551 /* read more entries until level_type is zero */ 552 for (i = 1; ; ++i) { 553 if (*nent >= maxnent) 554 goto out; 555 556 level_type = entry[i - 1].ecx & 0xff00; 557 if (!level_type) 558 break; 559 do_cpuid_1_ent(&entry[i], function, i); 560 entry[i].flags |= 561 KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 562 ++*nent; 563 } 564 break; 565 } 566 case 0xd: { 567 int idx, i; 568 u64 supported = kvm_supported_xcr0(); 569 570 entry->eax &= supported; 571 entry->ebx = xstate_required_size(supported, false); 572 entry->ecx = entry->ebx; 573 entry->edx &= supported >> 32; 574 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 575 if (!supported) 576 break; 577 578 for (idx = 1, i = 1; idx < 64; ++idx) { 579 u64 mask = ((u64)1 << idx); 580 if (*nent >= maxnent) 581 goto out; 582 583 do_cpuid_1_ent(&entry[i], function, idx); 584 if (idx == 1) { 585 entry[i].eax &= kvm_cpuid_D_1_eax_x86_features; 586 cpuid_mask(&entry[i].eax, CPUID_D_1_EAX); 587 entry[i].ebx = 0; 588 if (entry[i].eax & (F(XSAVES)|F(XSAVEC))) 589 entry[i].ebx = 590 xstate_required_size(supported, 591 true); 592 } else { 593 if (entry[i].eax == 0 || !(supported & mask)) 594 continue; 595 if (WARN_ON_ONCE(entry[i].ecx & 1)) 596 continue; 597 } 598 entry[i].ecx = 0; 599 entry[i].edx = 0; 600 entry[i].flags |= 601 KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 602 ++*nent; 603 ++i; 604 } 605 break; 606 } 607 /* Intel PT */ 608 case 0x14: { 609 int t, times = entry->eax; 610 611 if (!f_intel_pt) 612 break; 613 614 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 615 for (t = 1; t <= times; ++t) { 616 if (*nent >= maxnent) 617 goto out; 618 do_cpuid_1_ent(&entry[t], function, t); 619 entry[t].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; 620 ++*nent; 621 } 622 break; 623 } 624 case KVM_CPUID_SIGNATURE: { 625 static const char signature[12] = "KVMKVMKVM\0\0"; 626 const u32 *sigptr = (const u32 *)signature; 627 entry->eax = KVM_CPUID_FEATURES; 628 entry->ebx = sigptr[0]; 629 entry->ecx = sigptr[1]; 630 entry->edx = sigptr[2]; 631 break; 632 } 633 case KVM_CPUID_FEATURES: 634 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | 635 (1 << KVM_FEATURE_NOP_IO_DELAY) | 636 (1 << KVM_FEATURE_CLOCKSOURCE2) | 637 (1 << KVM_FEATURE_ASYNC_PF) | 638 (1 << KVM_FEATURE_PV_EOI) | 639 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | 640 (1 << KVM_FEATURE_PV_UNHALT) | 641 (1 << KVM_FEATURE_PV_TLB_FLUSH) | 642 (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | 643 (1 << KVM_FEATURE_PV_SEND_IPI); 644 645 if (sched_info_on()) 646 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); 647 648 entry->ebx = 0; 649 entry->ecx = 0; 650 entry->edx = 0; 651 break; 652 case 0x80000000: 653 entry->eax = min(entry->eax, 0x8000001f); 654 break; 655 case 0x80000001: 656 entry->edx &= kvm_cpuid_8000_0001_edx_x86_features; 657 cpuid_mask(&entry->edx, CPUID_8000_0001_EDX); 658 entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features; 659 cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX); 660 break; 661 case 0x80000007: /* Advanced power management */ 662 /* invariant TSC is CPUID.80000007H:EDX[8] */ 663 entry->edx &= (1 << 8); 664 /* mask against host */ 665 entry->edx &= boot_cpu_data.x86_power; 666 entry->eax = entry->ebx = entry->ecx = 0; 667 break; 668 case 0x80000008: { 669 unsigned g_phys_as = (entry->eax >> 16) & 0xff; 670 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); 671 unsigned phys_as = entry->eax & 0xff; 672 673 if (!g_phys_as) 674 g_phys_as = phys_as; 675 entry->eax = g_phys_as | (virt_as << 8); 676 entry->edx = 0; 677 /* 678 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in 679 * hardware cpuid 680 */ 681 if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) 682 entry->ebx |= F(AMD_IBPB); 683 if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) 684 entry->ebx |= F(AMD_IBRS); 685 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 686 entry->ebx |= F(VIRT_SSBD); 687 entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; 688 cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); 689 /* 690 * The preference is to use SPEC CTRL MSR instead of the 691 * VIRT_SPEC MSR. 692 */ 693 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 694 !boot_cpu_has(X86_FEATURE_AMD_SSBD)) 695 entry->ebx |= F(VIRT_SSBD); 696 break; 697 } 698 case 0x80000019: 699 entry->ecx = entry->edx = 0; 700 break; 701 case 0x8000001a: 702 case 0x8000001e: 703 break; 704 /*Add support for Centaur's CPUID instruction*/ 705 case 0xC0000000: 706 /*Just support up to 0xC0000004 now*/ 707 entry->eax = min(entry->eax, 0xC0000004); 708 break; 709 case 0xC0000001: 710 entry->edx &= kvm_cpuid_C000_0001_edx_x86_features; 711 cpuid_mask(&entry->edx, CPUID_C000_0001_EDX); 712 break; 713 case 3: /* Processor serial number */ 714 case 5: /* MONITOR/MWAIT */ 715 case 0xC0000002: 716 case 0xC0000003: 717 case 0xC0000004: 718 default: 719 entry->eax = entry->ebx = entry->ecx = entry->edx = 0; 720 break; 721 } 722 723 kvm_x86_ops->set_supported_cpuid(function, entry); 724 725 r = 0; 726 727 out: 728 put_cpu(); 729 730 return r; 731 } 732 733 static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func, 734 u32 idx, int *nent, int maxnent, unsigned int type) 735 { 736 if (type == KVM_GET_EMULATED_CPUID) 737 return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent); 738 739 return __do_cpuid_ent(entry, func, idx, nent, maxnent); 740 } 741 742 #undef F 743 744 struct kvm_cpuid_param { 745 u32 func; 746 u32 idx; 747 bool has_leaf_count; 748 bool (*qualifier)(const struct kvm_cpuid_param *param); 749 }; 750 751 static bool is_centaur_cpu(const struct kvm_cpuid_param *param) 752 { 753 return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR; 754 } 755 756 static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, 757 __u32 num_entries, unsigned int ioctl_type) 758 { 759 int i; 760 __u32 pad[3]; 761 762 if (ioctl_type != KVM_GET_EMULATED_CPUID) 763 return false; 764 765 /* 766 * We want to make sure that ->padding is being passed clean from 767 * userspace in case we want to use it for something in the future. 768 * 769 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we 770 * have to give ourselves satisfied only with the emulated side. /me 771 * sheds a tear. 772 */ 773 for (i = 0; i < num_entries; i++) { 774 if (copy_from_user(pad, entries[i].padding, sizeof(pad))) 775 return true; 776 777 if (pad[0] || pad[1] || pad[2]) 778 return true; 779 } 780 return false; 781 } 782 783 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, 784 struct kvm_cpuid_entry2 __user *entries, 785 unsigned int type) 786 { 787 struct kvm_cpuid_entry2 *cpuid_entries; 788 int limit, nent = 0, r = -E2BIG, i; 789 u32 func; 790 static const struct kvm_cpuid_param param[] = { 791 { .func = 0, .has_leaf_count = true }, 792 { .func = 0x80000000, .has_leaf_count = true }, 793 { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true }, 794 { .func = KVM_CPUID_SIGNATURE }, 795 { .func = KVM_CPUID_FEATURES }, 796 }; 797 798 if (cpuid->nent < 1) 799 goto out; 800 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) 801 cpuid->nent = KVM_MAX_CPUID_ENTRIES; 802 803 if (sanity_check_entries(entries, cpuid->nent, type)) 804 return -EINVAL; 805 806 r = -ENOMEM; 807 cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2), 808 cpuid->nent)); 809 if (!cpuid_entries) 810 goto out; 811 812 r = 0; 813 for (i = 0; i < ARRAY_SIZE(param); i++) { 814 const struct kvm_cpuid_param *ent = ¶m[i]; 815 816 if (ent->qualifier && !ent->qualifier(ent)) 817 continue; 818 819 r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx, 820 &nent, cpuid->nent, type); 821 822 if (r) 823 goto out_free; 824 825 if (!ent->has_leaf_count) 826 continue; 827 828 limit = cpuid_entries[nent - 1].eax; 829 for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func) 830 r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx, 831 &nent, cpuid->nent, type); 832 833 if (r) 834 goto out_free; 835 } 836 837 r = -EFAULT; 838 if (copy_to_user(entries, cpuid_entries, 839 nent * sizeof(struct kvm_cpuid_entry2))) 840 goto out_free; 841 cpuid->nent = nent; 842 r = 0; 843 844 out_free: 845 vfree(cpuid_entries); 846 out: 847 return r; 848 } 849 850 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 851 { 852 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; 853 struct kvm_cpuid_entry2 *ej; 854 int j = i; 855 int nent = vcpu->arch.cpuid_nent; 856 857 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 858 /* when no next entry is found, the current entry[i] is reselected */ 859 do { 860 j = (j + 1) % nent; 861 ej = &vcpu->arch.cpuid_entries[j]; 862 } while (ej->function != e->function); 863 864 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 865 866 return j; 867 } 868 869 /* find an entry with matching function, matching index (if needed), and that 870 * should be read next (if it's stateful) */ 871 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e, 872 u32 function, u32 index) 873 { 874 if (e->function != function) 875 return 0; 876 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index) 877 return 0; 878 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) && 879 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT)) 880 return 0; 881 return 1; 882 } 883 884 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, 885 u32 function, u32 index) 886 { 887 int i; 888 struct kvm_cpuid_entry2 *best = NULL; 889 890 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { 891 struct kvm_cpuid_entry2 *e; 892 893 e = &vcpu->arch.cpuid_entries[i]; 894 if (is_matching_cpuid_entry(e, function, index)) { 895 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) 896 move_to_next_stateful_cpuid_entry(vcpu, i); 897 best = e; 898 break; 899 } 900 } 901 return best; 902 } 903 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); 904 905 /* 906 * If no match is found, check whether we exceed the vCPU's limit 907 * and return the content of the highest valid _standard_ leaf instead. 908 * This is to satisfy the CPUID specification. 909 */ 910 static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, 911 u32 function, u32 index) 912 { 913 struct kvm_cpuid_entry2 *maxlevel; 914 915 maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); 916 if (!maxlevel || maxlevel->eax >= function) 917 return NULL; 918 if (function & 0x80000000) { 919 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0); 920 if (!maxlevel) 921 return NULL; 922 } 923 return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); 924 } 925 926 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, 927 u32 *ecx, u32 *edx, bool check_limit) 928 { 929 u32 function = *eax, index = *ecx; 930 struct kvm_cpuid_entry2 *best; 931 bool entry_found = true; 932 933 best = kvm_find_cpuid_entry(vcpu, function, index); 934 935 if (!best) { 936 entry_found = false; 937 if (!check_limit) 938 goto out; 939 940 best = check_cpuid_limit(vcpu, function, index); 941 } 942 943 out: 944 if (best) { 945 *eax = best->eax; 946 *ebx = best->ebx; 947 *ecx = best->ecx; 948 *edx = best->edx; 949 } else 950 *eax = *ebx = *ecx = *edx = 0; 951 trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found); 952 return entry_found; 953 } 954 EXPORT_SYMBOL_GPL(kvm_cpuid); 955 956 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 957 { 958 u32 eax, ebx, ecx, edx; 959 960 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) 961 return 1; 962 963 eax = kvm_rax_read(vcpu); 964 ecx = kvm_rcx_read(vcpu); 965 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true); 966 kvm_rax_write(vcpu, eax); 967 kvm_rbx_write(vcpu, ebx); 968 kvm_rcx_write(vcpu, ecx); 969 kvm_rdx_write(vcpu, edx); 970 return kvm_skip_emulated_instruction(vcpu); 971 } 972 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 973