/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"

void kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }

        if (apic) {
                if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        kvm_pmu_cpuid_update(vcpu);
}
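
/*
 * Guest-visible effect of the OSXSAVE update above (an illustrative
 * sketch, not compiled here): once the guest sets CR4.OSXSAVE,
 * CPUID.01H:ECX[bit 27] reads back as 1 on the next CPUID exit,
 * e.g. from guest code like:
 *
 *      unsigned int eax = 1, ebx, ecx, edx;
 *      asm volatile("cpuid"
 *                   : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
 *      int osxsave = !!(ecx & (1u << 27));     // tracks guest CR4.OSXSAVE
 */
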
static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/*
 * Legacy KVM_SET_CPUID path: old userspace passes the original
 * struct kvm_cpuid_entry (no index/flags fields) to a newer kernel.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        kvm_update_cpuid(vcpu);

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        kvm_update_cpuid(vcpu);
        return 0;

out:
        return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}
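
/*
 * Illustrative userspace flow for the ioctls above (a sketch; error
 * handling elided, and the entry count 100 is an arbitrary choice,
 * not an ABI value):
 *
 *      struct {
 *              struct kvm_cpuid2 cpuid;
 *              struct kvm_cpuid_entry2 entries[100];
 *      } buf = { .cpuid = { .nent = 100 } };
 *
 *      ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &buf.cpuid);
 *      // ... filter/adjust the entries for this guest ...
 *      ioctl(vcpu_fd, KVM_SET_CPUID2, &buf.cpuid);
 */
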
static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}
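
/*
 * Conceptually, the cpuid_count() call above executes the CPUID
 * instruction with both EAX (leaf) and ECX (sub-leaf) as inputs,
 * roughly equivalent to this sketch:
 *
 *      asm volatile("cpuid"
 *                   : "=a" (entry->eax), "=b" (entry->ebx),
 *                     "=c" (entry->ecx), "=d" (entry->edx)
 *                   : "0" (function), "2" (index));
 */
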
static bool supported_xcr0_bit(unsigned bit)
{
        u64 mask = ((u64)1 << bit);

        return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        u32 index, int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;

        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_supported_word1_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

        /* cpuid 0xC0000001.edx */
        const u32 kvm_supported_word5_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);

        /* cpuid 7.0.ebx */
        const u32 kvm_supported_word9_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM);

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (*nent >= maxnent)
                goto out;

        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xd);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
                cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 has additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 7: {
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* Mask ebx against host capability word 9 */
                if (index == 0) {
                        entry->ebx &= kvm_supported_word9_x86_features;
                        cpuid_mask(&entry->ebx, 9);
                } else
                        entry->ebx = 0;
                entry->eax = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /* function 0xb has additional index. */
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xd: {
                int idx, i;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                for (idx = 1, i = 1; idx < 64; ++idx) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
                                continue;
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                        ++i;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                char signature[12] = "KVMKVMKVM\0\0";
                u32 *sigptr = (u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
                cpuid_mask(&entry->ecx, 6);
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                if (!g_phys_as)
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->ebx = entry->edx = 0;
                break;
        }
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                break;
        case 0x8000001d:
                break;
        /* Add support for Centaur's CPUID leaves. */
        case 0xC0000000:
                /* Just support up to 0xC0000004 for now. */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_supported_word5_x86_features;
                cpuid_mask(&entry->edx, 5);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 6: /* Thermal management */
        case 0x80000007: /* Advanced power management */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}

#undef F

struct kvm_cpuid_param {
        u32 func;
        u32 idx;
        bool has_leaf_count;
        bool (*qualifier)(struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                      struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static struct kvm_cpuid_param param[] = {
                { .func = 0, .has_leaf_count = true },
                { .func = 0x80000000, .has_leaf_count = true },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
                { .func = KVM_CPUID_SIGNATURE },
                { .func = KVM_CPUID_FEATURES },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
                                 &nent, cpuid->nent);

                if (r)
                        goto out_free;

                if (!ent->has_leaf_count)
                        continue;

                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
                                         &nent, cpuid->nent);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
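
/*
 * Userspace commonly sizes its buffer for the ioctl above by retrying
 * on E2BIG (a sketch; the initial guess of 64 entries is arbitrary and
 * error handling is elided):
 *
 *      int n = 64, ret;
 *      struct kvm_cpuid2 *c = NULL;
 *      do {
 *              c = realloc(c, sizeof(*c) + n * sizeof(c->entries[0]));
 *              c->nent = n;
 *              n *= 2;
 *      } while ((ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c)) < 0 &&
 *               errno == E2BIG);
 */
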
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
        return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
                                   u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
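
/*
 * Worked example for cpuid_maxphyaddr(): a guest whose leaf 0x80000008
 * reports EAX = 0x00003028 gets a physical address width of 0x28
 * (40 bits) from EAX[7:0]; when the leaf is absent, the architectural
 * default of 36 bits is assumed.
 */
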
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);

        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        trace_kvm_cpuid(function, eax, ebx, ecx, edx);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
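
/*
 * End-to-end sketch (illustrative guest code, not compiled here): a
 * guest probing the KVM signature leaf traps into kvm_emulate_cpuid()
 * above and observes the KVM_CPUID_SIGNATURE entry built by
 * do_cpuid_ent():
 *
 *      unsigned int eax = 0x40000000, ebx, ecx, edx;   // KVM_CPUID_SIGNATURE
 *      char sig[13] = "";
 *      asm volatile("cpuid"
 *                   : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
 *      memcpy(sig + 0, &ebx, 4);
 *      memcpy(sig + 4, &ecx, 4);
 *      memcpy(sig + 8, &edx, 4);
 *      // sig now holds "KVMKVMKVM"; eax returns KVM_CPUID_FEATURES
 */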