/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "sev_i386.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/x86.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"
#include "sysemu/sev.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_adjust_clock(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}
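/*
 * Cache the result of the first invocation of 'fn' in '_result' and return
 * the cached value from the enclosing function on subsequent calls.  Note
 * that the macro body contains a 'return' statement, so it can only be
 * expanded inside a function whose return type matches '_result'.
 */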
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}
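/*
 * Issue KVM_GET_SUPPORTED_CPUID with a caller-chosen entry count.  Returns
 * NULL when the buffer is too small (-E2BIG) so that the caller can retry
 * with a larger one; any other failure is fatal.
 */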
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}
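/*
 * Feature MSRs (KVM_CAP_GET_MSR_FEATURES) describe host/KVM capabilities
 * rather than vCPU state, so they are read with the system-wide
 * KVM_GET_MSRS ioctl on the /dev/kvm fd instead of a vCPU fd.
 */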
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
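        /*
         * VMX capability MSR layout (SDM Vol. 3, appendix A): bits 31:0
         * hold the allowed 0-settings (a 1 here means the control must be
         * 1), bits 63:32 hold the allowed 1-settings (a 1 here means the
         * control may be 1).
         */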
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}
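/*
 * With KVM_CAP_EXCEPTION_PAYLOAD the exception is queued as "pending" and
 * the kernel applies the payload (DR6 or CR2) when it is delivered.  On
 * kernels without the capability we must complete the delivery ourselves:
 * write the payload into DR6/CR2 and queue the exception as "injected".
 */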
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT  0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
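/* E.g. freq = 2000000 kHz (2 GHz) gives a window of 1999500..2000500 kHz. */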
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}
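/*
 * Map each hv-* feature to the Hyper-V CPUID bits the host must expose for
 * it (checked by hyperv_feature_supported()) and to the other hv-* features
 * it depends on (checked by hv_cpuid_check_and_set()).
 */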
static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_HYPERCALL_AVAILABLE},
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
             HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
(hv-evmcs)", 909 .flags = { 910 {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, 911 .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED} 912 }, 913 .dependencies = BIT(HYPERV_FEAT_VAPIC) 914 }, 915 [HYPERV_FEAT_IPI] = { 916 .desc = "paravirtualized IPI (hv-ipi)", 917 .flags = { 918 {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, 919 .bits = HV_CLUSTER_IPI_RECOMMENDED | 920 HV_EX_PROCESSOR_MASKS_RECOMMENDED} 921 }, 922 .dependencies = BIT(HYPERV_FEAT_VPINDEX) 923 }, 924 [HYPERV_FEAT_STIMER_DIRECT] = { 925 .desc = "direct mode synthetic timers (hv-stimer-direct)", 926 .flags = { 927 {.func = HV_CPUID_FEATURES, .reg = R_EDX, 928 .bits = HV_STIMER_DIRECT_MODE_AVAILABLE} 929 }, 930 .dependencies = BIT(HYPERV_FEAT_STIMER) 931 }, 932 }; 933 934 static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, 935 bool do_sys_ioctl) 936 { 937 struct kvm_cpuid2 *cpuid; 938 int r, size; 939 940 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); 941 cpuid = g_malloc0(size); 942 cpuid->nent = max; 943 944 if (do_sys_ioctl) { 945 r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 946 } else { 947 r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid); 948 } 949 if (r == 0 && cpuid->nent >= max) { 950 r = -E2BIG; 951 } 952 if (r < 0) { 953 if (r == -E2BIG) { 954 g_free(cpuid); 955 return NULL; 956 } else { 957 fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n", 958 strerror(-r)); 959 exit(1); 960 } 961 } 962 return cpuid; 963 } 964 965 /* 966 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough 967 * for all entries. 968 */ 969 static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs) 970 { 971 struct kvm_cpuid2 *cpuid; 972 /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000080 leaves */ 973 int max = 10; 974 int i; 975 bool do_sys_ioctl; 976 977 do_sys_ioctl = 978 kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0; 979 980 /* 981 * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with 982 * -E2BIG, however, it doesn't report back the right size. Keep increasing 983 * it and re-trying until we succeed. 984 */ 985 while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) { 986 max++; 987 } 988 989 /* 990 * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before 991 * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the 992 * information early, just check for the capability and set the bit 993 * manually. 994 */ 995 if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state, 996 KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) { 997 for (i = 0; i < cpuid->nent; i++) { 998 if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) { 999 cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED; 1000 } 1001 } 1002 } 1003 1004 return cpuid; 1005 } 1006 1007 /* 1008 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature 1009 * leaves from KVM_CAP_HYPERV* and present MSRs data. 
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
                           KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

static int hv_cpuid_check_and_set(CPUState *cs, int feature, Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t deps;
    int dep_feat;

    if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
        return 0;
    }

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return 1;
        }
        deps &= ~(1ull << dep_feat);
    }

    if (!hyperv_feature_supported(cs, feature)) {
        if (hyperv_feat_enabled(cpu, feature)) {
            error_setg(errp, "Hyper-V %s is not supported by kernel",
                       kvm_hyperv_properties[feature].desc);
            return 1;
        } else {
            return 0;
        }
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_features |= BIT(feature);
    }

    return 0;
}
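/*
 * Build the value of one Hyper-V CPUID register by ORing together the bits
 * of every enabled hv-* feature that maps to this function/register pair.
 */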
static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and that the configuration is sane
 * (all the required dependencies are included). Also, this takes care of
 * 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
static void hyperv_expand_features(CPUState *cs, Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);

    if (!hyperv_enabled(cpu)) {
        return;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_version_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_version_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX);
        cpu->hyperv_version_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_version_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX);

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);
    }

    /* Features */
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RELAXED, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_VAPIC, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_TIME, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_CRASH, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RESET, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_VPINDEX, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_RUNTIME, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_SYNIC, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_FREQUENCIES, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_REENLIGHTENMENT, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_TLBFLUSH, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_EVMCS, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_IPI, errp)) {
        return;
    }
    if (hv_cpuid_check_and_set(cs, HYPERV_FEAT_STIMER_DIRECT, errp)) {
        return;
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
    }
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t cpuid_i = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_version_id[0];
    c->ebx = cpu->hyperv_version_id[1];
    c->ecx = cpu->hyperv_version_id[2];
    c->edx = cpu->hyperv_version_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = cpu->hyperv_nested[0];
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;
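/*
 * Enable the requested Hyper-V capabilities in KVM and register migration
 * blockers for configurations whose guest-visible state cannot be
 * reproduced on the migration target.
 */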
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_passthrough_mig_blocker);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_no_nonarch_cs_mig_blocker);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&evmcs_version);

        if (ret < 0) {
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        cpu->hyperv_nested[0] = evmcs_version;
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
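/*
 * Build the vCPU's CPUID table (paravirtualized leaves first, then the
 * basic, extended and Centaur ranges), configure MCE, TSC frequency, XSAVE
 * and nested-state buffers, and hand everything to KVM.
 */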
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* The vcpu's TSC frequency is either specified by the user, or follows
     * the value used by KVM if the former is not present. In the latter
     * case, we query it from KVM and record it in env->tsc_khz, so that
     * the vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;

    /* Paravirtualization CPUIDs */
    hyperv_expand_features(cs, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -ENOSYS;
    }

    if (hyperv_enabled(cpu)) {
        r = hyperv_init_vcpu(cpu);
        if (r) {
            return r;
        }

        cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
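            /*
             * Enumerate one entry per subleaf.  Leaves 0xd and 0x1f are
             * capped at 64 subleaves; subleaves of leaf 0xd with EAX == 0
             * are skipped but enumeration continues, because the valid
             * XSAVE state components are not contiguous.
             */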
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                return r;
            }
        }
    }
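    /*
     * VMware-compatible pseudo-leaf 0x40000010: EAX holds the TSC frequency
     * and EBX the APIC bus frequency, both in kHz, so that the guest can
     * skip its own clock calibration.
     */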
    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
        env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
        memset(env->xsave_buf, 0, env->xsave_buf_len);

        /*
         * The allocated storage must be large enough for all of the
         * possible XSAVE state components.
         */
        assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
               <= env->xsave_buf_len);
    }

    max_nested_state_len = kvm_max_nested_state_length();
    if (max_nested_state_len > 0) {
        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

        if (cpu_has_vmx(env) || cpu_has_svm(env)) {
            struct kvm_vmx_nested_state_hdr *vmx_hdr;

            env->nested_state = g_malloc0(max_nested_state_len);
            env->nested_state->size = max_nested_state_len;

            if (cpu_has_vmx(env)) {
                env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
                vmx_hdr = &env->nested_state->hdr.vmx;
                vmx_hdr->vmxon_pa = -1ull;
                vmx_hdr->vmcs12_pa = -1ull;
            } else {
                env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
            }
        }
    }

    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    kvm_init_msrs(cpu);

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);

    return r;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->kvm_msr_buf) {
        g_free(cpu->kvm_msr_buf);
        cpu->kvm_msr_buf = NULL;
    }

    if (env->nested_state) {
        g_free(env->nested_state);
        env->nested_state = NULL;
    }

    qemu_del_vm_change_state_handler(cpu->vmsentry);

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        int i;
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
    /* enabled by default */
    env->poll_control_msr = 1;

    sev_es_set_reset_vector(CPU(cpu));
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
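/*
 * The first KVM_GET_MSR_FEATURE_INDEX_LIST call below, made with
 * nmsrs == 0, is expected to fail with -E2BIG; the kernel then reports
 * the real number of MSRs so a correctly sized buffer can be allocated
 * for the second call.
 */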
*/ 1991 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { 1992 env->mp_state = KVM_MP_STATE_INIT_RECEIVED; 1993 } 1994 } 1995 1996 static int kvm_get_supported_feature_msrs(KVMState *s) 1997 { 1998 int ret = 0; 1999 2000 if (kvm_feature_msrs != NULL) { 2001 return 0; 2002 } 2003 2004 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) { 2005 return 0; 2006 } 2007 2008 struct kvm_msr_list msr_list; 2009 2010 msr_list.nmsrs = 0; 2011 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list); 2012 if (ret < 0 && ret != -E2BIG) { 2013 error_report("Fetch KVM feature MSR list failed: %s", 2014 strerror(-ret)); 2015 return ret; 2016 } 2017 2018 assert(msr_list.nmsrs > 0); 2019 kvm_feature_msrs = (struct kvm_msr_list *) \ 2020 g_malloc0(sizeof(msr_list) + 2021 msr_list.nmsrs * sizeof(msr_list.indices[0])); 2022 2023 kvm_feature_msrs->nmsrs = msr_list.nmsrs; 2024 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs); 2025 2026 if (ret < 0) { 2027 error_report("Fetch KVM feature MSR list failed: %s", 2028 strerror(-ret)); 2029 g_free(kvm_feature_msrs); 2030 kvm_feature_msrs = NULL; 2031 return ret; 2032 } 2033 2034 return 0; 2035 } 2036 2037 static int kvm_get_supported_msrs(KVMState *s) 2038 { 2039 int ret = 0; 2040 struct kvm_msr_list msr_list, *kvm_msr_list; 2041 2042 /* 2043 * Obtain MSR list from KVM. These are the MSRs that we must 2044 * save/restore. 2045 */ 2046 msr_list.nmsrs = 0; 2047 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); 2048 if (ret < 0 && ret != -E2BIG) { 2049 return ret; 2050 } 2051 /* 2052 * Old kernel modules had a bug and could write beyond the provided 2053 * memory. Allocate at least a safe amount of 1K. 2054 */ 2055 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + 2056 msr_list.nmsrs * 2057 sizeof(msr_list.indices[0]))); 2058 2059 kvm_msr_list->nmsrs = msr_list.nmsrs; 2060 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); 2061 if (ret >= 0) { 2062 int i; 2063 2064 for (i = 0; i < kvm_msr_list->nmsrs; i++) { 2065 switch (kvm_msr_list->indices[i]) { 2066 case MSR_STAR: 2067 has_msr_star = true; 2068 break; 2069 case MSR_VM_HSAVE_PA: 2070 has_msr_hsave_pa = true; 2071 break; 2072 case MSR_TSC_AUX: 2073 has_msr_tsc_aux = true; 2074 break; 2075 case MSR_TSC_ADJUST: 2076 has_msr_tsc_adjust = true; 2077 break; 2078 case MSR_IA32_TSCDEADLINE: 2079 has_msr_tsc_deadline = true; 2080 break; 2081 case MSR_IA32_SMBASE: 2082 has_msr_smbase = true; 2083 break; 2084 case MSR_SMI_COUNT: 2085 has_msr_smi_count = true; 2086 break; 2087 case MSR_IA32_MISC_ENABLE: 2088 has_msr_misc_enable = true; 2089 break; 2090 case MSR_IA32_BNDCFGS: 2091 has_msr_bndcfgs = true; 2092 break; 2093 case MSR_IA32_XSS: 2094 has_msr_xss = true; 2095 break; 2096 case MSR_IA32_UMWAIT_CONTROL: 2097 has_msr_umwait = true; 2098 break; 2099 case HV_X64_MSR_CRASH_CTL: 2100 has_msr_hv_crash = true; 2101 break; 2102 case HV_X64_MSR_RESET: 2103 has_msr_hv_reset = true; 2104 break; 2105 case HV_X64_MSR_VP_INDEX: 2106 has_msr_hv_vpindex = true; 2107 break; 2108 case HV_X64_MSR_VP_RUNTIME: 2109 has_msr_hv_runtime = true; 2110 break; 2111 case HV_X64_MSR_SCONTROL: 2112 has_msr_hv_synic = true; 2113 break; 2114 case HV_X64_MSR_STIMER0_CONFIG: 2115 has_msr_hv_stimer = true; 2116 break; 2117 case HV_X64_MSR_TSC_FREQUENCY: 2118 has_msr_hv_frequencies = true; 2119 break; 2120 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 2121 has_msr_hv_reenlightenment = true; 2122 break; 2123 case MSR_IA32_SPEC_CTRL: 2124 has_msr_spec_ctrl = true; 2125 break; 2126 case MSR_IA32_TSX_CTRL: 2127 
has_msr_tsx_ctrl = true; 2128 break; 2129 case MSR_VIRT_SSBD: 2130 has_msr_virt_ssbd = true; 2131 break; 2132 case MSR_IA32_ARCH_CAPABILITIES: 2133 has_msr_arch_capabs = true; 2134 break; 2135 case MSR_IA32_CORE_CAPABILITY: 2136 has_msr_core_capabs = true; 2137 break; 2138 case MSR_IA32_PERF_CAPABILITIES: 2139 has_msr_perf_capabs = true; 2140 break; 2141 case MSR_IA32_VMX_VMFUNC: 2142 has_msr_vmx_vmfunc = true; 2143 break; 2144 case MSR_IA32_UCODE_REV: 2145 has_msr_ucode_rev = true; 2146 break; 2147 case MSR_IA32_VMX_PROCBASED_CTLS2: 2148 has_msr_vmx_procbased_ctls2 = true; 2149 break; 2150 case MSR_IA32_PKRS: 2151 has_msr_pkrs = true; 2152 break; 2153 } 2154 } 2155 } 2156 2157 g_free(kvm_msr_list); 2158 2159 return ret; 2160 } 2161 2162 static Notifier smram_machine_done; 2163 static KVMMemoryListener smram_listener; 2164 static AddressSpace smram_address_space; 2165 static MemoryRegion smram_as_root; 2166 static MemoryRegion smram_as_mem; 2167 2168 static void register_smram_listener(Notifier *n, void *unused) 2169 { 2170 MemoryRegion *smram = 2171 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 2172 2173 /* Outer container... */ 2174 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); 2175 memory_region_set_enabled(&smram_as_root, true); 2176 2177 /* ... with two regions inside: normal system memory with low 2178 * priority, and... 2179 */ 2180 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", 2181 get_system_memory(), 0, ~0ull); 2182 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); 2183 memory_region_set_enabled(&smram_as_mem, true); 2184 2185 if (smram) { 2186 /* ... SMRAM with higher priority */ 2187 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10); 2188 memory_region_set_enabled(smram, true); 2189 } 2190 2191 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); 2192 kvm_memory_listener_register(kvm_state, &smram_listener, 2193 &smram_address_space, 1); 2194 } 2195 2196 int kvm_arch_init(MachineState *ms, KVMState *s) 2197 { 2198 uint64_t identity_base = 0xfffbc000; 2199 uint64_t shadow_mem; 2200 int ret; 2201 struct utsname utsname; 2202 Error *local_err = NULL; 2203 2204 /* 2205 * Initialize SEV context, if required 2206 * 2207 * If no memory encryption is requested (ms->cgs == NULL) this is 2208 * a no-op. 2209 * 2210 * It's also a no-op if a non-SEV confidential guest support 2211 * mechanism is selected. SEV is the only mechanism available to 2212 * select on x86 at present, so this doesn't arise, but if new 2213 * mechanisms are supported in future (e.g. TDX), they'll need 2214 * their own initialization either here or elsewhere. 
2215 */ 2216 ret = sev_kvm_init(ms->cgs, &local_err); 2217 if (ret < 0) { 2218 error_report_err(local_err); 2219 return ret; 2220 } 2221 2222 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { 2223 error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM"); 2224 return -ENOTSUP; 2225 } 2226 2227 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); 2228 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); 2229 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); 2230 2231 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); 2232 2233 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); 2234 if (has_exception_payload) { 2235 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); 2236 if (ret < 0) { 2237 error_report("kvm: Failed to enable exception payload cap: %s", 2238 strerror(-ret)); 2239 return ret; 2240 } 2241 } 2242 2243 ret = kvm_get_supported_msrs(s); 2244 if (ret < 0) { 2245 return ret; 2246 } 2247 2248 kvm_get_supported_feature_msrs(s); 2249 2250 uname(&utsname); 2251 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; 2252 2253 /* 2254 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. 2255 * In order to use vm86 mode, an EPT identity map and a TSS are needed. 2256 * Since these must be part of guest physical memory, we need to allocate 2257 * them, both by setting their start addresses in the kernel and by 2258 * creating a corresponding e820 entry. We need 4 pages before the BIOS. 2259 * 2260 * Older KVM versions may not support setting the identity map base. In 2261 * that case we need to stick with the default, i.e. a 256K maximum BIOS 2262 * size. 2263 */ 2264 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { 2265 /* Allows up to 16M BIOSes. */ 2266 identity_base = 0xfeffc000; 2267 2268 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); 2269 if (ret < 0) { 2270 return ret; 2271 } 2272 } 2273 2274 /* Set TSS base one page after EPT identity map. */ 2275 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); 2276 if (ret < 0) { 2277 return ret; 2278 } 2279 2280 /* Tell fw_cfg to notify the BIOS to reserve the range. */ 2281 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED); 2282 if (ret < 0) { 2283 fprintf(stderr, "e820_add_entry() table is full\n"); 2284 return ret; 2285 } 2286 2287 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort); 2288 if (shadow_mem != -1) { 2289 shadow_mem /= 4096; 2290 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); 2291 if (ret < 0) { 2292 return ret; 2293 } 2294 } 2295 2296 if (kvm_check_extension(s, KVM_CAP_X86_SMM) && 2297 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) && 2298 x86_machine_is_smm_enabled(X86_MACHINE(ms))) { 2299 smram_machine_done.notify = register_smram_listener; 2300 qemu_add_machine_init_done_notifier(&smram_machine_done); 2301 } 2302 2303 if (enable_cpu_pm) { 2304 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); 2305 int ret; 2306 2307 /* Work around for kernel header with a typo. TODO: fix header and drop. 
 */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE |
                              KVM_X86_DISABLE_EXITS_CSTATE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
        X86MachineState *x86ms = X86_MACHINE(ms);

        if (x86ms->bus_lock_ratelimit > 0) {
            ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
            if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
                error_report("kvm: bus lock detection unsupported");
                return -ENOTSUP;
            }
            ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
                                    KVM_BUS_LOCK_DETECTION_EXIT);
            if (ret < 0) {
                error_report("kvm: Failed to enable bus lock detection cap: %s",
                             strerror(-ret));
                return ret;
            }
            ratelimit_init(&bus_lock_ratelimit_ctrl);
            ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
                                x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
        }
    }

    return 0;
}
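
/*
 * Illustrative sketch, added for exposition (this helper is not used by any
 * code in this file): the shape of the packed SegmentCache.flags word that
 * set_seg()/get_seg() below translate to and from struct kvm_segment.  A
 * flat 32-bit ring-0 code segment, for example, carries type 11
 * (execute/read, accessed) plus the S, P, D/B and G bits:
 */
static inline uint32_t example_flat_code_seg_flags(void)
{
    return (11 << DESC_TYPE_SHIFT) | /* type: execute/read, accessed */
           DESC_S_MASK |             /* code/data segment, not system */
           DESC_P_MASK |             /* present */
           DESC_B_MASK |             /* 32-bit default operand size */
           DESC_G_MASK;              /* 4KiB limit granularity */
}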

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    void *xsave = env->xsave_buf;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }
    x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding,
0, sizeof sregs.gdt.padding); 2542 2543 sregs.cr0 = env->cr[0]; 2544 sregs.cr2 = env->cr[2]; 2545 sregs.cr3 = env->cr[3]; 2546 sregs.cr4 = env->cr[4]; 2547 2548 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 2549 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 2550 2551 sregs.efer = env->efer; 2552 2553 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 2554 } 2555 2556 static void kvm_msr_buf_reset(X86CPU *cpu) 2557 { 2558 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); 2559 } 2560 2561 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) 2562 { 2563 struct kvm_msrs *msrs = cpu->kvm_msr_buf; 2564 void *limit = ((void *)msrs) + MSR_BUF_SIZE; 2565 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; 2566 2567 assert((void *)(entry + 1) <= limit); 2568 2569 entry->index = index; 2570 entry->reserved = 0; 2571 entry->data = value; 2572 msrs->nmsrs++; 2573 } 2574 2575 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) 2576 { 2577 kvm_msr_buf_reset(cpu); 2578 kvm_msr_entry_add(cpu, index, value); 2579 2580 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 2581 } 2582 2583 void kvm_put_apicbase(X86CPU *cpu, uint64_t value) 2584 { 2585 int ret; 2586 2587 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); 2588 assert(ret == 1); 2589 } 2590 2591 static int kvm_put_tscdeadline_msr(X86CPU *cpu) 2592 { 2593 CPUX86State *env = &cpu->env; 2594 int ret; 2595 2596 if (!has_msr_tsc_deadline) { 2597 return 0; 2598 } 2599 2600 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); 2601 if (ret < 0) { 2602 return ret; 2603 } 2604 2605 assert(ret == 1); 2606 return 0; 2607 } 2608 2609 /* 2610 * Provide a separate write service for the feature control MSR in order to 2611 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done 2612 * before writing any other state because forcibly leaving nested mode 2613 * invalidates the VCPU state. 2614 */ 2615 static int kvm_put_msr_feature_control(X86CPU *cpu) 2616 { 2617 int ret; 2618 2619 if (!has_msr_feature_control) { 2620 return 0; 2621 } 2622 2623 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, 2624 cpu->env.msr_ia32_feature_control); 2625 if (ret < 0) { 2626 return ret; 2627 } 2628 2629 assert(ret == 1); 2630 return 0; 2631 } 2632 2633 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features) 2634 { 2635 uint32_t default1, can_be_one, can_be_zero; 2636 uint32_t must_be_one; 2637 2638 switch (index) { 2639 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 2640 default1 = 0x00000016; 2641 break; 2642 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 2643 default1 = 0x0401e172; 2644 break; 2645 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 2646 default1 = 0x000011ff; 2647 break; 2648 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 2649 default1 = 0x00036dff; 2650 break; 2651 case MSR_IA32_VMX_PROCBASED_CTLS2: 2652 default1 = 0; 2653 break; 2654 default: 2655 abort(); 2656 } 2657 2658 /* If a feature bit is set, the control can be either set or clear. 2659 * Otherwise the value is limited to either 0 or 1 by default1. 2660 */ 2661 can_be_one = features | default1; 2662 can_be_zero = features | ~default1; 2663 must_be_one = ~can_be_zero; 2664 2665 /* 2666 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one). 2667 * Bit 32:63 -> 1 if the control bit can be one. 
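     * Worked example with made-up numbers (not from any real control MSR):
     * if default1 = 0x2 and features = 0x8, then bit 1 must be 1 (set in
     * both words), bit 3 may be either 0 or 1 (clear in the low word, set
     * in the high word), and all other controls must be 0 (clear in both).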
2668 */ 2669 return must_be_one | (((uint64_t)can_be_one) << 32); 2670 } 2671 2672 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) 2673 { 2674 uint64_t kvm_vmx_basic = 2675 kvm_arch_get_supported_msr_feature(kvm_state, 2676 MSR_IA32_VMX_BASIC); 2677 2678 if (!kvm_vmx_basic) { 2679 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0), 2680 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail. 2681 */ 2682 return; 2683 } 2684 2685 uint64_t kvm_vmx_misc = 2686 kvm_arch_get_supported_msr_feature(kvm_state, 2687 MSR_IA32_VMX_MISC); 2688 uint64_t kvm_vmx_ept_vpid = 2689 kvm_arch_get_supported_msr_feature(kvm_state, 2690 MSR_IA32_VMX_EPT_VPID_CAP); 2691 2692 /* 2693 * If the guest is 64-bit, a value of 1 is allowed for the host address 2694 * space size vmexit control. 2695 */ 2696 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM 2697 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0; 2698 2699 /* 2700 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should 2701 * not change them for backwards compatibility. 2702 */ 2703 uint64_t fixed_vmx_basic = kvm_vmx_basic & 2704 (MSR_VMX_BASIC_VMCS_REVISION_MASK | 2705 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK | 2706 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK); 2707 2708 /* 2709 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can 2710 * change in the future but are always zero for now, clear them to be 2711 * future proof. Bits 32-63 in theory could change, though KVM does 2712 * not support dual-monitor treatment and probably never will; mask 2713 * them out as well. 2714 */ 2715 uint64_t fixed_vmx_misc = kvm_vmx_misc & 2716 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK | 2717 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK); 2718 2719 /* 2720 * EPT memory types should not change either, so we do not bother 2721 * adding features for them. 2722 */ 2723 uint64_t fixed_vmx_ept_mask = 2724 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ? 2725 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0); 2726 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask; 2727 2728 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 2729 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 2730 f[FEAT_VMX_PROCBASED_CTLS])); 2731 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS, 2732 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS, 2733 f[FEAT_VMX_PINBASED_CTLS])); 2734 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS, 2735 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS, 2736 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit); 2737 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS, 2738 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS, 2739 f[FEAT_VMX_ENTRY_CTLS])); 2740 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2, 2741 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2, 2742 f[FEAT_VMX_SECONDARY_CTLS])); 2743 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP, 2744 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid); 2745 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC, 2746 f[FEAT_VMX_BASIC] | fixed_vmx_basic); 2747 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC, 2748 f[FEAT_VMX_MISC] | fixed_vmx_misc); 2749 if (has_msr_vmx_vmfunc) { 2750 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]); 2751 } 2752 2753 /* 2754 * Just to be safe, write these with constant values. The CRn_FIXED1 2755 * MSRs are generated by KVM based on the vCPU's CPUID. 
2756 */ 2757 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0, 2758 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK); 2759 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, 2760 CR4_VMXE_MASK); 2761 2762 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { 2763 /* TSC multiplier (0x2032). */ 2764 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); 2765 } else { 2766 /* Preemption timer (0x482E). */ 2767 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E); 2768 } 2769 } 2770 2771 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f) 2772 { 2773 uint64_t kvm_perf_cap = 2774 kvm_arch_get_supported_msr_feature(kvm_state, 2775 MSR_IA32_PERF_CAPABILITIES); 2776 2777 if (kvm_perf_cap) { 2778 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES, 2779 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]); 2780 } 2781 } 2782 2783 static int kvm_buf_set_msrs(X86CPU *cpu) 2784 { 2785 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 2786 if (ret < 0) { 2787 return ret; 2788 } 2789 2790 if (ret < cpu->kvm_msr_buf->nmsrs) { 2791 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 2792 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, 2793 (uint32_t)e->index, (uint64_t)e->data); 2794 } 2795 2796 assert(ret == cpu->kvm_msr_buf->nmsrs); 2797 return 0; 2798 } 2799 2800 static void kvm_init_msrs(X86CPU *cpu) 2801 { 2802 CPUX86State *env = &cpu->env; 2803 2804 kvm_msr_buf_reset(cpu); 2805 if (has_msr_arch_capabs) { 2806 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, 2807 env->features[FEAT_ARCH_CAPABILITIES]); 2808 } 2809 2810 if (has_msr_core_capabs) { 2811 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, 2812 env->features[FEAT_CORE_CAPABILITY]); 2813 } 2814 2815 if (has_msr_perf_capabs && cpu->enable_pmu) { 2816 kvm_msr_entry_add_perf(cpu, env->features); 2817 } 2818 2819 if (has_msr_ucode_rev) { 2820 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); 2821 } 2822 2823 /* 2824 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but 2825 * all kernels with MSR features should have them. 
2826 */ 2827 if (kvm_feature_msrs && cpu_has_vmx(env)) { 2828 kvm_msr_entry_add_vmx(cpu, env->features); 2829 } 2830 2831 assert(kvm_buf_set_msrs(cpu) == 0); 2832 } 2833 2834 static int kvm_put_msrs(X86CPU *cpu, int level) 2835 { 2836 CPUX86State *env = &cpu->env; 2837 int i; 2838 2839 kvm_msr_buf_reset(cpu); 2840 2841 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); 2842 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); 2843 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); 2844 kvm_msr_entry_add(cpu, MSR_PAT, env->pat); 2845 if (has_msr_star) { 2846 kvm_msr_entry_add(cpu, MSR_STAR, env->star); 2847 } 2848 if (has_msr_hsave_pa) { 2849 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); 2850 } 2851 if (has_msr_tsc_aux) { 2852 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); 2853 } 2854 if (has_msr_tsc_adjust) { 2855 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); 2856 } 2857 if (has_msr_misc_enable) { 2858 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 2859 env->msr_ia32_misc_enable); 2860 } 2861 if (has_msr_smbase) { 2862 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); 2863 } 2864 if (has_msr_smi_count) { 2865 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); 2866 } 2867 if (has_msr_pkrs) { 2868 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs); 2869 } 2870 if (has_msr_bndcfgs) { 2871 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); 2872 } 2873 if (has_msr_xss) { 2874 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); 2875 } 2876 if (has_msr_umwait) { 2877 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait); 2878 } 2879 if (has_msr_spec_ctrl) { 2880 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); 2881 } 2882 if (has_msr_tsx_ctrl) { 2883 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); 2884 } 2885 if (has_msr_virt_ssbd) { 2886 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); 2887 } 2888 2889 #ifdef TARGET_X86_64 2890 if (lm_capable_kernel) { 2891 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); 2892 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); 2893 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); 2894 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); 2895 } 2896 #endif 2897 2898 /* 2899 * The following MSRs have side effects on the guest or are too heavy 2900 * for normal writeback. Limit them to reset or full state updates. 2901 */ 2902 if (level >= KVM_PUT_RESET_STATE) { 2903 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); 2904 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); 2905 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); 2906 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 2907 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); 2908 } 2909 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 2910 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); 2911 } 2912 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 2913 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); 2914 } 2915 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 2916 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); 2917 } 2918 2919 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 2920 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); 2921 } 2922 2923 if (has_architectural_pmu_version > 0) { 2924 if (has_architectural_pmu_version > 1) { 2925 /* Stop the counter. 
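             * Clearing FIXED_CTR_CTRL and GLOBAL_CTRL first should keep the
             * PMU from counting while the individual counter values are
             * rewritten; the saved control values are restored once the
             * counters are in place ("start the PMU" below).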
*/ 2926 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 2927 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 2928 } 2929 2930 /* Set the counter values. */ 2931 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 2932 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 2933 env->msr_fixed_counters[i]); 2934 } 2935 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 2936 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 2937 env->msr_gp_counters[i]); 2938 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 2939 env->msr_gp_evtsel[i]); 2940 } 2941 if (has_architectural_pmu_version > 1) { 2942 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 2943 env->msr_global_status); 2944 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 2945 env->msr_global_ovf_ctrl); 2946 2947 /* Now start the PMU. */ 2948 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 2949 env->msr_fixed_ctr_ctrl); 2950 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 2951 env->msr_global_ctrl); 2952 } 2953 } 2954 /* 2955 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, 2956 * only sync them to KVM on the first cpu 2957 */ 2958 if (current_cpu == first_cpu) { 2959 if (has_msr_hv_hypercall) { 2960 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 2961 env->msr_hv_guest_os_id); 2962 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 2963 env->msr_hv_hypercall); 2964 } 2965 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 2966 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 2967 env->msr_hv_tsc); 2968 } 2969 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 2970 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 2971 env->msr_hv_reenlightenment_control); 2972 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 2973 env->msr_hv_tsc_emulation_control); 2974 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 2975 env->msr_hv_tsc_emulation_status); 2976 } 2977 } 2978 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 2979 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 2980 env->msr_hv_vapic); 2981 } 2982 if (has_msr_hv_crash) { 2983 int j; 2984 2985 for (j = 0; j < HV_CRASH_PARAMS; j++) 2986 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 2987 env->msr_hv_crash_params[j]); 2988 2989 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); 2990 } 2991 if (has_msr_hv_runtime) { 2992 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); 2993 } 2994 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) 2995 && hv_vpindex_settable) { 2996 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, 2997 hyperv_vp_index(CPU(cpu))); 2998 } 2999 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3000 int j; 3001 3002 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); 3003 3004 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 3005 env->msr_hv_synic_control); 3006 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 3007 env->msr_hv_synic_evt_page); 3008 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 3009 env->msr_hv_synic_msg_page); 3010 3011 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { 3012 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, 3013 env->msr_hv_synic_sint[j]); 3014 } 3015 } 3016 if (has_msr_hv_stimer) { 3017 int j; 3018 3019 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { 3020 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, 3021 env->msr_hv_stimer_config[j]); 3022 } 3023 3024 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { 3025 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, 3026 env->msr_hv_stimer_count[j]); 3027 } 
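
        /*
         * Note the stride of 2 used above: each Hyper-V synthetic timer has
         * a CONFIG and a COUNT MSR laid out pairwise from
         * HV_X64_MSR_STIMER0_CONFIG, so timer j's registers live at
         * STIMER0_CONFIG + j * 2 and STIMER0_COUNT + j * 2.
         */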
3028 } 3029 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3030 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); 3031 3032 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); 3033 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); 3034 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); 3035 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); 3036 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); 3037 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); 3038 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); 3039 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); 3040 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); 3041 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); 3042 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); 3043 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); 3044 for (i = 0; i < MSR_MTRRcap_VCNT; i++) { 3045 /* The CPU GPs if we write to a bit above the physical limit of 3046 * the host CPU (and KVM emulates that) 3047 */ 3048 uint64_t mask = env->mtrr_var[i].mask; 3049 mask &= phys_mask; 3050 3051 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 3052 env->mtrr_var[i].base); 3053 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); 3054 } 3055 } 3056 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { 3057 int addr_num = kvm_arch_get_supported_cpuid(kvm_state, 3058 0x14, 1, R_EAX) & 0x7; 3059 3060 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 3061 env->msr_rtit_ctrl); 3062 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 3063 env->msr_rtit_status); 3064 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 3065 env->msr_rtit_output_base); 3066 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 3067 env->msr_rtit_output_mask); 3068 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 3069 env->msr_rtit_cr3_match); 3070 for (i = 0; i < addr_num; i++) { 3071 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 3072 env->msr_rtit_addrs[i]); 3073 } 3074 } 3075 3076 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see 3077 * kvm_put_msr_feature_control. 
*/ 3078 } 3079 3080 if (env->mcg_cap) { 3081 int i; 3082 3083 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); 3084 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); 3085 if (has_msr_mcg_ext_ctl) { 3086 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); 3087 } 3088 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3089 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); 3090 } 3091 } 3092 3093 return kvm_buf_set_msrs(cpu); 3094 } 3095 3096 3097 static int kvm_get_fpu(X86CPU *cpu) 3098 { 3099 CPUX86State *env = &cpu->env; 3100 struct kvm_fpu fpu; 3101 int i, ret; 3102 3103 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); 3104 if (ret < 0) { 3105 return ret; 3106 } 3107 3108 env->fpstt = (fpu.fsw >> 11) & 7; 3109 env->fpus = fpu.fsw; 3110 env->fpuc = fpu.fcw; 3111 env->fpop = fpu.last_opcode; 3112 env->fpip = fpu.last_ip; 3113 env->fpdp = fpu.last_dp; 3114 for (i = 0; i < 8; ++i) { 3115 env->fptags[i] = !((fpu.ftwx >> i) & 1); 3116 } 3117 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); 3118 for (i = 0; i < CPU_NB_REGS; i++) { 3119 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]); 3120 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]); 3121 } 3122 env->mxcsr = fpu.mxcsr; 3123 3124 return 0; 3125 } 3126 3127 static int kvm_get_xsave(X86CPU *cpu) 3128 { 3129 CPUX86State *env = &cpu->env; 3130 void *xsave = env->xsave_buf; 3131 int ret; 3132 3133 if (!has_xsave) { 3134 return kvm_get_fpu(cpu); 3135 } 3136 3137 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); 3138 if (ret < 0) { 3139 return ret; 3140 } 3141 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len); 3142 3143 return 0; 3144 } 3145 3146 static int kvm_get_xcrs(X86CPU *cpu) 3147 { 3148 CPUX86State *env = &cpu->env; 3149 int i, ret; 3150 struct kvm_xcrs xcrs; 3151 3152 if (!has_xcrs) { 3153 return 0; 3154 } 3155 3156 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); 3157 if (ret < 0) { 3158 return ret; 3159 } 3160 3161 for (i = 0; i < xcrs.nr_xcrs; i++) { 3162 /* Only support xcr0 now */ 3163 if (xcrs.xcrs[i].xcr == 0) { 3164 env->xcr0 = xcrs.xcrs[i].value; 3165 break; 3166 } 3167 } 3168 return 0; 3169 } 3170 3171 static int kvm_get_sregs(X86CPU *cpu) 3172 { 3173 CPUX86State *env = &cpu->env; 3174 struct kvm_sregs sregs; 3175 int bit, i, ret; 3176 3177 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 3178 if (ret < 0) { 3179 return ret; 3180 } 3181 3182 /* There can only be one pending IRQ set in the bitmap at a time, so try 3183 to find it and save its number instead (-1 for none). 
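       For example, a pending vector 0x21 shows up as bit 33 of
       interrupt_bitmap[0] and is saved below as interrupt_injected == 0x21.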
*/ 3184 env->interrupt_injected = -1; 3185 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) { 3186 if (sregs.interrupt_bitmap[i]) { 3187 bit = ctz64(sregs.interrupt_bitmap[i]); 3188 env->interrupt_injected = i * 64 + bit; 3189 break; 3190 } 3191 } 3192 3193 get_seg(&env->segs[R_CS], &sregs.cs); 3194 get_seg(&env->segs[R_DS], &sregs.ds); 3195 get_seg(&env->segs[R_ES], &sregs.es); 3196 get_seg(&env->segs[R_FS], &sregs.fs); 3197 get_seg(&env->segs[R_GS], &sregs.gs); 3198 get_seg(&env->segs[R_SS], &sregs.ss); 3199 3200 get_seg(&env->tr, &sregs.tr); 3201 get_seg(&env->ldt, &sregs.ldt); 3202 3203 env->idt.limit = sregs.idt.limit; 3204 env->idt.base = sregs.idt.base; 3205 env->gdt.limit = sregs.gdt.limit; 3206 env->gdt.base = sregs.gdt.base; 3207 3208 env->cr[0] = sregs.cr0; 3209 env->cr[2] = sregs.cr2; 3210 env->cr[3] = sregs.cr3; 3211 env->cr[4] = sregs.cr4; 3212 3213 env->efer = sregs.efer; 3214 3215 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 3216 x86_update_hflags(env); 3217 3218 return 0; 3219 } 3220 3221 static int kvm_get_msrs(X86CPU *cpu) 3222 { 3223 CPUX86State *env = &cpu->env; 3224 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; 3225 int ret, i; 3226 uint64_t mtrr_top_bits; 3227 3228 kvm_msr_buf_reset(cpu); 3229 3230 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); 3231 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); 3232 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); 3233 kvm_msr_entry_add(cpu, MSR_PAT, 0); 3234 if (has_msr_star) { 3235 kvm_msr_entry_add(cpu, MSR_STAR, 0); 3236 } 3237 if (has_msr_hsave_pa) { 3238 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); 3239 } 3240 if (has_msr_tsc_aux) { 3241 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); 3242 } 3243 if (has_msr_tsc_adjust) { 3244 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); 3245 } 3246 if (has_msr_tsc_deadline) { 3247 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); 3248 } 3249 if (has_msr_misc_enable) { 3250 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); 3251 } 3252 if (has_msr_smbase) { 3253 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); 3254 } 3255 if (has_msr_smi_count) { 3256 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0); 3257 } 3258 if (has_msr_feature_control) { 3259 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); 3260 } 3261 if (has_msr_pkrs) { 3262 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0); 3263 } 3264 if (has_msr_bndcfgs) { 3265 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); 3266 } 3267 if (has_msr_xss) { 3268 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); 3269 } 3270 if (has_msr_umwait) { 3271 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0); 3272 } 3273 if (has_msr_spec_ctrl) { 3274 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); 3275 } 3276 if (has_msr_tsx_ctrl) { 3277 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); 3278 } 3279 if (has_msr_virt_ssbd) { 3280 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0); 3281 } 3282 if (!env->tsc_valid) { 3283 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); 3284 env->tsc_valid = !runstate_is_running(); 3285 } 3286 3287 #ifdef TARGET_X86_64 3288 if (lm_capable_kernel) { 3289 kvm_msr_entry_add(cpu, MSR_CSTAR, 0); 3290 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); 3291 kvm_msr_entry_add(cpu, MSR_FMASK, 0); 3292 kvm_msr_entry_add(cpu, MSR_LSTAR, 0); 3293 } 3294 #endif 3295 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); 3296 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); 3297 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3298 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0); 3299 } 3300 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3301 
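        /* As with every entry queued up in this function, the 0 is only a
         * placeholder; KVM_GET_MSRS below fills in the actual value. */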
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); 3302 } 3303 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3304 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); 3305 } 3306 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3307 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); 3308 } 3309 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3310 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); 3311 } 3312 if (has_architectural_pmu_version > 0) { 3313 if (has_architectural_pmu_version > 1) { 3314 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3315 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3316 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); 3317 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); 3318 } 3319 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3320 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); 3321 } 3322 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3323 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); 3324 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); 3325 } 3326 } 3327 3328 if (env->mcg_cap) { 3329 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); 3330 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); 3331 if (has_msr_mcg_ext_ctl) { 3332 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); 3333 } 3334 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3335 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0); 3336 } 3337 } 3338 3339 if (has_msr_hv_hypercall) { 3340 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); 3341 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); 3342 } 3343 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 3344 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); 3345 } 3346 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 3347 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); 3348 } 3349 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 3350 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); 3351 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); 3352 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); 3353 } 3354 if (has_msr_hv_crash) { 3355 int j; 3356 3357 for (j = 0; j < HV_CRASH_PARAMS; j++) { 3358 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); 3359 } 3360 } 3361 if (has_msr_hv_runtime) { 3362 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); 3363 } 3364 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3365 uint32_t msr; 3366 3367 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); 3368 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); 3369 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); 3370 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { 3371 kvm_msr_entry_add(cpu, msr, 0); 3372 } 3373 } 3374 if (has_msr_hv_stimer) { 3375 uint32_t msr; 3376 3377 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; 3378 msr++) { 3379 kvm_msr_entry_add(cpu, msr, 0); 3380 } 3381 } 3382 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3383 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); 3384 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); 3385 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); 3386 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); 3387 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); 3388 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); 3389 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); 3390 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); 3391 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0); 3392 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0); 3393 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0); 3394 
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a   10..0: must be zero
     * b      11: valid bit
     * c n-1..12: actual mask bits
     * d   51..n: reserved, must be zero
     * e  63..52: reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to. For example, with phys_bits == 40, bits
     * 51..40 are filled in on every mask read back below.
     */

    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
3513 case MSR_IA32_MISC_ENABLE: 3514 env->msr_ia32_misc_enable = msrs[i].data; 3515 break; 3516 case MSR_IA32_SMBASE: 3517 env->smbase = msrs[i].data; 3518 break; 3519 case MSR_SMI_COUNT: 3520 env->msr_smi_count = msrs[i].data; 3521 break; 3522 case MSR_IA32_FEATURE_CONTROL: 3523 env->msr_ia32_feature_control = msrs[i].data; 3524 break; 3525 case MSR_IA32_BNDCFGS: 3526 env->msr_bndcfgs = msrs[i].data; 3527 break; 3528 case MSR_IA32_XSS: 3529 env->xss = msrs[i].data; 3530 break; 3531 case MSR_IA32_UMWAIT_CONTROL: 3532 env->umwait = msrs[i].data; 3533 break; 3534 case MSR_IA32_PKRS: 3535 env->pkrs = msrs[i].data; 3536 break; 3537 default: 3538 if (msrs[i].index >= MSR_MC0_CTL && 3539 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { 3540 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; 3541 } 3542 break; 3543 case MSR_KVM_ASYNC_PF_EN: 3544 env->async_pf_en_msr = msrs[i].data; 3545 break; 3546 case MSR_KVM_ASYNC_PF_INT: 3547 env->async_pf_int_msr = msrs[i].data; 3548 break; 3549 case MSR_KVM_PV_EOI_EN: 3550 env->pv_eoi_en_msr = msrs[i].data; 3551 break; 3552 case MSR_KVM_STEAL_TIME: 3553 env->steal_time_msr = msrs[i].data; 3554 break; 3555 case MSR_KVM_POLL_CONTROL: { 3556 env->poll_control_msr = msrs[i].data; 3557 break; 3558 } 3559 case MSR_CORE_PERF_FIXED_CTR_CTRL: 3560 env->msr_fixed_ctr_ctrl = msrs[i].data; 3561 break; 3562 case MSR_CORE_PERF_GLOBAL_CTRL: 3563 env->msr_global_ctrl = msrs[i].data; 3564 break; 3565 case MSR_CORE_PERF_GLOBAL_STATUS: 3566 env->msr_global_status = msrs[i].data; 3567 break; 3568 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 3569 env->msr_global_ovf_ctrl = msrs[i].data; 3570 break; 3571 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: 3572 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; 3573 break; 3574 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: 3575 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; 3576 break; 3577 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: 3578 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; 3579 break; 3580 case HV_X64_MSR_HYPERCALL: 3581 env->msr_hv_hypercall = msrs[i].data; 3582 break; 3583 case HV_X64_MSR_GUEST_OS_ID: 3584 env->msr_hv_guest_os_id = msrs[i].data; 3585 break; 3586 case HV_X64_MSR_APIC_ASSIST_PAGE: 3587 env->msr_hv_vapic = msrs[i].data; 3588 break; 3589 case HV_X64_MSR_REFERENCE_TSC: 3590 env->msr_hv_tsc = msrs[i].data; 3591 break; 3592 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3593 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; 3594 break; 3595 case HV_X64_MSR_VP_RUNTIME: 3596 env->msr_hv_runtime = msrs[i].data; 3597 break; 3598 case HV_X64_MSR_SCONTROL: 3599 env->msr_hv_synic_control = msrs[i].data; 3600 break; 3601 case HV_X64_MSR_SIEFP: 3602 env->msr_hv_synic_evt_page = msrs[i].data; 3603 break; 3604 case HV_X64_MSR_SIMP: 3605 env->msr_hv_synic_msg_page = msrs[i].data; 3606 break; 3607 case HV_X64_MSR_SINT0 ... 
HV_X64_MSR_SINT15: 3608 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; 3609 break; 3610 case HV_X64_MSR_STIMER0_CONFIG: 3611 case HV_X64_MSR_STIMER1_CONFIG: 3612 case HV_X64_MSR_STIMER2_CONFIG: 3613 case HV_X64_MSR_STIMER3_CONFIG: 3614 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = 3615 msrs[i].data; 3616 break; 3617 case HV_X64_MSR_STIMER0_COUNT: 3618 case HV_X64_MSR_STIMER1_COUNT: 3619 case HV_X64_MSR_STIMER2_COUNT: 3620 case HV_X64_MSR_STIMER3_COUNT: 3621 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = 3622 msrs[i].data; 3623 break; 3624 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3625 env->msr_hv_reenlightenment_control = msrs[i].data; 3626 break; 3627 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3628 env->msr_hv_tsc_emulation_control = msrs[i].data; 3629 break; 3630 case HV_X64_MSR_TSC_EMULATION_STATUS: 3631 env->msr_hv_tsc_emulation_status = msrs[i].data; 3632 break; 3633 case MSR_MTRRdefType: 3634 env->mtrr_deftype = msrs[i].data; 3635 break; 3636 case MSR_MTRRfix64K_00000: 3637 env->mtrr_fixed[0] = msrs[i].data; 3638 break; 3639 case MSR_MTRRfix16K_80000: 3640 env->mtrr_fixed[1] = msrs[i].data; 3641 break; 3642 case MSR_MTRRfix16K_A0000: 3643 env->mtrr_fixed[2] = msrs[i].data; 3644 break; 3645 case MSR_MTRRfix4K_C0000: 3646 env->mtrr_fixed[3] = msrs[i].data; 3647 break; 3648 case MSR_MTRRfix4K_C8000: 3649 env->mtrr_fixed[4] = msrs[i].data; 3650 break; 3651 case MSR_MTRRfix4K_D0000: 3652 env->mtrr_fixed[5] = msrs[i].data; 3653 break; 3654 case MSR_MTRRfix4K_D8000: 3655 env->mtrr_fixed[6] = msrs[i].data; 3656 break; 3657 case MSR_MTRRfix4K_E0000: 3658 env->mtrr_fixed[7] = msrs[i].data; 3659 break; 3660 case MSR_MTRRfix4K_E8000: 3661 env->mtrr_fixed[8] = msrs[i].data; 3662 break; 3663 case MSR_MTRRfix4K_F0000: 3664 env->mtrr_fixed[9] = msrs[i].data; 3665 break; 3666 case MSR_MTRRfix4K_F8000: 3667 env->mtrr_fixed[10] = msrs[i].data; 3668 break; 3669 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): 3670 if (index & 1) { 3671 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | 3672 mtrr_top_bits; 3673 } else { 3674 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; 3675 } 3676 break; 3677 case MSR_IA32_SPEC_CTRL: 3678 env->spec_ctrl = msrs[i].data; 3679 break; 3680 case MSR_IA32_TSX_CTRL: 3681 env->tsx_ctrl = msrs[i].data; 3682 break; 3683 case MSR_VIRT_SSBD: 3684 env->virt_ssbd = msrs[i].data; 3685 break; 3686 case MSR_IA32_RTIT_CTL: 3687 env->msr_rtit_ctrl = msrs[i].data; 3688 break; 3689 case MSR_IA32_RTIT_STATUS: 3690 env->msr_rtit_status = msrs[i].data; 3691 break; 3692 case MSR_IA32_RTIT_OUTPUT_BASE: 3693 env->msr_rtit_output_base = msrs[i].data; 3694 break; 3695 case MSR_IA32_RTIT_OUTPUT_MASK: 3696 env->msr_rtit_output_mask = msrs[i].data; 3697 break; 3698 case MSR_IA32_RTIT_CR3_MATCH: 3699 env->msr_rtit_cr3_match = msrs[i].data; 3700 break; 3701 case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: 3702 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; 3703 break; 3704 } 3705 } 3706 3707 return 0; 3708 } 3709 3710 static int kvm_put_mp_state(X86CPU *cpu) 3711 { 3712 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; 3713 3714 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); 3715 } 3716 3717 static int kvm_get_mp_state(X86CPU *cpu) 3718 { 3719 CPUState *cs = CPU(cpu); 3720 CPUX86State *env = &cpu->env; 3721 struct kvm_mp_state mp_state; 3722 int ret; 3723 3724 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); 3725 if (ret < 0) { 3726 return ret; 3727 } 3728 env->mp_state = mp_state.mp_state; 3729 if (kvm_irqchip_in_kernel()) { 3730 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); 3731 } 3732 return 0; 3733 } 3734 3735 static int kvm_get_apic(X86CPU *cpu) 3736 { 3737 DeviceState *apic = cpu->apic_state; 3738 struct kvm_lapic_state kapic; 3739 int ret; 3740 3741 if (apic && kvm_irqchip_in_kernel()) { 3742 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic); 3743 if (ret < 0) { 3744 return ret; 3745 } 3746 3747 kvm_get_apic_state(apic, &kapic); 3748 } 3749 return 0; 3750 } 3751 3752 static int kvm_put_vcpu_events(X86CPU *cpu, int level) 3753 { 3754 CPUState *cs = CPU(cpu); 3755 CPUX86State *env = &cpu->env; 3756 struct kvm_vcpu_events events = {}; 3757 3758 if (!kvm_has_vcpu_events()) { 3759 return 0; 3760 } 3761 3762 events.flags = 0; 3763 3764 if (has_exception_payload) { 3765 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 3766 events.exception.pending = env->exception_pending; 3767 events.exception_has_payload = env->exception_has_payload; 3768 events.exception_payload = env->exception_payload; 3769 } 3770 events.exception.nr = env->exception_nr; 3771 events.exception.injected = env->exception_injected; 3772 events.exception.has_error_code = env->has_error_code; 3773 events.exception.error_code = env->error_code; 3774 3775 events.interrupt.injected = (env->interrupt_injected >= 0); 3776 events.interrupt.nr = env->interrupt_injected; 3777 events.interrupt.soft = env->soft_interrupt; 3778 3779 events.nmi.injected = env->nmi_injected; 3780 events.nmi.pending = env->nmi_pending; 3781 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); 3782 3783 events.sipi_vector = env->sipi_vector; 3784 3785 if (has_msr_smbase) { 3786 events.smi.smm = !!(env->hflags & HF_SMM_MASK); 3787 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); 3788 if (kvm_irqchip_in_kernel()) { 3789 /* As soon as these are moved to the kernel, remove them 3790 * from cs->interrupt_request. 3791 */ 3792 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; 3793 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; 3794 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); 3795 } else { 3796 /* Keep these in cs->interrupt_request. */ 3797 events.smi.pending = 0; 3798 events.smi.latched_init = 0; 3799 } 3800 /* Stop SMI delivery on old machine types to avoid a reboot 3801 * on an inward migration of an old VM. 
3802 */ 3803 if (!cpu->kvm_no_smi_migration) { 3804 events.flags |= KVM_VCPUEVENT_VALID_SMM; 3805 } 3806 } 3807 3808 if (level >= KVM_PUT_RESET_STATE) { 3809 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; 3810 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 3811 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR; 3812 } 3813 } 3814 3815 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); 3816 } 3817 3818 static int kvm_get_vcpu_events(X86CPU *cpu) 3819 { 3820 CPUX86State *env = &cpu->env; 3821 struct kvm_vcpu_events events; 3822 int ret; 3823 3824 if (!kvm_has_vcpu_events()) { 3825 return 0; 3826 } 3827 3828 memset(&events, 0, sizeof(events)); 3829 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); 3830 if (ret < 0) { 3831 return ret; 3832 } 3833 3834 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 3835 env->exception_pending = events.exception.pending; 3836 env->exception_has_payload = events.exception_has_payload; 3837 env->exception_payload = events.exception_payload; 3838 } else { 3839 env->exception_pending = 0; 3840 env->exception_has_payload = false; 3841 } 3842 env->exception_injected = events.exception.injected; 3843 env->exception_nr = 3844 (env->exception_pending || env->exception_injected) ? 3845 events.exception.nr : -1; 3846 env->has_error_code = events.exception.has_error_code; 3847 env->error_code = events.exception.error_code; 3848 3849 env->interrupt_injected = 3850 events.interrupt.injected ? events.interrupt.nr : -1; 3851 env->soft_interrupt = events.interrupt.soft; 3852 3853 env->nmi_injected = events.nmi.injected; 3854 env->nmi_pending = events.nmi.pending; 3855 if (events.nmi.masked) { 3856 env->hflags2 |= HF2_NMI_MASK; 3857 } else { 3858 env->hflags2 &= ~HF2_NMI_MASK; 3859 } 3860 3861 if (events.flags & KVM_VCPUEVENT_VALID_SMM) { 3862 if (events.smi.smm) { 3863 env->hflags |= HF_SMM_MASK; 3864 } else { 3865 env->hflags &= ~HF_SMM_MASK; 3866 } 3867 if (events.smi.pending) { 3868 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); 3869 } else { 3870 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); 3871 } 3872 if (events.smi.smm_inside_nmi) { 3873 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; 3874 } else { 3875 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; 3876 } 3877 if (events.smi.latched_init) { 3878 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); 3879 } else { 3880 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); 3881 } 3882 } 3883 3884 env->sipi_vector = events.sipi_vector; 3885 3886 return 0; 3887 } 3888 3889 static int kvm_guest_debug_workarounds(X86CPU *cpu) 3890 { 3891 CPUState *cs = CPU(cpu); 3892 CPUX86State *env = &cpu->env; 3893 int ret = 0; 3894 unsigned long reinject_trap = 0; 3895 3896 if (!kvm_has_vcpu_events()) { 3897 if (env->exception_nr == EXCP01_DB) { 3898 reinject_trap = KVM_GUESTDBG_INJECT_DB; 3899 } else if (env->exception_injected == EXCP03_INT3) { 3900 reinject_trap = KVM_GUESTDBG_INJECT_BP; 3901 } 3902 kvm_reset_exception(env); 3903 } 3904 3905 /* 3906 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF 3907 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this 3908 * by updating the debug state once again if single-stepping is on. 3909 * Another reason to call kvm_update_guest_debug here is a pending debug 3910 * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to 3911 * reinject them via SET_GUEST_DEBUG. 
3912 */
3913 if (reinject_trap ||
3914 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
3915 ret = kvm_update_guest_debug(cs, reinject_trap);
3916 }
3917 return ret;
3918 }
3919
3920 static int kvm_put_debugregs(X86CPU *cpu)
3921 {
3922 CPUX86State *env = &cpu->env;
3923 struct kvm_debugregs dbgregs;
3924 int i;
3925
3926 if (!kvm_has_debugregs()) {
3927 return 0;
3928 }
3929
3930 memset(&dbgregs, 0, sizeof(dbgregs));
3931 for (i = 0; i < 4; i++) {
3932 dbgregs.db[i] = env->dr[i];
3933 }
3934 dbgregs.dr6 = env->dr[6];
3935 dbgregs.dr7 = env->dr[7];
3936 dbgregs.flags = 0;
3937
3938 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
3939 }
3940
3941 static int kvm_get_debugregs(X86CPU *cpu)
3942 {
3943 CPUX86State *env = &cpu->env;
3944 struct kvm_debugregs dbgregs;
3945 int i, ret;
3946
3947 if (!kvm_has_debugregs()) {
3948 return 0;
3949 }
3950
3951 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
3952 if (ret < 0) {
3953 return ret;
3954 }
3955 for (i = 0; i < 4; i++) {
3956 env->dr[i] = dbgregs.db[i];
3957 }
3958 env->dr[4] = env->dr[6] = dbgregs.dr6;
3959 env->dr[5] = env->dr[7] = dbgregs.dr7;
3960
3961 return 0;
3962 }
3963
3964 static int kvm_put_nested_state(X86CPU *cpu)
3965 {
3966 CPUX86State *env = &cpu->env;
3967 int max_nested_state_len = kvm_max_nested_state_length();
3968
3969 if (!env->nested_state) {
3970 return 0;
3971 }
3972
3973 /*
3974 * Copy flags that are affected by reset from env->hflags and env->hflags2.
3975 */
3976 if (env->hflags & HF_GUEST_MASK) {
3977 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
3978 } else {
3979 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
3980 }
3981
3982 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
3983 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
3984 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
3985 } else {
3986 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
3987 }
3988
3989 assert(env->nested_state->size <= max_nested_state_len);
3990 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
3991 }
3992
3993 static int kvm_get_nested_state(X86CPU *cpu)
3994 {
3995 CPUX86State *env = &cpu->env;
3996 int max_nested_state_len = kvm_max_nested_state_length();
3997 int ret;
3998
3999 if (!env->nested_state) {
4000 return 0;
4001 }
4002
4003 /*
4004 * It is possible that migration restored a smaller size into
4005 * nested_state->hdr.size than what our kernel supports.
4006 * We preserve the migration origin's nested_state->hdr.size for the
4007 * call to KVM_SET_NESTED_STATE, but want the next call to
4008 * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
4009 */
4010 env->nested_state->size = max_nested_state_len;
4011
4012 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
4013 if (ret < 0) {
4014 return ret;
4015 }
4016
4017 /*
4018 * Copy flags that are affected by reset to env->hflags and env->hflags2.
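 * Currently that is HF_GUEST_MASK in hflags and, for SVM guests only,
 * HF2_GIF_MASK in hflags2, mirroring kvm_put_nested_state() above.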
4019 */ 4020 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) { 4021 env->hflags |= HF_GUEST_MASK; 4022 } else { 4023 env->hflags &= ~HF_GUEST_MASK; 4024 } 4025 4026 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */ 4027 if (cpu_has_svm(env)) { 4028 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) { 4029 env->hflags2 |= HF2_GIF_MASK; 4030 } else { 4031 env->hflags2 &= ~HF2_GIF_MASK; 4032 } 4033 } 4034 4035 return ret; 4036 } 4037 4038 int kvm_arch_put_registers(CPUState *cpu, int level) 4039 { 4040 X86CPU *x86_cpu = X86_CPU(cpu); 4041 int ret; 4042 4043 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 4044 4045 /* must be before kvm_put_nested_state so that EFER.SVME is set */ 4046 ret = kvm_put_sregs(x86_cpu); 4047 if (ret < 0) { 4048 return ret; 4049 } 4050 4051 if (level >= KVM_PUT_RESET_STATE) { 4052 ret = kvm_put_nested_state(x86_cpu); 4053 if (ret < 0) { 4054 return ret; 4055 } 4056 4057 ret = kvm_put_msr_feature_control(x86_cpu); 4058 if (ret < 0) { 4059 return ret; 4060 } 4061 } 4062 4063 if (level == KVM_PUT_FULL_STATE) { 4064 /* We don't check for kvm_arch_set_tsc_khz() errors here, 4065 * because TSC frequency mismatch shouldn't abort migration, 4066 * unless the user explicitly asked for a more strict TSC 4067 * setting (e.g. using an explicit "tsc-freq" option). 4068 */ 4069 kvm_arch_set_tsc_khz(cpu); 4070 } 4071 4072 ret = kvm_getput_regs(x86_cpu, 1); 4073 if (ret < 0) { 4074 return ret; 4075 } 4076 ret = kvm_put_xsave(x86_cpu); 4077 if (ret < 0) { 4078 return ret; 4079 } 4080 ret = kvm_put_xcrs(x86_cpu); 4081 if (ret < 0) { 4082 return ret; 4083 } 4084 /* must be before kvm_put_msrs */ 4085 ret = kvm_inject_mce_oldstyle(x86_cpu); 4086 if (ret < 0) { 4087 return ret; 4088 } 4089 ret = kvm_put_msrs(x86_cpu, level); 4090 if (ret < 0) { 4091 return ret; 4092 } 4093 ret = kvm_put_vcpu_events(x86_cpu, level); 4094 if (ret < 0) { 4095 return ret; 4096 } 4097 if (level >= KVM_PUT_RESET_STATE) { 4098 ret = kvm_put_mp_state(x86_cpu); 4099 if (ret < 0) { 4100 return ret; 4101 } 4102 } 4103 4104 ret = kvm_put_tscdeadline_msr(x86_cpu); 4105 if (ret < 0) { 4106 return ret; 4107 } 4108 ret = kvm_put_debugregs(x86_cpu); 4109 if (ret < 0) { 4110 return ret; 4111 } 4112 /* must be last */ 4113 ret = kvm_guest_debug_workarounds(x86_cpu); 4114 if (ret < 0) { 4115 return ret; 4116 } 4117 return 0; 4118 } 4119 4120 int kvm_arch_get_registers(CPUState *cs) 4121 { 4122 X86CPU *cpu = X86_CPU(cs); 4123 int ret; 4124 4125 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs)); 4126 4127 ret = kvm_get_vcpu_events(cpu); 4128 if (ret < 0) { 4129 goto out; 4130 } 4131 /* 4132 * KVM_GET_MPSTATE can modify CS and RIP, call it before 4133 * KVM_GET_REGS and KVM_GET_SREGS. 
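 * (Fetching the MP state may make KVM complete a latched INIT/SIPI
 * sequence, and SIPI delivery is what rewrites CS:RIP under our feet.)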
4134 */ 4135 ret = kvm_get_mp_state(cpu); 4136 if (ret < 0) { 4137 goto out; 4138 } 4139 ret = kvm_getput_regs(cpu, 0); 4140 if (ret < 0) { 4141 goto out; 4142 } 4143 ret = kvm_get_xsave(cpu); 4144 if (ret < 0) { 4145 goto out; 4146 } 4147 ret = kvm_get_xcrs(cpu); 4148 if (ret < 0) { 4149 goto out; 4150 } 4151 ret = kvm_get_sregs(cpu); 4152 if (ret < 0) { 4153 goto out; 4154 } 4155 ret = kvm_get_msrs(cpu); 4156 if (ret < 0) { 4157 goto out; 4158 } 4159 ret = kvm_get_apic(cpu); 4160 if (ret < 0) { 4161 goto out; 4162 } 4163 ret = kvm_get_debugregs(cpu); 4164 if (ret < 0) { 4165 goto out; 4166 } 4167 ret = kvm_get_nested_state(cpu); 4168 if (ret < 0) { 4169 goto out; 4170 } 4171 ret = 0; 4172 out: 4173 cpu_sync_bndcs_hflags(&cpu->env); 4174 return ret; 4175 } 4176 4177 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) 4178 { 4179 X86CPU *x86_cpu = X86_CPU(cpu); 4180 CPUX86State *env = &x86_cpu->env; 4181 int ret; 4182 4183 /* Inject NMI */ 4184 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { 4185 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { 4186 qemu_mutex_lock_iothread(); 4187 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; 4188 qemu_mutex_unlock_iothread(); 4189 DPRINTF("injected NMI\n"); 4190 ret = kvm_vcpu_ioctl(cpu, KVM_NMI); 4191 if (ret < 0) { 4192 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n", 4193 strerror(-ret)); 4194 } 4195 } 4196 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { 4197 qemu_mutex_lock_iothread(); 4198 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; 4199 qemu_mutex_unlock_iothread(); 4200 DPRINTF("injected SMI\n"); 4201 ret = kvm_vcpu_ioctl(cpu, KVM_SMI); 4202 if (ret < 0) { 4203 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n", 4204 strerror(-ret)); 4205 } 4206 } 4207 } 4208 4209 if (!kvm_pic_in_kernel()) { 4210 qemu_mutex_lock_iothread(); 4211 } 4212 4213 /* Force the VCPU out of its inner loop to process any INIT requests 4214 * or (for userspace APIC, but it is cheap to combine the checks here) 4215 * pending TPR access reports. 4216 */ 4217 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { 4218 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 4219 !(env->hflags & HF_SMM_MASK)) { 4220 cpu->exit_request = 1; 4221 } 4222 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 4223 cpu->exit_request = 1; 4224 } 4225 } 4226 4227 if (!kvm_pic_in_kernel()) { 4228 /* Try to inject an interrupt if the guest can accept it */ 4229 if (run->ready_for_interrupt_injection && 4230 (cpu->interrupt_request & CPU_INTERRUPT_HARD) && 4231 (env->eflags & IF_MASK)) { 4232 int irq; 4233 4234 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; 4235 irq = cpu_get_pic_interrupt(env); 4236 if (irq >= 0) { 4237 struct kvm_interrupt intr; 4238 4239 intr.irq = irq; 4240 DPRINTF("injected interrupt %d\n", irq); 4241 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr); 4242 if (ret < 0) { 4243 fprintf(stderr, 4244 "KVM: injection failed, interrupt lost (%s)\n", 4245 strerror(-ret)); 4246 } 4247 } 4248 } 4249 4250 /* If we have an interrupt but the guest is not ready to receive an 4251 * interrupt, request an interrupt window exit. This will 4252 * cause a return to userspace as soon as the guest is ready to 4253 * receive interrupts. 
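 * (With request_interrupt_window set, KVM should exit to userspace with
 * KVM_EXIT_IRQ_WINDOW_OPEN once injection becomes possible; we then pass
 * through kvm_arch_pre_run() again and inject via KVM_INTERRUPT above.)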
*/ 4254 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { 4255 run->request_interrupt_window = 1; 4256 } else { 4257 run->request_interrupt_window = 0; 4258 } 4259 4260 DPRINTF("setting tpr\n"); 4261 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); 4262 4263 qemu_mutex_unlock_iothread(); 4264 } 4265 } 4266 4267 static void kvm_rate_limit_on_bus_lock(void) 4268 { 4269 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1); 4270 4271 if (delay_ns) { 4272 g_usleep(delay_ns / SCALE_US); 4273 } 4274 } 4275 4276 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) 4277 { 4278 X86CPU *x86_cpu = X86_CPU(cpu); 4279 CPUX86State *env = &x86_cpu->env; 4280 4281 if (run->flags & KVM_RUN_X86_SMM) { 4282 env->hflags |= HF_SMM_MASK; 4283 } else { 4284 env->hflags &= ~HF_SMM_MASK; 4285 } 4286 if (run->if_flag) { 4287 env->eflags |= IF_MASK; 4288 } else { 4289 env->eflags &= ~IF_MASK; 4290 } 4291 if (run->flags & KVM_RUN_X86_BUS_LOCK) { 4292 kvm_rate_limit_on_bus_lock(); 4293 } 4294 4295 /* We need to protect the apic state against concurrent accesses from 4296 * different threads in case the userspace irqchip is used. */ 4297 if (!kvm_irqchip_in_kernel()) { 4298 qemu_mutex_lock_iothread(); 4299 } 4300 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); 4301 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); 4302 if (!kvm_irqchip_in_kernel()) { 4303 qemu_mutex_unlock_iothread(); 4304 } 4305 return cpu_get_mem_attrs(env); 4306 } 4307 4308 int kvm_arch_process_async_events(CPUState *cs) 4309 { 4310 X86CPU *cpu = X86_CPU(cs); 4311 CPUX86State *env = &cpu->env; 4312 4313 if (cs->interrupt_request & CPU_INTERRUPT_MCE) { 4314 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */ 4315 assert(env->mcg_cap); 4316 4317 cs->interrupt_request &= ~CPU_INTERRUPT_MCE; 4318 4319 kvm_cpu_synchronize_state(cs); 4320 4321 if (env->exception_nr == EXCP08_DBLE) { 4322 /* this means triple fault */ 4323 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 4324 cs->exit_request = 1; 4325 return 0; 4326 } 4327 kvm_queue_exception(env, EXCP12_MCHK, 0, 0); 4328 env->has_error_code = 0; 4329 4330 cs->halted = 0; 4331 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) { 4332 env->mp_state = KVM_MP_STATE_RUNNABLE; 4333 } 4334 } 4335 4336 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && 4337 !(env->hflags & HF_SMM_MASK)) { 4338 kvm_cpu_synchronize_state(cs); 4339 do_cpu_init(cpu); 4340 } 4341 4342 if (kvm_irqchip_in_kernel()) { 4343 return 0; 4344 } 4345 4346 if (cs->interrupt_request & CPU_INTERRUPT_POLL) { 4347 cs->interrupt_request &= ~CPU_INTERRUPT_POLL; 4348 apic_poll_irq(cpu->apic_state); 4349 } 4350 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && 4351 (env->eflags & IF_MASK)) || 4352 (cs->interrupt_request & CPU_INTERRUPT_NMI)) { 4353 cs->halted = 0; 4354 } 4355 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { 4356 kvm_cpu_synchronize_state(cs); 4357 do_cpu_sipi(cpu); 4358 } 4359 if (cs->interrupt_request & CPU_INTERRUPT_TPR) { 4360 cs->interrupt_request &= ~CPU_INTERRUPT_TPR; 4361 kvm_cpu_synchronize_state(cs); 4362 apic_handle_tpr_access_report(cpu->apic_state, env->eip, 4363 env->tpr_access_type); 4364 } 4365 4366 return cs->halted; 4367 } 4368 4369 static int kvm_handle_halt(X86CPU *cpu) 4370 { 4371 CPUState *cs = CPU(cpu); 4372 CPUX86State *env = &cpu->env; 4373 4374 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && 4375 (env->eflags & IF_MASK)) && 4376 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { 4377 cs->halted = 1; 4378 return EXCP_HLT; 
4379 } 4380 4381 return 0; 4382 } 4383 4384 static int kvm_handle_tpr_access(X86CPU *cpu) 4385 { 4386 CPUState *cs = CPU(cpu); 4387 struct kvm_run *run = cs->kvm_run; 4388 4389 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip, 4390 run->tpr_access.is_write ? TPR_ACCESS_WRITE 4391 : TPR_ACCESS_READ); 4392 return 1; 4393 } 4394 4395 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 4396 { 4397 static const uint8_t int3 = 0xcc; 4398 4399 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || 4400 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { 4401 return -EINVAL; 4402 } 4403 return 0; 4404 } 4405 4406 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 4407 { 4408 uint8_t int3; 4409 4410 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) { 4411 return -EINVAL; 4412 } 4413 if (int3 != 0xcc) { 4414 return 0; 4415 } 4416 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { 4417 return -EINVAL; 4418 } 4419 return 0; 4420 } 4421 4422 static struct { 4423 target_ulong addr; 4424 int len; 4425 int type; 4426 } hw_breakpoint[4]; 4427 4428 static int nb_hw_breakpoint; 4429 4430 static int find_hw_breakpoint(target_ulong addr, int len, int type) 4431 { 4432 int n; 4433 4434 for (n = 0; n < nb_hw_breakpoint; n++) { 4435 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && 4436 (hw_breakpoint[n].len == len || len == -1)) { 4437 return n; 4438 } 4439 } 4440 return -1; 4441 } 4442 4443 int kvm_arch_insert_hw_breakpoint(target_ulong addr, 4444 target_ulong len, int type) 4445 { 4446 switch (type) { 4447 case GDB_BREAKPOINT_HW: 4448 len = 1; 4449 break; 4450 case GDB_WATCHPOINT_WRITE: 4451 case GDB_WATCHPOINT_ACCESS: 4452 switch (len) { 4453 case 1: 4454 break; 4455 case 2: 4456 case 4: 4457 case 8: 4458 if (addr & (len - 1)) { 4459 return -EINVAL; 4460 } 4461 break; 4462 default: 4463 return -EINVAL; 4464 } 4465 break; 4466 default: 4467 return -ENOSYS; 4468 } 4469 4470 if (nb_hw_breakpoint == 4) { 4471 return -ENOBUFS; 4472 } 4473 if (find_hw_breakpoint(addr, len, type) >= 0) { 4474 return -EEXIST; 4475 } 4476 hw_breakpoint[nb_hw_breakpoint].addr = addr; 4477 hw_breakpoint[nb_hw_breakpoint].len = len; 4478 hw_breakpoint[nb_hw_breakpoint].type = type; 4479 nb_hw_breakpoint++; 4480 4481 return 0; 4482 } 4483 4484 int kvm_arch_remove_hw_breakpoint(target_ulong addr, 4485 target_ulong len, int type) 4486 { 4487 int n; 4488 4489 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 
1 : len, type); 4490 if (n < 0) { 4491 return -ENOENT; 4492 } 4493 nb_hw_breakpoint--; 4494 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; 4495 4496 return 0; 4497 } 4498 4499 void kvm_arch_remove_all_hw_breakpoints(void) 4500 { 4501 nb_hw_breakpoint = 0; 4502 } 4503 4504 static CPUWatchpoint hw_watchpoint; 4505 4506 static int kvm_handle_debug(X86CPU *cpu, 4507 struct kvm_debug_exit_arch *arch_info) 4508 { 4509 CPUState *cs = CPU(cpu); 4510 CPUX86State *env = &cpu->env; 4511 int ret = 0; 4512 int n; 4513 4514 if (arch_info->exception == EXCP01_DB) { 4515 if (arch_info->dr6 & DR6_BS) { 4516 if (cs->singlestep_enabled) { 4517 ret = EXCP_DEBUG; 4518 } 4519 } else { 4520 for (n = 0; n < 4; n++) { 4521 if (arch_info->dr6 & (1 << n)) { 4522 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { 4523 case 0x0: 4524 ret = EXCP_DEBUG; 4525 break; 4526 case 0x1: 4527 ret = EXCP_DEBUG; 4528 cs->watchpoint_hit = &hw_watchpoint; 4529 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 4530 hw_watchpoint.flags = BP_MEM_WRITE; 4531 break; 4532 case 0x3: 4533 ret = EXCP_DEBUG; 4534 cs->watchpoint_hit = &hw_watchpoint; 4535 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 4536 hw_watchpoint.flags = BP_MEM_ACCESS; 4537 break; 4538 } 4539 } 4540 } 4541 } 4542 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { 4543 ret = EXCP_DEBUG; 4544 } 4545 if (ret == 0) { 4546 cpu_synchronize_state(cs); 4547 assert(env->exception_nr == -1); 4548 4549 /* pass to guest */ 4550 kvm_queue_exception(env, arch_info->exception, 4551 arch_info->exception == EXCP01_DB, 4552 arch_info->dr6); 4553 env->has_error_code = 0; 4554 } 4555 4556 return ret; 4557 } 4558 4559 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) 4560 { 4561 const uint8_t type_code[] = { 4562 [GDB_BREAKPOINT_HW] = 0x0, 4563 [GDB_WATCHPOINT_WRITE] = 0x1, 4564 [GDB_WATCHPOINT_ACCESS] = 0x3 4565 }; 4566 const uint8_t len_code[] = { 4567 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 4568 }; 4569 int n; 4570 4571 if (kvm_sw_breakpoints_active(cpu)) { 4572 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 4573 } 4574 if (nb_hw_breakpoint > 0) { 4575 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 4576 dbg->arch.debugreg[7] = 0x0600; 4577 for (n = 0; n < nb_hw_breakpoint; n++) { 4578 dbg->arch.debugreg[n] = hw_breakpoint[n].addr; 4579 dbg->arch.debugreg[7] |= (2 << (n * 2)) | 4580 (type_code[hw_breakpoint[n].type] << (16 + n*4)) | 4581 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); 4582 } 4583 } 4584 } 4585 4586 static bool host_supports_vmx(void) 4587 { 4588 uint32_t ecx, unused; 4589 4590 host_cpuid(1, 0, &unused, &unused, &ecx, &unused); 4591 return ecx & CPUID_EXT_VMX; 4592 } 4593 4594 #define VMX_INVALID_GUEST_STATE 0x80000021 4595 4596 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 4597 { 4598 X86CPU *cpu = X86_CPU(cs); 4599 uint64_t code; 4600 int ret; 4601 4602 switch (run->exit_reason) { 4603 case KVM_EXIT_HLT: 4604 DPRINTF("handle_hlt\n"); 4605 qemu_mutex_lock_iothread(); 4606 ret = kvm_handle_halt(cpu); 4607 qemu_mutex_unlock_iothread(); 4608 break; 4609 case KVM_EXIT_SET_TPR: 4610 ret = 0; 4611 break; 4612 case KVM_EXIT_TPR_ACCESS: 4613 qemu_mutex_lock_iothread(); 4614 ret = kvm_handle_tpr_access(cpu); 4615 qemu_mutex_unlock_iothread(); 4616 break; 4617 case KVM_EXIT_FAIL_ENTRY: 4618 code = run->fail_entry.hardware_entry_failure_reason; 4619 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n", 4620 code); 4621 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) { 4622 
fprintf(stderr,
4623 "\nIf you're running a guest on an Intel machine without "
4624 "unrestricted mode\n"
4625 "support, the failure is most likely due to the guest "
4626 "entering an invalid\n"
4627 "state for Intel VT. For example, the guest may be running "
4628 "in big real mode,\n"
4629 "which is not supported on older Intel processors."
4630 "\n\n");
4631 }
4632 ret = -1;
4633 break;
4634 case KVM_EXIT_EXCEPTION:
4635 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
4636 run->ex.exception, run->ex.error_code);
4637 ret = -1;
4638 break;
4639 case KVM_EXIT_DEBUG:
4640 DPRINTF("kvm_exit_debug\n");
4641 qemu_mutex_lock_iothread();
4642 ret = kvm_handle_debug(cpu, &run->debug.arch);
4643 qemu_mutex_unlock_iothread();
4644 break;
4645 case KVM_EXIT_HYPERV:
4646 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
4647 break;
4648 case KVM_EXIT_IOAPIC_EOI:
4649 ioapic_eoi_broadcast(run->eoi.vector);
4650 ret = 0;
4651 break;
4652 case KVM_EXIT_X86_BUS_LOCK:
4653 /* already handled in kvm_arch_post_run */
4654 ret = 0;
4655 break;
4656 default:
4657 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
4658 ret = -1;
4659 break;
4660 }
4661
4662 return ret;
4663 }
4664
4665 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
4666 {
4667 X86CPU *cpu = X86_CPU(cs);
4668 CPUX86State *env = &cpu->env;
4669
4670 kvm_cpu_synchronize_state(cs);
4671 return !(env->cr[0] & CR0_PE_MASK) ||
4672 ((env->segs[R_CS].selector & 3) != 3);
4673 }
4674
4675 void kvm_arch_init_irq_routing(KVMState *s)
4676 {
4677 /* We know at this point that we're using the in-kernel
4678 * irqchip, so we can use irqfds, and on x86 we know
4679 * we can use msi via irqfd and GSI routing.
4680 */
4681 kvm_msi_via_irqfd_allowed = true;
4682 kvm_gsi_routing_allowed = true;
4683
4684 if (kvm_irqchip_is_split()) {
4685 int i;
4686
4687 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
4688 MSI routes for signaling interrupts to the local apics. */
4689 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
4690 if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
4691 error_report("Could not enable split IRQ mode.");
4692 exit(1);
4693 }
4694 }
4695 }
4696 }
4697
4698 int kvm_arch_irqchip_create(KVMState *s)
4699 {
4700 int ret;
4701 if (kvm_kernel_irqchip_split()) {
4702 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
4703 if (ret) {
4704 error_report("Could not enable split irqchip mode: %s",
4705 strerror(-ret));
4706 exit(1);
4707 } else {
4708 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
4709 kvm_split_irqchip = true;
4710 return 1;
4711 }
4712 } else {
4713 return 0;
4714 }
4715 }
4716
4717 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
4718 {
4719 CPUX86State *env;
4720 uint64_t ext_id;
4721
4722 if (!first_cpu) {
4723 return address;
4724 }
4725 env = &X86_CPU(first_cpu)->env;
4726 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
4727 return address;
4728 }
4729
4730 /*
4731 * If the remappable format bit is set, or the upper bits are
4732 * already set in address_hi, or the low extended bits aren't
4733 * there anyway, do nothing.
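 * Otherwise move the extended bits into place: ext_id masks address
 * bits 11-4 (0xff << MSI_ADDR_DEST_IDX_SHIFT), with bit 4 doubling as
 * the remappable-format flag tested below, and "ext_id << 35" relocates
 * bits 11-5 up to bits 46-40 of the address. For example, ext_id == 0x60
 * (bits 6-5) ends up as 0x300_0000_0000 (bits 41-40) in the result.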
4734 */
4735 ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
4736 if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
4737 return address;
4738 }
4739
4740 address &= ~ext_id;
4741 address |= ext_id << 35;
4742 return address;
4743 }
4744
4745 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
4746 uint64_t address, uint32_t data, PCIDevice *dev)
4747 {
4748 X86IOMMUState *iommu = x86_iommu_get_default();
4749
4750 if (iommu) {
4751 X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
4752
4753 if (class->int_remap) {
4754 int ret;
4755 MSIMessage src, dst;
4756
4757 src.address = route->u.msi.address_hi;
4758 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
4759 src.address |= route->u.msi.address_lo;
4760 src.data = route->u.msi.data;
4761
4762 ret = class->int_remap(iommu, &src, &dst, dev ? \
4763 pci_requester_id(dev) : \
4764 X86_IOMMU_SID_INVALID);
4765 if (ret) {
4766 trace_kvm_x86_fixup_msi_error(route->gsi);
4767 return 1;
4768 }
4769
4770 /*
4771 * Handle an untranslated compatibility format interrupt with
4772 * the extended destination ID in the low bits 11-5. */
4773 dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
4774
4775 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
4776 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
4777 route->u.msi.data = dst.data;
4778 return 0;
4779 }
4780 }
4781
4782 address = kvm_swizzle_msi_ext_dest_id(address);
4783 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
4784 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
4785 return 0;
4786 }
4787
4788 typedef struct MSIRouteEntry MSIRouteEntry;
4789
4790 struct MSIRouteEntry {
4791 PCIDevice *dev; /* Device pointer */
4792 int vector; /* MSI/MSIX vector index */
4793 int virq; /* Virtual IRQ index */
4794 QLIST_ENTRY(MSIRouteEntry) list;
4795 };
4796
4797 /* List of used GSI routes */
4798 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
4799 QLIST_HEAD_INITIALIZER(msi_route_list);
4800
4801 static void kvm_update_msi_routes_all(void *private, bool global,
4802 uint32_t index, uint32_t mask)
4803 {
4804 int cnt = 0, vector;
4805 MSIRouteEntry *entry;
4806 MSIMessage msg;
4807 PCIDevice *dev;
4808
4809 /* TODO: explicit route update */
4810 QLIST_FOREACH(entry, &msi_route_list, list) {
4811 cnt++;
4812 vector = entry->vector;
4813 dev = entry->dev;
4814 if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
4815 msg = msix_get_message(dev, vector);
4816 } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
4817 msg = msi_get_message(dev, vector);
4818 } else {
4819 /*
4820 * Either MSI/MSIX is disabled for the device, or the
4821 * specific message was masked out. Skip this one.
4822 */
4823 continue;
4824 }
4825 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
4826 }
4827 kvm_irqchip_commit_routes(kvm_state);
4828 trace_kvm_x86_update_msi_routes(cnt);
4829 }
4830
4831 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
4832 int vector, PCIDevice *dev)
4833 {
4834 static bool notify_list_inited = false;
4835 MSIRouteEntry *entry;
4836
4837 if (!dev) {
4838 /* These are (possibly) IOAPIC routes only used for split
4839 * kernel irqchip mode, while what we track here are
4840 * PCI devices only.
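 * (Such routes are added with dev == NULL from
 * kvm_arch_init_irq_routing(), so there is no PCI device to track.)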
*/
4841 return 0;
4842 }
4843
4844 entry = g_new0(MSIRouteEntry, 1);
4845 entry->dev = dev;
4846 entry->vector = vector;
4847 entry->virq = route->gsi;
4848 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
4849
4850 trace_kvm_x86_add_msi_route(route->gsi);
4851
4852 if (!notify_list_inited) {
4853 /* The first time we add a route, register ourselves on the
4854 * IOMMU's IEC notify list if needed. */
4855 X86IOMMUState *iommu = x86_iommu_get_default();
4856 if (iommu) {
4857 x86_iommu_iec_register_notifier(iommu,
4858 kvm_update_msi_routes_all,
4859 NULL);
4860 }
4861 notify_list_inited = true;
4862 }
4863 return 0;
4864 }
4865
4866 int kvm_arch_release_virq_post(int virq)
4867 {
4868 MSIRouteEntry *entry, *next;
4869 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
4870 if (entry->virq == virq) {
4871 trace_kvm_x86_remove_msi_route(virq);
4872 QLIST_REMOVE(entry, list);
4873 g_free(entry);
4874 break;
4875 }
4876 }
4877 return 0;
4878 }
4879
4880 int kvm_arch_msi_data_to_gsi(uint32_t data)
4881 {
4882 abort();
4883 }
4884
4885 bool kvm_has_waitpkg(void)
4886 {
4887 return has_msr_umwait;
4888 }
4889
4890 bool kvm_arch_cpu_check_are_resettable(void)
4891 {
4892 return !sev_es_enabled();
4893 }
4894