/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <math.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "cpu.h"
#include "host-cpu.h"
#include "vmsr_energy.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "gdbstub/enums.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/topology.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

#include CONFIG_DEVICES

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/*
 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
 * Since these must be part of guest physical memory, we need to allocate
 * them, both by setting their start addresses in the kernel and by
 * creating a corresponding e820 entry. We need 4 pages before the BIOS,
 * so this value allows up to 16M BIOSes.
 */
#define KVM_IDENTITY_BASE 0xfeffc000

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);
typedef struct {
    uint32_t msr;
    QEMURDMSRHandler *rdmsr;
    QEMUWRMSRHandler *wrmsr;
} KVMMSRHandlers;

static void kvm_init_msrs(X86CPU *cpu);
static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                           QEMUWRMSRHandler *wrmsr);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(XSAVE),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(MCE),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_xcrs;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

static const char *vm_type_name[] = {
    [KVM_X86_DEFAULT_VM] = "default",
    [KVM_X86_SEV_VM] = "SEV",
    [KVM_X86_SEV_ES_VM] = "SEV-ES",
    [KVM_X86_SNP_VM] = "SEV-SNP",
};

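/*
 * KVM_CAP_VM_TYPES reports a bitmask of the vm types this kernel can create,
 * indexed like vm_type_name[] above; e.g. a host with SEV-SNP support sets
 * BIT(KVM_X86_SNP_VM). Illustrative check (a sketch, assuming a
 * KVM_CAP_VM_TYPES-aware kernel):
 *
 *     if (kvm_check_extension(s, KVM_CAP_VM_TYPES) & BIT(KVM_X86_SEV_VM)) {
 *         ... SEV guests can be created ...
 *     }
 */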
bool kvm_is_vm_type_supported(int type)
{
    uint32_t machine_types;

    /*
     * old KVM doesn't support KVM_CAP_VM_TYPES but KVM_X86_DEFAULT_VM
     * is always supported
     */
    if (type == KVM_X86_DEFAULT_VM) {
        return true;
    }

    machine_types = kvm_check_extension(KVM_STATE(current_machine->accelerator),
                                        KVM_CAP_VM_TYPES);
    return !!(machine_types & BIT(type));
}

int kvm_get_vm_type(MachineState *ms)
{
    int kvm_type = KVM_X86_DEFAULT_VM;

    if (ms->cgs) {
        if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) {
            error_report("configuration type %s not supported for x86 guests",
                         object_get_typename(OBJECT(ms->cgs)));
            exit(1);
        }
        kvm_type = x86_confidential_guest_kvm_type(
            X86_CONFIDENTIAL_GUEST(ms->cgs));
    }

    if (!kvm_is_vm_type_supported(kvm_type)) {
        error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]);
        exit(1);
    }

    return kvm_type;
}

bool kvm_enable_hypercall(uint64_t enable_mask)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_EXIT_HYPERCALL, 0, enable_mask);
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

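/*
 * Note on the allocation protocol below: try_get_cpuid() returns NULL when
 * the kernel wants a bigger buffer (-E2BIG), and get_supported_cpuid() keeps
 * doubling 'max' until the ioctl succeeds, then caches the result for the
 * lifetime of the process (cpuid_cache is never freed).
 */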
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

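/*
 * Usage sketch for the helper below (illustrative): callers select one output
 * register of a leaf/index pair with QEMU's R_EAX..R_EDX indices, e.g.
 *
 *     if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) & CPUID_7_0_EBX_RDSEED) {
 *         ... KVM can expose RDSEED to the guest ...
 *     }
 */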
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization. */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization. */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization. */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call. QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == 0x80000007 && reg == R_EBX) {
        ret |= CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    if (current_machine->cgs) {
        ret = x86_confidential_guest_mask_cpuid_features(
            X86_CONFIDENTIAL_GUEST(current_machine->cgs),
            function, index, reg, ret);
    }
    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
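         *
         * Worked example (illustrative, applying the arithmetic below): if
         * KVM reports value = 0x0000008200000002, then must_be_one =
         * 0x00000002 and can_be_one = 0x00000082, so we return 0x00000080:
         * bit 1 is forced to 1 and thus not optional, while bit 7 is
         * genuinely optional.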
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_MISCV |
                      MCI_STATUS_ADDRV;
    uint64_t mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
    int flags = 0;

    if (!IS_AMD_CPU(env)) {
        status |= MCI_STATUS_S | MCI_STATUS_UC;
        if (code == BUS_MCEERR_AR) {
            status |= MCI_STATUS_AR | 0x134;
            mcg_status |= MCG_STATUS_EIPV;
        } else {
            status |= 0xc0;
        }
    } else {
        if (code == BUS_MCEERR_AR) {
            status |= MCI_STATUS_UC | MCI_STATUS_POISON;
            mcg_status |= MCG_STATUS_EIPV;
        } else {
            /* Setting the POISON bit for deferred errors indicates to the
             * guest kernel that the address provided by the MCE is valid
             * and usable which will ensure that the guest kernel will send
             * a SIGBUS_AO signal to the guest process. This allows for
             * more desirable behavior in the case that the guest process
             * with poisoned memory has set the MCE_KILL_EARLY prctl flag
             * which indicates that the process would prefer to handle or
             * shutdown due to the poisoned memory condition before the
             * memory has been accessed.
             *
             * While the POISON bit would not be set in a deferred error
             * sent from hardware, the bit is not meaningful for deferred
             * errors and can be reused in this scenario.
             */
            status |= MCI_STATUS_DEFERRED | MCI_STATUS_POISON;
        }
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

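/*
 * Worked example: for a host TSC of 2000000 kHz (2 GHz) the 250ppm window
 * above is 2000000 +/- 500 kHz, i.e. [1999500, 2000500]; a target frequency
 * inside that window is close enough for guest NTP to correct the drift.
 */
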
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};

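/*
 * Summary of the table above: each kvm_hyperv_properties[] entry ties an
 * hv-* CPU property to the host CPUID bits that must all be present (every
 * .flags[] element with a non-zero .func) and to other HYPERV_FEAT_* bits
 * that must be enabled first (.dependencies). hyperv_feature_supported()
 * and hv_feature_check_deps() below consume exactly this information.
 */
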
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported, fill the CPUID feature
 * leaves from the KVM_CAP_HYPERV* extensions and from the Hyper-V MSRs that
 * were detected to be present.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

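/*
 * Example of the dependency walk above (illustrative): hv-stimer declares
 * .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME), so
 * hv_feature_check_deps() visits each set bit via ctz64() and fails with a
 * descriptive error as soon as one of them is missing from hyperv_features.
 */
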
static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and the sanity of the configuration
 * (that all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu)) {
        return true;
    }

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
        return true;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}

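/*
 * For reference, hyperv_fill_cpuids() below lays the leaves out in this
 * order (with default settings): 0x40000000 vendor/max leaf, 0x40000001
 * interface, 0x40000002 version, 0x40000003 features, 0x40000004
 * enlightenment info, 0x40000005 implementation limits, then optionally
 * zeroed 0x40000006..0x40000009 plus 0x4000000A nested features, and the
 * three syndbg leaves (0x40000080..0x40000082) when hv-syndbg is enabled.
 */
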
/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
                                  HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
             cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
             (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
                  HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
                 HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}

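/*
 * Example: DEFAULT_EVMCS_VERSION is (1 << 8) | 1, i.e. the range [1..1];
 * a kernel advertising supported_evmcs_version = 0x0201 (range [1..2])
 * passes the check above, while one advertising 0x0202 ([2..2]) does not.
 */
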
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    /* Skip SynIC and VP_INDEX since they are hard deps already */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_STIMER) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        hyperv_x86_set_vmbus_recommended_features_enabled();
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

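/*
 * When KVM_CAP_XSAVE2 is available, its check-extension return value is the
 * buffer size (in bytes) the kernel needs for KVM_GET_XSAVE2 data;
 * kvm_init_xsave() below rounds that up to a page and allocates the buffer
 * page-aligned.
 */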
static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}

static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}

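/*
 * Note on the "stateful" leaf 2 below: on old Intel parts, CPUID(2).AL
 * reports how many times CPUID(2) must be executed to collect all cache
 * descriptors (1 on anything recent), hence the 'times' loop with
 * KVM_CPUID_FLAG_STATEFUL_FUNC set.
 */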
static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
                                    struct kvm_cpuid_entry2 *entries,
                                    uint32_t cpuid_i)
{
    uint32_t limit, i, j;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];
        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (!x86_has_extended_topo(env->avail_cpu_topo)) {
                cpuid_i--;
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x12:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (j > 1 && (c->eax & 0xf) != 1) {
                    break;
                }

                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14:
        case 0x1d:
        case 0x1e: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
1962 */
1963 if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
1964 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
1965 }
1966
1967 if (has_architectural_pmu_version > 1) {
1968 num_architectural_pmu_fixed_counters = edx & 0x1f;
1969
1970 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
1971 num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
1972 }
1973 }
1974 }
1975 }
1976
1977 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
1978
1979 for (i = 0x80000000; i <= limit; i++) {
1980 j = 0;
1981 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1982 goto full;
1983 }
1984 c = &entries[cpuid_i++];
1985
1986 switch (i) {
1987 case 0x8000001d:
1988 /* Query for all AMD cache information leaves */
1989 for (j = 0; ; j++) {
1990 c->function = i;
1991 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1992 c->index = j;
1993 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1994
1995 if (c->eax == 0) {
1996 break;
1997 }
1998 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1999 goto full;
2000 }
2001 c = &entries[cpuid_i++];
2002 }
2003 break;
2004 default:
2005 c->function = i;
2006 c->flags = 0;
2007 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2008 if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2009 /*
2010 * KVM already returns all zeroes if a CPUID entry is missing,
2011 * so we can omit it and avoid hitting KVM's 80-entry limit.
2012 */
2013 cpuid_i--;
2014 }
2015 break;
2016 }
2017 }
2018
2019 /* Call Centaur's CPUID instructions if they are supported. */
2020 if (env->cpuid_xlevel2 > 0) {
2021 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
2022
2023 for (i = 0xC0000000; i <= limit; i++) {
2024 j = 0;
2025 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2026 goto full;
2027 }
2028 c = &entries[cpuid_i++];
2029
2030 c->function = i;
2031 c->flags = 0;
2032 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2033 }
2034 }
2035
2036 return cpuid_i;
2037
2038 full:
2039 fprintf(stderr, "cpuid_data is full, no space for "
2040 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
2041 abort();
2042 }
2043
2044 int kvm_arch_init_vcpu(CPUState *cs)
2045 {
2046 struct {
2047 struct kvm_cpuid2 cpuid;
2048 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
2049 } cpuid_data;
2050 /*
2051 * The kernel defines these structs with padding fields so there
2052 * should be no extra padding in our cpuid_data struct.
2053 */
2054 QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
2055 sizeof(struct kvm_cpuid2) +
2056 sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
2057
2058 X86CPU *cpu = X86_CPU(cs);
2059 CPUX86State *env = &cpu->env;
2060 uint32_t cpuid_i;
2061 struct kvm_cpuid_entry2 *c;
2062 uint32_t signature[3];
2063 int kvm_base = KVM_CPUID_SIGNATURE;
2064 int max_nested_state_len;
2065 int r;
2066 Error *local_err = NULL;
2067
2068 memset(&cpuid_data, 0, sizeof(cpuid_data));
2069
2070 cpuid_i = 0;
2071
2072 has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
2073
2074 r = kvm_arch_set_tsc_khz(cs);
2075 if (r < 0) {
2076 return r;
2077 }
2078
2079 /* The vCPU's TSC frequency is either specified by the user, or follows
2080 * the value used by KVM if the former is not present. In the
2081 * latter case, we query it from KVM and record it in env->tsc_khz,
2082 * so that the vCPU's TSC frequency can be migrated later via this field.
2083 */
2084 if (!env->tsc_khz) {
2085 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
2086 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : 2087 -ENOTSUP; 2088 if (r > 0) { 2089 env->tsc_khz = r; 2090 } 2091 } 2092 2093 env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY; 2094 2095 /* 2096 * kvm_hyperv_expand_features() is called here for the second time in case 2097 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle 2098 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to 2099 * check which Hyper-V enlightenments are supported and which are not, we 2100 * can still proceed and check/expand Hyper-V enlightenments here so legacy 2101 * behavior is preserved. 2102 */ 2103 if (!kvm_hyperv_expand_features(cpu, &local_err)) { 2104 error_report_err(local_err); 2105 return -ENOSYS; 2106 } 2107 2108 if (hyperv_enabled(cpu)) { 2109 r = hyperv_init_vcpu(cpu); 2110 if (r) { 2111 return r; 2112 } 2113 2114 cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries); 2115 kvm_base = KVM_CPUID_SIGNATURE_NEXT; 2116 has_msr_hv_hypercall = true; 2117 } 2118 2119 if (cs->kvm_state->xen_version) { 2120 #ifdef CONFIG_XEN_EMU 2121 struct kvm_cpuid_entry2 *xen_max_leaf; 2122 2123 memcpy(signature, "XenVMMXenVMM", 12); 2124 2125 xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; 2126 c->function = kvm_base + XEN_CPUID_SIGNATURE; 2127 c->eax = kvm_base + XEN_CPUID_TIME; 2128 c->ebx = signature[0]; 2129 c->ecx = signature[1]; 2130 c->edx = signature[2]; 2131 2132 c = &cpuid_data.entries[cpuid_i++]; 2133 c->function = kvm_base + XEN_CPUID_VENDOR; 2134 c->eax = cs->kvm_state->xen_version; 2135 c->ebx = 0; 2136 c->ecx = 0; 2137 c->edx = 0; 2138 2139 c = &cpuid_data.entries[cpuid_i++]; 2140 c->function = kvm_base + XEN_CPUID_HVM_MSR; 2141 /* Number of hypercall-transfer pages */ 2142 c->eax = 1; 2143 /* Hypercall MSR base address */ 2144 if (hyperv_enabled(cpu)) { 2145 c->ebx = XEN_HYPERCALL_MSR_HYPERV; 2146 kvm_xen_init(cs->kvm_state, c->ebx); 2147 } else { 2148 c->ebx = XEN_HYPERCALL_MSR; 2149 } 2150 c->ecx = 0; 2151 c->edx = 0; 2152 2153 c = &cpuid_data.entries[cpuid_i++]; 2154 c->function = kvm_base + XEN_CPUID_TIME; 2155 c->eax = ((!!tsc_is_stable_and_known(env) << 1) | 2156 (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); 2157 /* default=0 (emulate if necessary) */ 2158 c->ebx = 0; 2159 /* guest tsc frequency */ 2160 c->ecx = env->user_tsc_khz; 2161 /* guest tsc incarnation (migration count) */ 2162 c->edx = 0; 2163 2164 c = &cpuid_data.entries[cpuid_i++]; 2165 c->function = kvm_base + XEN_CPUID_HVM; 2166 xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; 2167 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { 2168 c->function = kvm_base + XEN_CPUID_HVM; 2169 2170 if (cpu->xen_vapic) { 2171 c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; 2172 c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; 2173 } 2174 2175 c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; 2176 2177 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { 2178 c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; 2179 c->ebx = cs->cpu_index; 2180 } 2181 2182 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { 2183 c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; 2184 } 2185 } 2186 2187 r = kvm_xen_init_vcpu(cs); 2188 if (r) { 2189 return r; 2190 } 2191 2192 kvm_base += 0x100; 2193 #else /* CONFIG_XEN_EMU */ 2194 /* This should never happen as kvm_arch_init() would have died first. 
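* (it rejects xen_version when CONFIG_XEN_EMU is not built in).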
*/ 2195 fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n"); 2196 abort(); 2197 #endif 2198 } else if (cpu->expose_kvm) { 2199 memcpy(signature, "KVMKVMKVM\0\0\0", 12); 2200 c = &cpuid_data.entries[cpuid_i++]; 2201 c->function = KVM_CPUID_SIGNATURE | kvm_base; 2202 c->eax = KVM_CPUID_FEATURES | kvm_base; 2203 c->ebx = signature[0]; 2204 c->ecx = signature[1]; 2205 c->edx = signature[2]; 2206 2207 c = &cpuid_data.entries[cpuid_i++]; 2208 c->function = KVM_CPUID_FEATURES | kvm_base; 2209 c->eax = env->features[FEAT_KVM]; 2210 c->edx = env->features[FEAT_KVM_HINTS]; 2211 } 2212 2213 if (cpu->kvm_pv_enforce_cpuid) { 2214 r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1); 2215 if (r < 0) { 2216 fprintf(stderr, 2217 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s", 2218 strerror(-r)); 2219 abort(); 2220 } 2221 } 2222 2223 cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i); 2224 cpuid_data.cpuid.nent = cpuid_i; 2225 2226 if (((env->cpuid_version >> 8)&0xF) >= 6 2227 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 2228 (CPUID_MCE | CPUID_MCA)) { 2229 uint64_t mcg_cap, unsupported_caps; 2230 int banks; 2231 int ret; 2232 2233 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks); 2234 if (ret < 0) { 2235 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret)); 2236 return ret; 2237 } 2238 2239 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) { 2240 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)", 2241 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks); 2242 return -ENOTSUP; 2243 } 2244 2245 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK); 2246 if (unsupported_caps) { 2247 if (unsupported_caps & MCG_LMCE_P) { 2248 error_report("kvm: LMCE not supported"); 2249 return -ENOTSUP; 2250 } 2251 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64, 2252 unsupported_caps); 2253 } 2254 2255 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK; 2256 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap); 2257 if (ret < 0) { 2258 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret)); 2259 return ret; 2260 } 2261 } 2262 2263 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env); 2264 2265 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0); 2266 if (c) { 2267 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) || 2268 !!(c->ecx & CPUID_EXT_SMX); 2269 } 2270 2271 c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0); 2272 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { 2273 has_msr_feature_control = true; 2274 } 2275 2276 if (env->mcg_cap & MCG_LMCE_P) { 2277 has_msr_mcg_ext_ctl = has_msr_feature_control = true; 2278 } 2279 2280 if (!env->user_tsc_khz) { 2281 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) && 2282 invtsc_mig_blocker == NULL) { 2283 error_setg(&invtsc_mig_blocker, 2284 "State blocked by non-migratable CPU device" 2285 " (invtsc flag)"); 2286 r = migrate_add_blocker(&invtsc_mig_blocker, &local_err); 2287 if (r < 0) { 2288 error_report_err(local_err); 2289 return r; 2290 } 2291 } 2292 } 2293 2294 if (cpu->vmware_cpuid_freq 2295 /* Guests depend on 0x40000000 to detect this feature, so only expose 2296 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ 2297 && cpu->expose_kvm 2298 && kvm_base == KVM_CPUID_SIGNATURE 2299 /* TSC clock must be stable and known for this feature. 
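* (i.e. the frequency is known and either invtsc is exposed or the
 * user specified the TSC frequency explicitly).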
*/ 2300 && tsc_is_stable_and_known(env)) { 2301 2302 c = &cpuid_data.entries[cpuid_i++]; 2303 c->function = KVM_CPUID_SIGNATURE | 0x10; 2304 c->eax = env->tsc_khz; 2305 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */ 2306 c->ecx = c->edx = 0; 2307 2308 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0); 2309 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); 2310 } 2311 2312 cpuid_data.cpuid.nent = cpuid_i; 2313 2314 cpuid_data.cpuid.padding = 0; 2315 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data); 2316 if (r) { 2317 goto fail; 2318 } 2319 kvm_init_xsave(env); 2320 2321 max_nested_state_len = kvm_max_nested_state_length(); 2322 if (max_nested_state_len > 0) { 2323 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); 2324 2325 if (cpu_has_vmx(env) || cpu_has_svm(env)) { 2326 env->nested_state = g_malloc0(max_nested_state_len); 2327 env->nested_state->size = max_nested_state_len; 2328 2329 kvm_init_nested_state(env); 2330 } 2331 } 2332 2333 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); 2334 2335 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) { 2336 has_msr_tsc_aux = false; 2337 } 2338 2339 kvm_init_msrs(cpu); 2340 2341 return 0; 2342 2343 fail: 2344 migrate_del_blocker(&invtsc_mig_blocker); 2345 2346 return r; 2347 } 2348 2349 int kvm_arch_destroy_vcpu(CPUState *cs) 2350 { 2351 X86CPU *cpu = X86_CPU(cs); 2352 CPUX86State *env = &cpu->env; 2353 2354 g_free(env->xsave_buf); 2355 2356 g_free(cpu->kvm_msr_buf); 2357 cpu->kvm_msr_buf = NULL; 2358 2359 g_free(env->nested_state); 2360 env->nested_state = NULL; 2361 2362 qemu_del_vm_change_state_handler(cpu->vmsentry); 2363 2364 return 0; 2365 } 2366 2367 void kvm_arch_reset_vcpu(X86CPU *cpu) 2368 { 2369 CPUX86State *env = &cpu->env; 2370 2371 env->xcr0 = 1; 2372 if (kvm_irqchip_in_kernel()) { 2373 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE : 2374 KVM_MP_STATE_UNINITIALIZED; 2375 } else { 2376 env->mp_state = KVM_MP_STATE_RUNNABLE; 2377 } 2378 2379 /* enabled by default */ 2380 env->poll_control_msr = 1; 2381 2382 kvm_init_nested_state(env); 2383 2384 sev_es_set_reset_vector(CPU(cpu)); 2385 } 2386 2387 void kvm_arch_after_reset_vcpu(X86CPU *cpu) 2388 { 2389 CPUX86State *env = &cpu->env; 2390 int i; 2391 2392 /* 2393 * Reset SynIC after all other devices have been reset to let them remove 2394 * their SINT routes first. 2395 */ 2396 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 2397 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { 2398 env->msr_hv_synic_sint[i] = HV_SINT_MASKED; 2399 } 2400 2401 hyperv_x86_synic_reset(cpu); 2402 } 2403 } 2404 2405 void kvm_arch_do_init_vcpu(X86CPU *cpu) 2406 { 2407 CPUX86State *env = &cpu->env; 2408 2409 /* APs get directly into wait-for-SIPI state. 
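* KVM_MP_STATE_INIT_RECEIVED parks the vCPU until a SIPI arrives, which
 * matches how real APs wait after INIT.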
*/ 2410 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { 2411 env->mp_state = KVM_MP_STATE_INIT_RECEIVED; 2412 } 2413 } 2414 2415 static int kvm_get_supported_feature_msrs(KVMState *s) 2416 { 2417 int ret = 0; 2418 2419 if (kvm_feature_msrs != NULL) { 2420 return 0; 2421 } 2422 2423 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) { 2424 return 0; 2425 } 2426 2427 struct kvm_msr_list msr_list; 2428 2429 msr_list.nmsrs = 0; 2430 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list); 2431 if (ret < 0 && ret != -E2BIG) { 2432 error_report("Fetch KVM feature MSR list failed: %s", 2433 strerror(-ret)); 2434 return ret; 2435 } 2436 2437 assert(msr_list.nmsrs > 0); 2438 kvm_feature_msrs = g_malloc0(sizeof(msr_list) + 2439 msr_list.nmsrs * sizeof(msr_list.indices[0])); 2440 2441 kvm_feature_msrs->nmsrs = msr_list.nmsrs; 2442 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs); 2443 2444 if (ret < 0) { 2445 error_report("Fetch KVM feature MSR list failed: %s", 2446 strerror(-ret)); 2447 g_free(kvm_feature_msrs); 2448 kvm_feature_msrs = NULL; 2449 return ret; 2450 } 2451 2452 return 0; 2453 } 2454 2455 static int kvm_get_supported_msrs(KVMState *s) 2456 { 2457 int ret = 0; 2458 struct kvm_msr_list msr_list, *kvm_msr_list; 2459 2460 /* 2461 * Obtain MSR list from KVM. These are the MSRs that we must 2462 * save/restore. 2463 */ 2464 msr_list.nmsrs = 0; 2465 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); 2466 if (ret < 0 && ret != -E2BIG) { 2467 return ret; 2468 } 2469 /* 2470 * Old kernel modules had a bug and could write beyond the provided 2471 * memory. Allocate at least a safe amount of 1K. 2472 */ 2473 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + 2474 msr_list.nmsrs * 2475 sizeof(msr_list.indices[0]))); 2476 2477 kvm_msr_list->nmsrs = msr_list.nmsrs; 2478 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); 2479 if (ret >= 0) { 2480 int i; 2481 2482 for (i = 0; i < kvm_msr_list->nmsrs; i++) { 2483 switch (kvm_msr_list->indices[i]) { 2484 case MSR_STAR: 2485 has_msr_star = true; 2486 break; 2487 case MSR_VM_HSAVE_PA: 2488 has_msr_hsave_pa = true; 2489 break; 2490 case MSR_TSC_AUX: 2491 has_msr_tsc_aux = true; 2492 break; 2493 case MSR_TSC_ADJUST: 2494 has_msr_tsc_adjust = true; 2495 break; 2496 case MSR_IA32_TSCDEADLINE: 2497 has_msr_tsc_deadline = true; 2498 break; 2499 case MSR_IA32_SMBASE: 2500 has_msr_smbase = true; 2501 break; 2502 case MSR_SMI_COUNT: 2503 has_msr_smi_count = true; 2504 break; 2505 case MSR_IA32_MISC_ENABLE: 2506 has_msr_misc_enable = true; 2507 break; 2508 case MSR_IA32_BNDCFGS: 2509 has_msr_bndcfgs = true; 2510 break; 2511 case MSR_IA32_XSS: 2512 has_msr_xss = true; 2513 break; 2514 case MSR_IA32_UMWAIT_CONTROL: 2515 has_msr_umwait = true; 2516 break; 2517 case HV_X64_MSR_CRASH_CTL: 2518 has_msr_hv_crash = true; 2519 break; 2520 case HV_X64_MSR_RESET: 2521 has_msr_hv_reset = true; 2522 break; 2523 case HV_X64_MSR_VP_INDEX: 2524 has_msr_hv_vpindex = true; 2525 break; 2526 case HV_X64_MSR_VP_RUNTIME: 2527 has_msr_hv_runtime = true; 2528 break; 2529 case HV_X64_MSR_SCONTROL: 2530 has_msr_hv_synic = true; 2531 break; 2532 case HV_X64_MSR_STIMER0_CONFIG: 2533 has_msr_hv_stimer = true; 2534 break; 2535 case HV_X64_MSR_TSC_FREQUENCY: 2536 has_msr_hv_frequencies = true; 2537 break; 2538 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 2539 has_msr_hv_reenlightenment = true; 2540 break; 2541 case HV_X64_MSR_SYNDBG_OPTIONS: 2542 has_msr_hv_syndbg_options = true; 2543 break; 2544 case MSR_IA32_SPEC_CTRL: 2545 has_msr_spec_ctrl = true; 
2546 break; 2547 case MSR_AMD64_TSC_RATIO: 2548 has_tsc_scale_msr = true; 2549 break; 2550 case MSR_IA32_TSX_CTRL: 2551 has_msr_tsx_ctrl = true; 2552 break; 2553 case MSR_VIRT_SSBD: 2554 has_msr_virt_ssbd = true; 2555 break; 2556 case MSR_IA32_ARCH_CAPABILITIES: 2557 has_msr_arch_capabs = true; 2558 break; 2559 case MSR_IA32_CORE_CAPABILITY: 2560 has_msr_core_capabs = true; 2561 break; 2562 case MSR_IA32_PERF_CAPABILITIES: 2563 has_msr_perf_capabs = true; 2564 break; 2565 case MSR_IA32_VMX_VMFUNC: 2566 has_msr_vmx_vmfunc = true; 2567 break; 2568 case MSR_IA32_UCODE_REV: 2569 has_msr_ucode_rev = true; 2570 break; 2571 case MSR_IA32_VMX_PROCBASED_CTLS2: 2572 has_msr_vmx_procbased_ctls2 = true; 2573 break; 2574 case MSR_IA32_PKRS: 2575 has_msr_pkrs = true; 2576 break; 2577 } 2578 } 2579 } 2580 2581 g_free(kvm_msr_list); 2582 2583 return ret; 2584 } 2585 2586 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, 2587 uint32_t msr, 2588 uint64_t *val) 2589 { 2590 CPUState *cs = CPU(cpu); 2591 2592 *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ 2593 *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ 2594 2595 return true; 2596 } 2597 2598 static bool kvm_rdmsr_rapl_power_unit(X86CPU *cpu, 2599 uint32_t msr, 2600 uint64_t *val) 2601 { 2602 2603 CPUState *cs = CPU(cpu); 2604 2605 *val = cs->kvm_state->msr_energy.msr_unit; 2606 2607 return true; 2608 } 2609 2610 static bool kvm_rdmsr_pkg_power_limit(X86CPU *cpu, 2611 uint32_t msr, 2612 uint64_t *val) 2613 { 2614 2615 CPUState *cs = CPU(cpu); 2616 2617 *val = cs->kvm_state->msr_energy.msr_limit; 2618 2619 return true; 2620 } 2621 2622 static bool kvm_rdmsr_pkg_power_info(X86CPU *cpu, 2623 uint32_t msr, 2624 uint64_t *val) 2625 { 2626 2627 CPUState *cs = CPU(cpu); 2628 2629 *val = cs->kvm_state->msr_energy.msr_info; 2630 2631 return true; 2632 } 2633 2634 static bool kvm_rdmsr_pkg_energy_status(X86CPU *cpu, 2635 uint32_t msr, 2636 uint64_t *val) 2637 { 2638 2639 CPUState *cs = CPU(cpu); 2640 *val = cs->kvm_state->msr_energy.msr_value[cs->cpu_index]; 2641 2642 return true; 2643 } 2644 2645 static Notifier smram_machine_done; 2646 static KVMMemoryListener smram_listener; 2647 static AddressSpace smram_address_space; 2648 static MemoryRegion smram_as_root; 2649 static MemoryRegion smram_as_mem; 2650 2651 static void register_smram_listener(Notifier *n, void *unused) 2652 { 2653 MemoryRegion *smram = 2654 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 2655 2656 /* Outer container... */ 2657 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); 2658 memory_region_set_enabled(&smram_as_root, true); 2659 2660 /* ... with two regions inside: normal system memory with low 2661 * priority, and... 2662 */ 2663 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", 2664 get_system_memory(), 0, ~0ull); 2665 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); 2666 memory_region_set_enabled(&smram_as_mem, true); 2667 2668 if (smram) { 2669 /* ... 
SMRAM with higher priority */
2670 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2671 memory_region_set_enabled(smram, true);
2672 }
2673
2674 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2675 kvm_memory_listener_register(kvm_state, &smram_listener,
2676 &smram_address_space, 1, "kvm-smram");
2677 }
2678
2679 static void *kvm_msr_energy_thread(void *data)
2680 {
2681 KVMState *s = data;
2682 struct KVMMsrEnergy *vmsr = &s->msr_energy;
2683
2684 g_autofree vmsr_package_energy_stat *pkg_stat = NULL;
2685 g_autofree vmsr_thread_stat *thd_stat = NULL;
2686 g_autofree CPUState *cpu = NULL;
2687 g_autofree unsigned int *vpkgs_energy_stat = NULL;
2688 unsigned int num_threads = 0;
2689
2690 X86CPUTopoIDs topo_ids;
2691
2692 rcu_register_thread();
2693
2694 /* Allocate memory for each package's energy status */
2695 pkg_stat = g_new0(vmsr_package_energy_stat, vmsr->host_topo.maxpkgs);
2696
2697 /* Allocate memory for thread stats */
2698 thd_stat = g_new0(vmsr_thread_stat, 1);
2699
2700 /* Allocate memory for holding the virtual package energy counters */
2701 vpkgs_energy_stat = g_new0(unsigned int, vmsr->guest_vsockets);
2702
2703 /* Populate the max ticks of each package */
2704 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2705 /*
2706 * Maximum number of ticks per package:
2707 * time in seconds * number of ticks/second * number of cores/package
2708 * e.g. 100 ticks/second/CPU and 12 CPUs per package give 1200 ticks max
2709 */
2710 vmsr->host_topo.maxticks[i] = (MSR_ENERGY_THREAD_SLEEP_US / 1000000)
2711 * sysconf(_SC_CLK_TCK)
2712 * vmsr->host_topo.pkg_cpu_count[i];
2713 }
2714
2715 while (true) {
2716 /* Get all QEMU thread ids */
2717 g_autofree pid_t *thread_ids
2718 = vmsr_get_thread_ids(vmsr->pid, &num_threads);
2719
2720 if (thread_ids == NULL) {
2721 goto clean;
2722 }
2723
2724 thd_stat = g_renew(vmsr_thread_stat, thd_stat, num_threads);
2725 /* Unlike g_new0, a g_renew0 function doesn't exist yet...
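* so the grown array has to be zeroed by hand before reuse: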
*/
2726 memset(thd_stat, 0, num_threads * sizeof(vmsr_thread_stat));
2727
2728 /* Populate all the thread stats */
2729 for (int i = 0; i < num_threads; i++) {
2730 thd_stat[i].utime = g_new0(unsigned long long, 2);
2731 thd_stat[i].stime = g_new0(unsigned long long, 2);
2732 thd_stat[i].thread_id = thread_ids[i];
2733 vmsr_read_thread_stat(vmsr->pid,
2734 thd_stat[i].thread_id,
2735 &thd_stat[i].utime[0],
2736 &thd_stat[i].stime[0],
2737 &thd_stat[i].cpu_id);
2738 thd_stat[i].pkg_id =
2739 vmsr_get_physical_package_id(thd_stat[i].cpu_id);
2740 }
2741
2742 /* Retrieve each package's power plane energy counter */
2743 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2744 for (int j = 0; j < num_threads; j++) {
2745 /*
2746 * Use the first thread we found that ran on a CPU
2747 * of the package to read the package's energy counter
2748 */
2749 if (thd_stat[j].pkg_id == i) {
2750 pkg_stat[i].e_start =
2751 vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
2752 thd_stat[j].cpu_id,
2753 thd_stat[j].thread_id,
2754 s->msr_energy.sioc);
2755 break;
2756 }
2757 }
2758 }
2759
2760 /* Sleep a short period while the other threads are working */
2761 usleep(MSR_ENERGY_THREAD_SLEEP_US);
2762
2763 /*
2764 * Retrieve each package's power plane energy counter again and
2765 * calculate the delta for every package
2766 */
2767 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2768 for (int j = 0; j < num_threads; j++) {
2769 /*
2770 * Use the first thread we found that ran on a CPU
2771 * of the package to read the package's energy counter
2772 */
2773 if (thd_stat[j].pkg_id == i) {
2774 pkg_stat[i].e_end =
2775 vmsr_read_msr(MSR_PKG_ENERGY_STATUS,
2776 thd_stat[j].cpu_id,
2777 thd_stat[j].thread_id,
2778 s->msr_energy.sioc);
2779 /*
2780 * Protect against the case where the VM was migrated
2781 * during the sleep period, or any other case
2782 * where the energy counter might be lower after
2783 * the sleep period.
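* (MSR_PKG_ENERGY_STATUS is a wrapping counter, so a wrap during the
 * sleep also shows up as a lower reading.)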
2784 */
2785 if (pkg_stat[i].e_end > pkg_stat[i].e_start) {
2786 pkg_stat[i].e_delta =
2787 pkg_stat[i].e_end - pkg_stat[i].e_start;
2788 } else {
2789 pkg_stat[i].e_delta = 0;
2790 }
2791 break;
2792 }
2793 }
2794 }
2795
2796 /* Delta of ticks spent by each thread between the two samples */
2797 for (int i = 0; i < num_threads; i++) {
2798 vmsr_read_thread_stat(vmsr->pid,
2799 thd_stat[i].thread_id,
2800 &thd_stat[i].utime[1],
2801 &thd_stat[i].stime[1],
2802 &thd_stat[i].cpu_id);
2803
2804 if (vmsr->pid < 0) {
2805 /*
2806 * Don't count dead threads, i.e. threads that
2807 * existed before the sleep
2808 * but are gone now
2809 */
2810 thd_stat[i].delta_ticks = 0;
2811 } else {
2812 vmsr_delta_ticks(thd_stat, i);
2813 }
2814 }
2815
2816 /*
2817 * Identify the vCPU threads and
2818 * calculate the number of vCPUs per package
2819 */
2820 CPU_FOREACH(cpu) {
2821 for (int i = 0; i < num_threads; i++) {
2822 if (cpu->thread_id == thd_stat[i].thread_id) {
2823 thd_stat[i].is_vcpu = true;
2824 thd_stat[i].vcpu_id = cpu->cpu_index;
2825 pkg_stat[thd_stat[i].pkg_id].nb_vcpu++;
2826 thd_stat[i].acpi_id = kvm_arch_vcpu_id(cpu);
2827 break;
2828 }
2829 }
2830 }
2831
2832 /* Retrieve the virtual package number of each vCPU */
2833 for (int i = 0; i < vmsr->guest_cpu_list->len; i++) {
2834 for (int j = 0; j < num_threads; j++) {
2835 if ((thd_stat[j].acpi_id ==
2836 vmsr->guest_cpu_list->cpus[i].arch_id)
2837 && (thd_stat[j].is_vcpu == true)) {
2838 x86_topo_ids_from_apicid(thd_stat[j].acpi_id,
2839 &vmsr->guest_topo_info, &topo_ids);
2840 thd_stat[j].vpkg_id = topo_ids.pkg_id;
2841 }
2842 }
2843 }
2844
2845 /* Calculate the total energy of all non-vCPU threads */
2846 for (int i = 0; i < num_threads; i++) {
2847 if ((thd_stat[i].is_vcpu != true) &&
2848 (thd_stat[i].delta_ticks > 0)) {
2849 double temp;
2850 temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
2851 thd_stat[i].delta_ticks,
2852 vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
2853 pkg_stat[thd_stat[i].pkg_id].e_ratio
2854 += (uint64_t)lround(temp);
2855 }
2856 }
2857
2858 /* Spread each package's non-vCPU energy evenly across its vCPUs */
2859 for (int i = 0; i < vmsr->host_topo.maxpkgs; i++) {
2860 if (pkg_stat[i].nb_vcpu > 0) {
2861 pkg_stat[i].e_ratio = pkg_stat[i].e_ratio / pkg_stat[i].nb_vcpu;
2862 }
2863 }
2864
2865 /*
2866 * Calculate the energy for each virtual package:
2867 * package energy = sum of the energy of each vCPU belonging to it
2868 */
2869 for (int i = 0; i < num_threads; i++) {
2870 if ((thd_stat[i].is_vcpu == true) &&
2871 (thd_stat[i].delta_ticks > 0)) {
2872 double temp;
2873 temp = vmsr_get_ratio(pkg_stat[thd_stat[i].pkg_id].e_delta,
2874 thd_stat[i].delta_ticks,
2875 vmsr->host_topo.maxticks[thd_stat[i].pkg_id]);
2876 vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
2877 (uint64_t)lround(temp);
2878 vpkgs_energy_stat[thd_stat[i].vpkg_id] +=
2879 pkg_stat[thd_stat[i].pkg_id].e_ratio;
2880 }
2881 }
2882
2883 /*
2884 * Finally, populate the vmsr register of each vCPU with the total
2885 * package value to emulate real hardware, where each CPU returns the
2886 * value of the package it belongs to.
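* (vmsr->msr_value[] is what kvm_rdmsr_pkg_energy_status() returns to
 * the guest on RDMSR.)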
*/
2888 for (int i = 0; i < num_threads; i++) {
2889 if ((thd_stat[i].is_vcpu == true) &&
2890 (thd_stat[i].delta_ticks > 0)) {
2891 vmsr->msr_value[thd_stat[i].vcpu_id] =
2892 vpkgs_energy_stat[thd_stat[i].vpkg_id];
2893 }
2894 }
2895
2896 /* Free the per-thread tick arrays before the next iteration reallocates them */
2897 for (int i = 0; i < num_threads; i++) {
2898 g_free(thd_stat[i].utime);
2899 g_free(thd_stat[i].stime);
2900 }
2901 }
2902
2903 clean:
2904 rcu_unregister_thread();
2905 return NULL;
2906 }
2907
2908 static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms)
2909 {
2910 MachineClass *mc = MACHINE_GET_CLASS(ms);
2911 struct KVMMsrEnergy *r = &s->msr_energy;
2912 int ret = 0;
2913
2914 /*
2915 * Sanity checks:
2916 * 1. The host CPU must be an Intel CPU.
2917 * 2. RAPL must be enabled on the host.
2918 */
2919 if (!is_host_cpu_intel()) {
2920 error_report("The RAPL feature can only be enabled on hosts "
2921 "with Intel CPU models");
2922 ret = 1;
2923 goto out;
2924 }
2925
2926 if (!is_rapl_enabled()) {
2927 ret = 1;
2928 goto out;
2929 }
2930
2931 /* Retrieve the virtual topology */
2932 vmsr_init_topo_info(&r->guest_topo_info, ms);
2933
2934 /* Retrieve the number of vCPUs */
2935 r->guest_vcpus = ms->smp.cpus;
2936
2937 /* Retrieve the number of virtual sockets */
2938 r->guest_vsockets = ms->smp.sockets;
2939
2940 /* Allocate register memory (MSR_PKG_STATUS) for each vCPU */
2941 r->msr_value = g_new0(uint64_t, r->guest_vcpus);
2942
2943 /* Retrieve the CPUArchIdList */
2944 r->guest_cpu_list = mc->possible_cpu_arch_ids(ms);
2945
2946 /* Max number of CPUs on the host */
2947 r->host_topo.maxcpus = vmsr_get_maxcpus();
2948 if (r->host_topo.maxcpus == 0) {
2949 error_report("host max cpus = 0");
2950 ret = 1;
2951 goto out;
2952 }
2953
2954 /* Max number of packages on the host */
2955 r->host_topo.maxpkgs = vmsr_get_max_physical_package(r->host_topo.maxcpus);
2956 if (r->host_topo.maxpkgs == 0) {
2957 error_report("host max pkgs = 0");
2958 ret = 1;
2959 goto out;
2960 }
2961
2962 /* Allocate memory for each package on the host */
2963 r->host_topo.pkg_cpu_count = g_new0(unsigned int, r->host_topo.maxpkgs);
2964 r->host_topo.maxticks = g_new0(unsigned int, r->host_topo.maxpkgs);
2965
2966 vmsr_count_cpus_per_package(r->host_topo.pkg_cpu_count,
2967 r->host_topo.maxpkgs);
2968 for (int i = 0; i < r->host_topo.maxpkgs; i++) {
2969 if (r->host_topo.pkg_cpu_count[i] == 0) {
2970 error_report("cpus per package = 0 on package_%d", i);
2971 ret = 1;
2972 goto out;
2973 }
2974 }
2975
2976 /* Get the QEMU PID */
2977 r->pid = getpid();
2978
2979 /* Compute the socket path if necessary */
2980 if (s->msr_energy.socket_path == NULL) {
2981 s->msr_energy.socket_path = vmsr_compute_default_paths();
2982 }
2983
2984 /* Open the socket to the vmsr helper */
2985 s->msr_energy.sioc = vmsr_open_socket(s->msr_energy.socket_path);
2986
2987 if (s->msr_energy.sioc == NULL) {
2988 error_report("vmsr socket opening failed");
2989 ret = 1;
2990 goto out;
2991 }
2992
2993 /* These MSR values should not change */
2994 r->msr_unit = vmsr_read_msr(MSR_RAPL_POWER_UNIT, 0, r->pid,
2995 s->msr_energy.sioc);
2996 r->msr_limit = vmsr_read_msr(MSR_PKG_POWER_LIMIT, 0, r->pid,
2997 s->msr_energy.sioc);
2998 r->msr_info = vmsr_read_msr(MSR_PKG_POWER_INFO, 0, r->pid,
2999 s->msr_energy.sioc);
3000 if (r->msr_unit == 0 || r->msr_limit == 0 || r->msr_info == 0) {
3001 error_report("can't read any virtual msr");
3002 ret = 1;
3003 goto out;
3004 }
3005
3006 qemu_thread_create(&r->msr_thr, "kvm-msr",
3007 kvm_msr_energy_thread,
3008 s,
QEMU_THREAD_JOINABLE);
3009 out:
3010 return ret;
3011 }
3012
3013 int kvm_arch_get_default_type(MachineState *ms)
3014 {
3015 return 0;
3016 }
3017
3018 static int kvm_vm_enable_exception_payload(KVMState *s)
3019 {
3020 int ret = 0;
3021 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
3022 if (has_exception_payload) {
3023 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
3024 if (ret < 0) {
3025 error_report("kvm: Failed to enable exception payload cap: %s",
3026 strerror(-ret));
3027 }
3028 }
3029
3030 return ret;
3031 }
3032
3033 static int kvm_vm_enable_triple_fault_event(KVMState *s)
3034 {
3035 int ret = 0;
3036 has_triple_fault_event =
3037 kvm_check_extension(s,
3038 KVM_CAP_X86_TRIPLE_FAULT_EVENT);
3039 if (has_triple_fault_event) {
3040 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
3041 if (ret < 0) {
3042 error_report("kvm: Failed to enable triple fault event cap: %s",
3043 strerror(-ret));
3044 }
3045 }
3046 return ret;
3047 }
3048
3049 static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base)
3050 {
3051 return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
3052 }
3053
3054 static int kvm_vm_set_nr_mmu_pages(KVMState *s)
3055 {
3056 uint64_t shadow_mem;
3057 int ret = 0;
3058 shadow_mem = object_property_get_int(OBJECT(s),
3059 "kvm-shadow-mem",
3060 &error_abort);
3061 if (shadow_mem != -1) {
3062 shadow_mem /= 4096;
3063 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
3064 }
3065 return ret;
3066 }
3067
3068 static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base)
3069 {
3070 return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base);
3071 }
3072
3073 static int kvm_vm_enable_disable_exits(KVMState *s)
3074 {
3075 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
3076 /* Work around a kernel header typo. TODO: fix the header and drop this.
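* (some kernel headers spell KVM_X86_DISABLE_EXITS_HLT as
 * KVM_X86_DISABLE_EXITS_HTL; the #define below papers over that.)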
*/
3077 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
3078 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
3079 #endif
3080 if (disable_exits) {
3081 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
3082 KVM_X86_DISABLE_EXITS_HLT |
3083 KVM_X86_DISABLE_EXITS_PAUSE |
3084 KVM_X86_DISABLE_EXITS_CSTATE);
3085 }
3086
3087 return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
3088 disable_exits);
3089 }
3090
3091 static int kvm_vm_enable_bus_lock_exit(KVMState *s)
3092 {
3093 int ret = 0;
3094 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
3095 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
3096 error_report("kvm: bus lock detection unsupported");
3097 return -ENOTSUP;
3098 }
3099 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
3100 KVM_BUS_LOCK_DETECTION_EXIT);
3101 if (ret < 0) {
3102 error_report("kvm: Failed to enable bus lock detection cap: %s",
3103 strerror(-ret));
3104 }
3105
3106 return ret;
3107 }
3108
3109 static int kvm_vm_enable_notify_vmexit(KVMState *s)
3110 {
3111 int ret = 0;
3112 if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) {
3113 uint64_t notify_window_flags =
3114 ((uint64_t)s->notify_window << 32) |
3115 KVM_X86_NOTIFY_VMEXIT_ENABLED |
3116 KVM_X86_NOTIFY_VMEXIT_USER;
3117 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
3118 notify_window_flags);
3119 if (ret < 0) {
3120 error_report("kvm: Failed to enable notify vmexit cap: %s",
3121 strerror(-ret));
3122 }
3123 }
3124 return ret;
3125 }
3126
3127 static int kvm_vm_enable_userspace_msr(KVMState *s)
3128 {
3129 int ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
3130 KVM_MSR_EXIT_REASON_FILTER);
3131 if (ret < 0) {
3132 error_report("Could not enable user space MSRs: %s",
3133 strerror(-ret));
3134 exit(1);
3135 }
3136
3137 if (!kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
3138 kvm_rdmsr_core_thread_count, NULL)) {
3139 error_report("Could not install MSR_CORE_THREAD_COUNT handler!");
3140 exit(1);
3141 }
3142
3143 return 0;
3144 }
3145
3146 static void kvm_vm_enable_energy_msrs(KVMState *s)
3147 {
3148 bool r;
3149 if (s->msr_energy.enable == true) {
3150 r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT,
3151 kvm_rdmsr_rapl_power_unit, NULL);
3152 if (!r) {
3153 error_report("Could not install MSR_RAPL_POWER_UNIT "
3154 "handler");
3155 exit(1);
3156 }
3157
3158 r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT,
3159 kvm_rdmsr_pkg_power_limit, NULL);
3160 if (!r) {
3161 error_report("Could not install MSR_PKG_POWER_LIMIT "
3162 "handler");
3163 exit(1);
3164 }
3165
3166 r = kvm_filter_msr(s, MSR_PKG_POWER_INFO,
3167 kvm_rdmsr_pkg_power_info, NULL);
3168 if (!r) {
3169 error_report("Could not install MSR_PKG_POWER_INFO "
3170 "handler");
3171 exit(1);
3172 }
3173 r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS,
3174 kvm_rdmsr_pkg_energy_status, NULL);
3175 if (!r) {
3176 error_report("Could not install MSR_PKG_ENERGY_STATUS "
3177 "handler");
3178 exit(1);
3179 }
3180 }
3181 return;
3182 }
3183
3184 int kvm_arch_init(MachineState *ms, KVMState *s)
3185 {
3186 int ret;
3187 struct utsname utsname;
3188 Error *local_err = NULL;
3189
3190 /*
3191 * Initialize SEV context, if required
3192 *
3193 * If no memory encryption is requested (ms->cgs == NULL) this is
3194 * a no-op.
3195 *
3196 * It's also a no-op if a non-SEV confidential guest support
3197 * mechanism is selected. SEV is the only mechanism available to
3198 * select on x86 at present, so this doesn't arise, but if new
3199 * mechanisms are supported in future (e.g.
TDX), they'll need
3200 * their own initialization either here or elsewhere.
3201 */
3202 if (ms->cgs) {
3203 ret = confidential_guest_kvm_init(ms->cgs, &local_err);
3204 if (ret < 0) {
3205 error_report_err(local_err);
3206 return ret;
3207 }
3208 }
3209
3210 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
3211 has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
3212
3213 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
3214
3215 ret = kvm_vm_enable_exception_payload(s);
3216 if (ret < 0) {
3217 return ret;
3218 }
3219
3220 ret = kvm_vm_enable_triple_fault_event(s);
3221 if (ret < 0) {
3222 return ret;
3223 }
3224
3225 if (s->xen_version) {
3226 #ifdef CONFIG_XEN_EMU
3227 if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) {
3228 error_report("kvm: Xen support is only available in PC machines");
3229 return -ENOTSUP;
3230 }
3231 /* hyperv_enabled() doesn't work yet. */
3232 uint32_t msr = XEN_HYPERCALL_MSR;
3233 ret = kvm_xen_init(s, msr);
3234 if (ret < 0) {
3235 return ret;
3236 }
3237 #else
3238 error_report("kvm: Xen support not enabled in qemu");
3239 return -ENOTSUP;
3240 #endif
3241 }
3242
3243 ret = kvm_get_supported_msrs(s);
3244 if (ret < 0) {
3245 return ret;
3246 }
3247
3248 kvm_get_supported_feature_msrs(s);
3249
3250 uname(&utsname);
3251 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
3252
3253 ret = kvm_vm_set_identity_map_addr(s, KVM_IDENTITY_BASE);
3254 if (ret < 0) {
3255 return ret;
3256 }
3257
3258 /* Set the TSS base one page after the EPT identity map. */
3259 ret = kvm_vm_set_tss_addr(s, KVM_IDENTITY_BASE + 0x1000);
3260 if (ret < 0) {
3261 return ret;
3262 }
3263
3264 /* Tell fw_cfg to notify the BIOS to reserve the range. */
3265 e820_add_entry(KVM_IDENTITY_BASE, 0x4000, E820_RESERVED);
3266
3267 ret = kvm_vm_set_nr_mmu_pages(s);
3268 if (ret < 0) {
3269 return ret;
3270 }
3271
3272 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
3273 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
3274 x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
3275 smram_machine_done.notify = register_smram_listener;
3276 qemu_add_machine_init_done_notifier(&smram_machine_done);
3277 }
3278
3279 if (enable_cpu_pm) {
3280 ret = kvm_vm_enable_disable_exits(s);
3281 if (ret < 0) {
3282 error_report("kvm: guest stopping CPU not supported: %s",
3283 strerror(-ret));
3284 }
3285 }
3286
3287 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
3288 X86MachineState *x86ms = X86_MACHINE(ms);
3289
3290 if (x86ms->bus_lock_ratelimit > 0) {
3291 ret = kvm_vm_enable_bus_lock_exit(s);
3292 if (ret < 0) {
3293 return ret;
3294 }
3295 ratelimit_init(&bus_lock_ratelimit_ctrl);
3296 ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
3297 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
3298 }
3299 }
3300
3301 if (kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
3302 ret = kvm_vm_enable_notify_vmexit(s);
3303 if (ret < 0) {
3304 return ret;
3305 }
3306 }
3307
3308 if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
3309 ret = kvm_vm_enable_userspace_msr(s);
3310 if (ret < 0) {
3311 return ret;
3312 }
3313
3314 if (s->msr_energy.enable == true) {
3315 kvm_vm_enable_energy_msrs(s);
3316 if (kvm_msr_energy_thread_init(s, ms)) {
3317 error_report("kvm: RAPL feature requirements not met");
3318 exit(1);
3319 }
3320 }
3321 }
3322
3323 return 0;
3324 }
3325
3326 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
3327 {
3328 lhs->selector = rhs->selector;
3329 lhs->base = rhs->base;
3330 lhs->limit = rhs->limit;
3331 lhs->type = 3;
3332
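/* vm86 segments are ring-3, 16-bit, read/write/accessed data (type 3) */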
lhs->present = 1;
3333 lhs->dpl = 3;
3334 lhs->db = 0;
3335 lhs->s = 1;
3336 lhs->l = 0;
3337 lhs->g = 0;
3338 lhs->avl = 0;
3339 lhs->unusable = 0;
3340 }
3341
3342 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
3343 {
3344 unsigned flags = rhs->flags;
3345 lhs->selector = rhs->selector;
3346 lhs->base = rhs->base;
3347 lhs->limit = rhs->limit;
3348 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
3349 lhs->present = (flags & DESC_P_MASK) != 0;
3350 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
3351 lhs->db = (flags >> DESC_B_SHIFT) & 1;
3352 lhs->s = (flags & DESC_S_MASK) != 0;
3353 lhs->l = (flags >> DESC_L_SHIFT) & 1;
3354 lhs->g = (flags & DESC_G_MASK) != 0;
3355 lhs->avl = (flags & DESC_AVL_MASK) != 0;
3356 lhs->unusable = !lhs->present;
3357 lhs->padding = 0;
3358 }
3359
3360 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
3361 {
3362 lhs->selector = rhs->selector;
3363 lhs->base = rhs->base;
3364 lhs->limit = rhs->limit;
3365 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
3366 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
3367 (rhs->dpl << DESC_DPL_SHIFT) |
3368 (rhs->db << DESC_B_SHIFT) |
3369 (rhs->s * DESC_S_MASK) |
3370 (rhs->l << DESC_L_SHIFT) |
3371 (rhs->g * DESC_G_MASK) |
3372 (rhs->avl * DESC_AVL_MASK);
3373 }
3374
3375 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
3376 {
3377 if (set) {
3378 *kvm_reg = *qemu_reg;
3379 } else {
3380 *qemu_reg = *kvm_reg;
3381 }
3382 }
3383
3384 static int kvm_getput_regs(X86CPU *cpu, int set)
3385 {
3386 CPUX86State *env = &cpu->env;
3387 struct kvm_regs regs;
3388 int ret = 0;
3389
3390 if (!set) {
3391 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
3392 if (ret < 0) {
3393 return ret;
3394 }
3395 }
3396
3397 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
3398 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
3399 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
3400 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
3401 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
3402 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
3403 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
3404 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
3405 #ifdef TARGET_X86_64
3406 kvm_getput_reg(&regs.r8, &env->regs[8], set);
3407 kvm_getput_reg(&regs.r9, &env->regs[9], set);
3408 kvm_getput_reg(&regs.r10, &env->regs[10], set);
3409 kvm_getput_reg(&regs.r11, &env->regs[11], set);
3410 kvm_getput_reg(&regs.r12, &env->regs[12], set);
3411 kvm_getput_reg(&regs.r13, &env->regs[13], set);
3412 kvm_getput_reg(&regs.r14, &env->regs[14], set);
3413 kvm_getput_reg(&regs.r15, &env->regs[15], set);
3414 #endif
3415
3416 kvm_getput_reg(&regs.rflags, &env->eflags, set);
3417 kvm_getput_reg(&regs.rip, &env->eip, set);
3418
3419 if (set) {
3420 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
3421 }
3422
3423 return ret;
3424 }
3425
3426 static int kvm_put_xsave(X86CPU *cpu)
3427 {
3428 CPUX86State *env = &cpu->env;
3429 void *xsave = env->xsave_buf;
3430
3431 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
3432
3433 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
3434 }
3435
3436 static int kvm_put_xcrs(X86CPU *cpu)
3437 {
3438 CPUX86State *env = &cpu->env;
3439 struct kvm_xcrs xcrs = {};
3440
3441 if (!has_xcrs) {
3442 return 0;
3443 }
3444
3445 xcrs.nr_xcrs = 1;
3446 xcrs.flags = 0;
3447 xcrs.xcrs[0].xcr = 0;
3448 xcrs.xcrs[0].value = env->xcr0;
3449 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
3450 }
3451
3452 static int kvm_put_sregs(X86CPU *cpu)
3453 {
3454 CPUX86State *env = &cpu->env;
3455 struct kvm_sregs
sregs; 3456 3457 /* 3458 * The interrupt_bitmap is ignored because KVM_SET_SREGS is 3459 * always followed by KVM_SET_VCPU_EVENTS. 3460 */ 3461 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); 3462 3463 if ((env->eflags & VM_MASK)) { 3464 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 3465 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 3466 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 3467 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 3468 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 3469 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 3470 } else { 3471 set_seg(&sregs.cs, &env->segs[R_CS]); 3472 set_seg(&sregs.ds, &env->segs[R_DS]); 3473 set_seg(&sregs.es, &env->segs[R_ES]); 3474 set_seg(&sregs.fs, &env->segs[R_FS]); 3475 set_seg(&sregs.gs, &env->segs[R_GS]); 3476 set_seg(&sregs.ss, &env->segs[R_SS]); 3477 } 3478 3479 set_seg(&sregs.tr, &env->tr); 3480 set_seg(&sregs.ldt, &env->ldt); 3481 3482 sregs.idt.limit = env->idt.limit; 3483 sregs.idt.base = env->idt.base; 3484 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 3485 sregs.gdt.limit = env->gdt.limit; 3486 sregs.gdt.base = env->gdt.base; 3487 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 3488 3489 sregs.cr0 = env->cr[0]; 3490 sregs.cr2 = env->cr[2]; 3491 sregs.cr3 = env->cr[3]; 3492 sregs.cr4 = env->cr[4]; 3493 3494 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 3495 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 3496 3497 sregs.efer = env->efer; 3498 3499 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 3500 } 3501 3502 static int kvm_put_sregs2(X86CPU *cpu) 3503 { 3504 CPUX86State *env = &cpu->env; 3505 struct kvm_sregs2 sregs; 3506 int i; 3507 3508 sregs.flags = 0; 3509 3510 if ((env->eflags & VM_MASK)) { 3511 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 3512 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 3513 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 3514 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 3515 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 3516 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 3517 } else { 3518 set_seg(&sregs.cs, &env->segs[R_CS]); 3519 set_seg(&sregs.ds, &env->segs[R_DS]); 3520 set_seg(&sregs.es, &env->segs[R_ES]); 3521 set_seg(&sregs.fs, &env->segs[R_FS]); 3522 set_seg(&sregs.gs, &env->segs[R_GS]); 3523 set_seg(&sregs.ss, &env->segs[R_SS]); 3524 } 3525 3526 set_seg(&sregs.tr, &env->tr); 3527 set_seg(&sregs.ldt, &env->ldt); 3528 3529 sregs.idt.limit = env->idt.limit; 3530 sregs.idt.base = env->idt.base; 3531 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 3532 sregs.gdt.limit = env->gdt.limit; 3533 sregs.gdt.base = env->gdt.base; 3534 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 3535 3536 sregs.cr0 = env->cr[0]; 3537 sregs.cr2 = env->cr[2]; 3538 sregs.cr3 = env->cr[3]; 3539 sregs.cr4 = env->cr[4]; 3540 3541 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 3542 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 3543 3544 sregs.efer = env->efer; 3545 3546 if (env->pdptrs_valid) { 3547 for (i = 0; i < 4; i++) { 3548 sregs.pdptrs[i] = env->pdptrs[i]; 3549 } 3550 sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 3551 } 3552 3553 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs); 3554 } 3555 3556 3557 static void kvm_msr_buf_reset(X86CPU *cpu) 3558 { 3559 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); 3560 } 3561 3562 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) 3563 { 3564 struct kvm_msrs *msrs = cpu->kvm_msr_buf; 3565 void *limit = ((void *)msrs) + MSR_BUF_SIZE; 3566 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; 3567 3568 
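/* The fixed MSR_BUF_SIZE buffer must still have room for one more entry. */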
assert((void *)(entry + 1) <= limit); 3569 3570 entry->index = index; 3571 entry->reserved = 0; 3572 entry->data = value; 3573 msrs->nmsrs++; 3574 } 3575 3576 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) 3577 { 3578 kvm_msr_buf_reset(cpu); 3579 kvm_msr_entry_add(cpu, index, value); 3580 3581 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 3582 } 3583 3584 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value) 3585 { 3586 int ret; 3587 struct { 3588 struct kvm_msrs info; 3589 struct kvm_msr_entry entries[1]; 3590 } msr_data = { 3591 .info.nmsrs = 1, 3592 .entries[0].index = index, 3593 }; 3594 3595 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); 3596 if (ret < 0) { 3597 return ret; 3598 } 3599 assert(ret == 1); 3600 *value = msr_data.entries[0].data; 3601 return ret; 3602 } 3603 void kvm_put_apicbase(X86CPU *cpu, uint64_t value) 3604 { 3605 int ret; 3606 3607 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); 3608 assert(ret == 1); 3609 } 3610 3611 static int kvm_put_tscdeadline_msr(X86CPU *cpu) 3612 { 3613 CPUX86State *env = &cpu->env; 3614 int ret; 3615 3616 if (!has_msr_tsc_deadline) { 3617 return 0; 3618 } 3619 3620 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); 3621 if (ret < 0) { 3622 return ret; 3623 } 3624 3625 assert(ret == 1); 3626 return 0; 3627 } 3628 3629 /* 3630 * Provide a separate write service for the feature control MSR in order to 3631 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done 3632 * before writing any other state because forcibly leaving nested mode 3633 * invalidates the VCPU state. 3634 */ 3635 static int kvm_put_msr_feature_control(X86CPU *cpu) 3636 { 3637 int ret; 3638 3639 if (!has_msr_feature_control) { 3640 return 0; 3641 } 3642 3643 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, 3644 cpu->env.msr_ia32_feature_control); 3645 if (ret < 0) { 3646 return ret; 3647 } 3648 3649 assert(ret == 1); 3650 return 0; 3651 } 3652 3653 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features) 3654 { 3655 uint32_t default1, can_be_one, can_be_zero; 3656 uint32_t must_be_one; 3657 3658 switch (index) { 3659 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 3660 default1 = 0x00000016; 3661 break; 3662 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 3663 default1 = 0x0401e172; 3664 break; 3665 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 3666 default1 = 0x000011ff; 3667 break; 3668 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 3669 default1 = 0x00036dff; 3670 break; 3671 case MSR_IA32_VMX_PROCBASED_CTLS2: 3672 default1 = 0; 3673 break; 3674 default: 3675 abort(); 3676 } 3677 3678 /* If a feature bit is set, the control can be either set or clear. 3679 * Otherwise the value is limited to either 0 or 1 by default1. 3680 */ 3681 can_be_one = features | default1; 3682 can_be_zero = features | ~default1; 3683 must_be_one = ~can_be_zero; 3684 3685 /* 3686 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one). 3687 * Bit 32:63 -> 1 if the control bit can be one. 3688 */ 3689 return must_be_one | (((uint64_t)can_be_one) << 32); 3690 } 3691 3692 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) 3693 { 3694 uint64_t kvm_vmx_basic = 3695 kvm_arch_get_supported_msr_feature(kvm_state, 3696 MSR_IA32_VMX_BASIC); 3697 3698 if (!kvm_vmx_basic) { 3699 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0), 3700 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail. 
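* Return early instead, leaving the VMX capability MSRs unset rather
 * than failing the whole MSR load.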
3701 */ 3702 return; 3703 } 3704 3705 uint64_t kvm_vmx_misc = 3706 kvm_arch_get_supported_msr_feature(kvm_state, 3707 MSR_IA32_VMX_MISC); 3708 uint64_t kvm_vmx_ept_vpid = 3709 kvm_arch_get_supported_msr_feature(kvm_state, 3710 MSR_IA32_VMX_EPT_VPID_CAP); 3711 3712 /* 3713 * If the guest is 64-bit, a value of 1 is allowed for the host address 3714 * space size vmexit control. 3715 */ 3716 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM 3717 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0; 3718 3719 /* 3720 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should 3721 * not change them for backwards compatibility. 3722 */ 3723 uint64_t fixed_vmx_basic = kvm_vmx_basic & 3724 (MSR_VMX_BASIC_VMCS_REVISION_MASK | 3725 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK | 3726 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK); 3727 3728 /* 3729 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can 3730 * change in the future but are always zero for now, clear them to be 3731 * future proof. Bits 32-63 in theory could change, though KVM does 3732 * not support dual-monitor treatment and probably never will; mask 3733 * them out as well. 3734 */ 3735 uint64_t fixed_vmx_misc = kvm_vmx_misc & 3736 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK | 3737 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK); 3738 3739 /* 3740 * EPT memory types should not change either, so we do not bother 3741 * adding features for them. 3742 */ 3743 uint64_t fixed_vmx_ept_mask = 3744 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ? 3745 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0); 3746 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask; 3747 3748 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 3749 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 3750 f[FEAT_VMX_PROCBASED_CTLS])); 3751 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS, 3752 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS, 3753 f[FEAT_VMX_PINBASED_CTLS])); 3754 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS, 3755 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS, 3756 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit); 3757 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS, 3758 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS, 3759 f[FEAT_VMX_ENTRY_CTLS])); 3760 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2, 3761 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2, 3762 f[FEAT_VMX_SECONDARY_CTLS])); 3763 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP, 3764 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid); 3765 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC, 3766 f[FEAT_VMX_BASIC] | fixed_vmx_basic); 3767 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC, 3768 f[FEAT_VMX_MISC] | fixed_vmx_misc); 3769 if (has_msr_vmx_vmfunc) { 3770 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]); 3771 } 3772 3773 /* 3774 * Just to be safe, write these with constant values. The CRn_FIXED1 3775 * MSRs are generated by KVM based on the vCPU's CPUID. 3776 */ 3777 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0, 3778 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK); 3779 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, 3780 CR4_VMXE_MASK); 3781 3782 if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { 3783 /* FRED injected-event data (0x2052). */ 3784 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52); 3785 } else if (f[FEAT_VMX_EXIT_CTLS] & 3786 VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) { 3787 /* Secondary VM-exit controls (0x2044). 
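* Bits 9:1 of VMCS_ENUM hold the highest VMCS field index; 0x44 here
 * matches the 0x2044 field encoding.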
*/ 3788 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44); 3789 } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { 3790 /* TSC multiplier (0x2032). */ 3791 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); 3792 } else { 3793 /* Preemption timer (0x482E). */ 3794 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E); 3795 } 3796 } 3797 3798 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f) 3799 { 3800 uint64_t kvm_perf_cap = 3801 kvm_arch_get_supported_msr_feature(kvm_state, 3802 MSR_IA32_PERF_CAPABILITIES); 3803 3804 if (kvm_perf_cap) { 3805 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES, 3806 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]); 3807 } 3808 } 3809 3810 static int kvm_buf_set_msrs(X86CPU *cpu) 3811 { 3812 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 3813 if (ret < 0) { 3814 return ret; 3815 } 3816 3817 if (ret < cpu->kvm_msr_buf->nmsrs) { 3818 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 3819 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, 3820 (uint32_t)e->index, (uint64_t)e->data); 3821 } 3822 3823 assert(ret == cpu->kvm_msr_buf->nmsrs); 3824 return 0; 3825 } 3826 3827 static void kvm_init_msrs(X86CPU *cpu) 3828 { 3829 CPUX86State *env = &cpu->env; 3830 3831 kvm_msr_buf_reset(cpu); 3832 if (has_msr_arch_capabs) { 3833 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, 3834 env->features[FEAT_ARCH_CAPABILITIES]); 3835 } 3836 3837 if (has_msr_core_capabs) { 3838 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, 3839 env->features[FEAT_CORE_CAPABILITY]); 3840 } 3841 3842 if (has_msr_perf_capabs && cpu->enable_pmu) { 3843 kvm_msr_entry_add_perf(cpu, env->features); 3844 } 3845 3846 if (has_msr_ucode_rev) { 3847 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); 3848 } 3849 3850 /* 3851 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but 3852 * all kernels with MSR features should have them. 
3853 */ 3854 if (kvm_feature_msrs && cpu_has_vmx(env)) { 3855 kvm_msr_entry_add_vmx(cpu, env->features); 3856 } 3857 3858 assert(kvm_buf_set_msrs(cpu) == 0); 3859 } 3860 3861 static int kvm_put_msrs(X86CPU *cpu, int level) 3862 { 3863 CPUX86State *env = &cpu->env; 3864 int i; 3865 3866 kvm_msr_buf_reset(cpu); 3867 3868 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); 3869 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); 3870 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); 3871 kvm_msr_entry_add(cpu, MSR_PAT, env->pat); 3872 if (has_msr_star) { 3873 kvm_msr_entry_add(cpu, MSR_STAR, env->star); 3874 } 3875 if (has_msr_hsave_pa) { 3876 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); 3877 } 3878 if (has_msr_tsc_aux) { 3879 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); 3880 } 3881 if (has_msr_tsc_adjust) { 3882 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); 3883 } 3884 if (has_msr_misc_enable) { 3885 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 3886 env->msr_ia32_misc_enable); 3887 } 3888 if (has_msr_smbase) { 3889 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); 3890 } 3891 if (has_msr_smi_count) { 3892 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); 3893 } 3894 if (has_msr_pkrs) { 3895 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs); 3896 } 3897 if (has_msr_bndcfgs) { 3898 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); 3899 } 3900 if (has_msr_xss) { 3901 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); 3902 } 3903 if (has_msr_umwait) { 3904 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait); 3905 } 3906 if (has_msr_spec_ctrl) { 3907 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); 3908 } 3909 if (has_tsc_scale_msr) { 3910 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); 3911 } 3912 3913 if (has_msr_tsx_ctrl) { 3914 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); 3915 } 3916 if (has_msr_virt_ssbd) { 3917 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); 3918 } 3919 3920 #ifdef TARGET_X86_64 3921 if (lm_capable_kernel) { 3922 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); 3923 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); 3924 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); 3925 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); 3926 if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { 3927 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, env->fred_rsp0); 3928 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, env->fred_rsp1); 3929 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, env->fred_rsp2); 3930 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, env->fred_rsp3); 3931 kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, env->fred_stklvls); 3932 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, env->fred_ssp1); 3933 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, env->fred_ssp2); 3934 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, env->fred_ssp3); 3935 kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, env->fred_config); 3936 } 3937 } 3938 #endif 3939 3940 /* 3941 * The following MSRs have side effects on the guest or are too heavy 3942 * for normal writeback. Limit them to reset or full state updates. 
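* (i.e. to levels KVM_PUT_RESET_STATE and KVM_PUT_FULL_STATE).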
3943 */ 3944 if (level >= KVM_PUT_RESET_STATE) { 3945 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); 3946 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); 3947 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); 3948 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3949 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); 3950 } 3951 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3952 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); 3953 } 3954 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3955 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); 3956 } 3957 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3958 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); 3959 } 3960 3961 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3962 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); 3963 } 3964 3965 if (has_architectural_pmu_version > 0) { 3966 if (has_architectural_pmu_version > 1) { 3967 /* Stop the counter. */ 3968 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3969 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3970 } 3971 3972 /* Set the counter values. */ 3973 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3974 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 3975 env->msr_fixed_counters[i]); 3976 } 3977 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3978 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 3979 env->msr_gp_counters[i]); 3980 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 3981 env->msr_gp_evtsel[i]); 3982 } 3983 if (has_architectural_pmu_version > 1) { 3984 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 3985 env->msr_global_status); 3986 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 3987 env->msr_global_ovf_ctrl); 3988 3989 /* Now start the PMU. 
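             * Global and fixed-counter control were zeroed above so the
             * counter values could be queued while the PMU is stopped;
             * KVM_SET_MSRS applies the buffer in order, so the PMU is
             * re-enabled only after all counter and event-select values
             * are in place.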
*/ 3990 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 3991 env->msr_fixed_ctr_ctrl); 3992 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 3993 env->msr_global_ctrl); 3994 } 3995 } 3996 /* 3997 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, 3998 * only sync them to KVM on the first cpu 3999 */ 4000 if (current_cpu == first_cpu) { 4001 if (has_msr_hv_hypercall) { 4002 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 4003 env->msr_hv_guest_os_id); 4004 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 4005 env->msr_hv_hypercall); 4006 } 4007 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 4008 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 4009 env->msr_hv_tsc); 4010 } 4011 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 4012 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 4013 env->msr_hv_reenlightenment_control); 4014 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 4015 env->msr_hv_tsc_emulation_control); 4016 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 4017 env->msr_hv_tsc_emulation_status); 4018 } 4019 #ifdef CONFIG_SYNDBG 4020 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) && 4021 has_msr_hv_syndbg_options) { 4022 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 4023 hyperv_syndbg_query_options()); 4024 } 4025 #endif 4026 } 4027 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 4028 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 4029 env->msr_hv_vapic); 4030 } 4031 if (has_msr_hv_crash) { 4032 int j; 4033 4034 for (j = 0; j < HV_CRASH_PARAMS; j++) 4035 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 4036 env->msr_hv_crash_params[j]); 4037 4038 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); 4039 } 4040 if (has_msr_hv_runtime) { 4041 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); 4042 } 4043 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) 4044 && hv_vpindex_settable) { 4045 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, 4046 hyperv_vp_index(CPU(cpu))); 4047 } 4048 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 4049 int j; 4050 4051 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); 4052 4053 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 4054 env->msr_hv_synic_control); 4055 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 4056 env->msr_hv_synic_evt_page); 4057 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 4058 env->msr_hv_synic_msg_page); 4059 4060 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { 4061 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, 4062 env->msr_hv_synic_sint[j]); 4063 } 4064 } 4065 if (has_msr_hv_stimer) { 4066 int j; 4067 4068 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { 4069 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, 4070 env->msr_hv_stimer_config[j]); 4071 } 4072 4073 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { 4074 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, 4075 env->msr_hv_stimer_count[j]); 4076 } 4077 } 4078 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 4079 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); 4080 4081 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); 4082 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); 4083 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); 4084 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); 4085 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); 4086 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); 4087 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 
env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /*
                 * The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that).
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
                              env->msr_ia32_sgxlepubkeyhash[0]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
                              env->msr_ia32_sgxlepubkeyhash[1]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
                              env->msr_ia32_sgxlepubkeyhash[2]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
                              env->msr_ia32_sgxlepubkeyhash[3]);
        }

        if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
            kvm_msr_entry_add(cpu, MSR_IA32_XFD,
                              env->msr_xfd);
            kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
                              env->msr_xfd_err);
        }

        if (kvm_enabled() && cpu->enable_pmu &&
            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
            uint64_t depth;
            int ret;

            /*
             * Only migrate Arch LBR states when the host Arch LBR depth
             * equals that of the source guest; this avoids a guest/host
             * configuration mismatch for the MSR and the unexpected
             * misbehavior that could result from it.
             */
            ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);

            if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);

                for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
                    if (!env->lbr_records[i].from) {
                        continue;
                    }
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
                                      env->lbr_records[i].from);
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
                                      env->lbr_records[i].to);
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
                                      env->lbr_records[i].info);
                }
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control.
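         *       It must be written before nested (VMX) state is loaded;
         *       see the ordering enforced in kvm_arch_put_registers()
         *       below, which puts the feature-control MSR first so the
         *       vCPU leaves VMX operation cleanly on reset.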
*/ 4176 } 4177 4178 if (env->mcg_cap) { 4179 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); 4180 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); 4181 if (has_msr_mcg_ext_ctl) { 4182 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); 4183 } 4184 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 4185 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); 4186 } 4187 } 4188 4189 return kvm_buf_set_msrs(cpu); 4190 } 4191 4192 4193 static int kvm_get_xsave(X86CPU *cpu) 4194 { 4195 CPUX86State *env = &cpu->env; 4196 void *xsave = env->xsave_buf; 4197 unsigned long type; 4198 int ret; 4199 4200 type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; 4201 ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); 4202 if (ret < 0) { 4203 return ret; 4204 } 4205 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len); 4206 4207 return 0; 4208 } 4209 4210 static int kvm_get_xcrs(X86CPU *cpu) 4211 { 4212 CPUX86State *env = &cpu->env; 4213 int i, ret; 4214 struct kvm_xcrs xcrs; 4215 4216 if (!has_xcrs) { 4217 return 0; 4218 } 4219 4220 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); 4221 if (ret < 0) { 4222 return ret; 4223 } 4224 4225 for (i = 0; i < xcrs.nr_xcrs; i++) { 4226 /* Only support xcr0 now */ 4227 if (xcrs.xcrs[i].xcr == 0) { 4228 env->xcr0 = xcrs.xcrs[i].value; 4229 break; 4230 } 4231 } 4232 return 0; 4233 } 4234 4235 static int kvm_get_sregs(X86CPU *cpu) 4236 { 4237 CPUX86State *env = &cpu->env; 4238 struct kvm_sregs sregs; 4239 int ret; 4240 4241 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 4242 if (ret < 0) { 4243 return ret; 4244 } 4245 4246 /* 4247 * The interrupt_bitmap is ignored because KVM_GET_SREGS is 4248 * always preceded by KVM_GET_VCPU_EVENTS. 4249 */ 4250 4251 get_seg(&env->segs[R_CS], &sregs.cs); 4252 get_seg(&env->segs[R_DS], &sregs.ds); 4253 get_seg(&env->segs[R_ES], &sregs.es); 4254 get_seg(&env->segs[R_FS], &sregs.fs); 4255 get_seg(&env->segs[R_GS], &sregs.gs); 4256 get_seg(&env->segs[R_SS], &sregs.ss); 4257 4258 get_seg(&env->tr, &sregs.tr); 4259 get_seg(&env->ldt, &sregs.ldt); 4260 4261 env->idt.limit = sregs.idt.limit; 4262 env->idt.base = sregs.idt.base; 4263 env->gdt.limit = sregs.gdt.limit; 4264 env->gdt.base = sregs.gdt.base; 4265 4266 env->cr[0] = sregs.cr0; 4267 env->cr[2] = sregs.cr2; 4268 env->cr[3] = sregs.cr3; 4269 env->cr[4] = sregs.cr4; 4270 4271 env->efer = sregs.efer; 4272 if (sev_es_enabled() && env->efer & MSR_EFER_LME && 4273 env->cr[0] & CR0_PG_MASK) { 4274 env->efer |= MSR_EFER_LMA; 4275 } 4276 4277 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 4278 x86_update_hflags(env); 4279 4280 return 0; 4281 } 4282 4283 static int kvm_get_sregs2(X86CPU *cpu) 4284 { 4285 CPUX86State *env = &cpu->env; 4286 struct kvm_sregs2 sregs; 4287 int i, ret; 4288 4289 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs); 4290 if (ret < 0) { 4291 return ret; 4292 } 4293 4294 get_seg(&env->segs[R_CS], &sregs.cs); 4295 get_seg(&env->segs[R_DS], &sregs.ds); 4296 get_seg(&env->segs[R_ES], &sregs.es); 4297 get_seg(&env->segs[R_FS], &sregs.fs); 4298 get_seg(&env->segs[R_GS], &sregs.gs); 4299 get_seg(&env->segs[R_SS], &sregs.ss); 4300 4301 get_seg(&env->tr, &sregs.tr); 4302 get_seg(&env->ldt, &sregs.ldt); 4303 4304 env->idt.limit = sregs.idt.limit; 4305 env->idt.base = sregs.idt.base; 4306 env->gdt.limit = sregs.gdt.limit; 4307 env->gdt.base = sregs.gdt.base; 4308 4309 env->cr[0] = sregs.cr0; 4310 env->cr[2] = sregs.cr2; 4311 env->cr[3] = sregs.cr3; 4312 env->cr[4] = sregs.cr4; 4313 4314 env->efer = sregs.efer; 4315 if 
(sev_es_enabled() && env->efer & MSR_EFER_LME && 4316 env->cr[0] & CR0_PG_MASK) { 4317 env->efer |= MSR_EFER_LMA; 4318 } 4319 4320 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 4321 4322 if (env->pdptrs_valid) { 4323 for (i = 0; i < 4; i++) { 4324 env->pdptrs[i] = sregs.pdptrs[i]; 4325 } 4326 } 4327 4328 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 4329 x86_update_hflags(env); 4330 4331 return 0; 4332 } 4333 4334 static int kvm_get_msrs(X86CPU *cpu) 4335 { 4336 CPUX86State *env = &cpu->env; 4337 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; 4338 int ret, i; 4339 uint64_t mtrr_top_bits; 4340 4341 kvm_msr_buf_reset(cpu); 4342 4343 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); 4344 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); 4345 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); 4346 kvm_msr_entry_add(cpu, MSR_PAT, 0); 4347 if (has_msr_star) { 4348 kvm_msr_entry_add(cpu, MSR_STAR, 0); 4349 } 4350 if (has_msr_hsave_pa) { 4351 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); 4352 } 4353 if (has_msr_tsc_aux) { 4354 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); 4355 } 4356 if (has_msr_tsc_adjust) { 4357 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); 4358 } 4359 if (has_msr_tsc_deadline) { 4360 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); 4361 } 4362 if (has_msr_misc_enable) { 4363 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); 4364 } 4365 if (has_msr_smbase) { 4366 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); 4367 } 4368 if (has_msr_smi_count) { 4369 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0); 4370 } 4371 if (has_msr_feature_control) { 4372 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); 4373 } 4374 if (has_msr_pkrs) { 4375 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0); 4376 } 4377 if (has_msr_bndcfgs) { 4378 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); 4379 } 4380 if (has_msr_xss) { 4381 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); 4382 } 4383 if (has_msr_umwait) { 4384 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0); 4385 } 4386 if (has_msr_spec_ctrl) { 4387 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); 4388 } 4389 if (has_tsc_scale_msr) { 4390 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0); 4391 } 4392 4393 if (has_msr_tsx_ctrl) { 4394 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); 4395 } 4396 if (has_msr_virt_ssbd) { 4397 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0); 4398 } 4399 if (!env->tsc_valid) { 4400 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); 4401 env->tsc_valid = !runstate_is_running(); 4402 } 4403 4404 #ifdef TARGET_X86_64 4405 if (lm_capable_kernel) { 4406 kvm_msr_entry_add(cpu, MSR_CSTAR, 0); 4407 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); 4408 kvm_msr_entry_add(cpu, MSR_FMASK, 0); 4409 kvm_msr_entry_add(cpu, MSR_LSTAR, 0); 4410 if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { 4411 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, 0); 4412 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, 0); 4413 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, 0); 4414 kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, 0); 4415 kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, 0); 4416 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, 0); 4417 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, 0); 4418 kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, 0); 4419 kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, 0); 4420 } 4421 } 4422 #endif 4423 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); 4424 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); 4425 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 4426 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0); 4427 } 4428 if 
(env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 4429 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); 4430 } 4431 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 4432 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); 4433 } 4434 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 4435 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); 4436 } 4437 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 4438 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); 4439 } 4440 if (has_architectural_pmu_version > 0) { 4441 if (has_architectural_pmu_version > 1) { 4442 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 4443 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 4444 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); 4445 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); 4446 } 4447 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 4448 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); 4449 } 4450 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 4451 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); 4452 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); 4453 } 4454 } 4455 4456 if (env->mcg_cap) { 4457 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); 4458 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); 4459 if (has_msr_mcg_ext_ctl) { 4460 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); 4461 } 4462 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 4463 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0); 4464 } 4465 } 4466 4467 if (has_msr_hv_hypercall) { 4468 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); 4469 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); 4470 } 4471 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 4472 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); 4473 } 4474 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 4475 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); 4476 } 4477 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 4478 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); 4479 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); 4480 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); 4481 } 4482 if (has_msr_hv_syndbg_options) { 4483 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0); 4484 } 4485 if (has_msr_hv_crash) { 4486 int j; 4487 4488 for (j = 0; j < HV_CRASH_PARAMS; j++) { 4489 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); 4490 } 4491 } 4492 if (has_msr_hv_runtime) { 4493 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); 4494 } 4495 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 4496 uint32_t msr; 4497 4498 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); 4499 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); 4500 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); 4501 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { 4502 kvm_msr_entry_add(cpu, msr, 0); 4503 } 4504 } 4505 if (has_msr_hv_stimer) { 4506 uint32_t msr; 4507 4508 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; 4509 msr++) { 4510 kvm_msr_entry_add(cpu, msr, 0); 4511 } 4512 } 4513 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 4514 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); 4515 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); 4516 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); 4517 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); 4518 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); 4519 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); 4520 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); 4521 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); 4522 
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
    }

    if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
        kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
    }

    if (kvm_enabled() && cpu->enable_pmu &&
        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
        uint64_t depth;

        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);

            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
            }
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts:
     * a   10..0: must be zero
     * b   11   : valid bit
     * c n-1..12: actual mask bits
     * d   51..n: reserved, must be zero
     * e  63..52: reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52. We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
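     *
     * For example, with phys_bits = 40 the code below computes
     * mtrr_top_bits = MAKE_64BIT_MASK(40, 12) = 0x000fff0000000000,
     * i.e. bits 51..40, which are then ORed into every variable-range
     * mask read back from KVM.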
4601 */ 4602 4603 if (cpu->fill_mtrr_mask) { 4604 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52); 4605 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); 4606 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); 4607 } else { 4608 mtrr_top_bits = 0; 4609 } 4610 4611 for (i = 0; i < ret; i++) { 4612 uint32_t index = msrs[i].index; 4613 switch (index) { 4614 case MSR_IA32_SYSENTER_CS: 4615 env->sysenter_cs = msrs[i].data; 4616 break; 4617 case MSR_IA32_SYSENTER_ESP: 4618 env->sysenter_esp = msrs[i].data; 4619 break; 4620 case MSR_IA32_SYSENTER_EIP: 4621 env->sysenter_eip = msrs[i].data; 4622 break; 4623 case MSR_PAT: 4624 env->pat = msrs[i].data; 4625 break; 4626 case MSR_STAR: 4627 env->star = msrs[i].data; 4628 break; 4629 #ifdef TARGET_X86_64 4630 case MSR_CSTAR: 4631 env->cstar = msrs[i].data; 4632 break; 4633 case MSR_KERNELGSBASE: 4634 env->kernelgsbase = msrs[i].data; 4635 break; 4636 case MSR_FMASK: 4637 env->fmask = msrs[i].data; 4638 break; 4639 case MSR_LSTAR: 4640 env->lstar = msrs[i].data; 4641 break; 4642 case MSR_IA32_FRED_RSP0: 4643 env->fred_rsp0 = msrs[i].data; 4644 break; 4645 case MSR_IA32_FRED_RSP1: 4646 env->fred_rsp1 = msrs[i].data; 4647 break; 4648 case MSR_IA32_FRED_RSP2: 4649 env->fred_rsp2 = msrs[i].data; 4650 break; 4651 case MSR_IA32_FRED_RSP3: 4652 env->fred_rsp3 = msrs[i].data; 4653 break; 4654 case MSR_IA32_FRED_STKLVLS: 4655 env->fred_stklvls = msrs[i].data; 4656 break; 4657 case MSR_IA32_FRED_SSP1: 4658 env->fred_ssp1 = msrs[i].data; 4659 break; 4660 case MSR_IA32_FRED_SSP2: 4661 env->fred_ssp2 = msrs[i].data; 4662 break; 4663 case MSR_IA32_FRED_SSP3: 4664 env->fred_ssp3 = msrs[i].data; 4665 break; 4666 case MSR_IA32_FRED_CONFIG: 4667 env->fred_config = msrs[i].data; 4668 break; 4669 #endif 4670 case MSR_IA32_TSC: 4671 env->tsc = msrs[i].data; 4672 break; 4673 case MSR_TSC_AUX: 4674 env->tsc_aux = msrs[i].data; 4675 break; 4676 case MSR_TSC_ADJUST: 4677 env->tsc_adjust = msrs[i].data; 4678 break; 4679 case MSR_IA32_TSCDEADLINE: 4680 env->tsc_deadline = msrs[i].data; 4681 break; 4682 case MSR_VM_HSAVE_PA: 4683 env->vm_hsave = msrs[i].data; 4684 break; 4685 case MSR_KVM_SYSTEM_TIME: 4686 env->system_time_msr = msrs[i].data; 4687 break; 4688 case MSR_KVM_WALL_CLOCK: 4689 env->wall_clock_msr = msrs[i].data; 4690 break; 4691 case MSR_MCG_STATUS: 4692 env->mcg_status = msrs[i].data; 4693 break; 4694 case MSR_MCG_CTL: 4695 env->mcg_ctl = msrs[i].data; 4696 break; 4697 case MSR_MCG_EXT_CTL: 4698 env->mcg_ext_ctl = msrs[i].data; 4699 break; 4700 case MSR_IA32_MISC_ENABLE: 4701 env->msr_ia32_misc_enable = msrs[i].data; 4702 break; 4703 case MSR_IA32_SMBASE: 4704 env->smbase = msrs[i].data; 4705 break; 4706 case MSR_SMI_COUNT: 4707 env->msr_smi_count = msrs[i].data; 4708 break; 4709 case MSR_IA32_FEATURE_CONTROL: 4710 env->msr_ia32_feature_control = msrs[i].data; 4711 break; 4712 case MSR_IA32_BNDCFGS: 4713 env->msr_bndcfgs = msrs[i].data; 4714 break; 4715 case MSR_IA32_XSS: 4716 env->xss = msrs[i].data; 4717 break; 4718 case MSR_IA32_UMWAIT_CONTROL: 4719 env->umwait = msrs[i].data; 4720 break; 4721 case MSR_IA32_PKRS: 4722 env->pkrs = msrs[i].data; 4723 break; 4724 default: 4725 if (msrs[i].index >= MSR_MC0_CTL && 4726 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { 4727 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; 4728 } 4729 break; 4730 case MSR_KVM_ASYNC_PF_EN: 4731 env->async_pf_en_msr = msrs[i].data; 4732 break; 4733 case MSR_KVM_ASYNC_PF_INT: 4734 env->async_pf_int_msr = msrs[i].data; 4735 break; 4736 case 
MSR_KVM_PV_EOI_EN: 4737 env->pv_eoi_en_msr = msrs[i].data; 4738 break; 4739 case MSR_KVM_STEAL_TIME: 4740 env->steal_time_msr = msrs[i].data; 4741 break; 4742 case MSR_KVM_POLL_CONTROL: { 4743 env->poll_control_msr = msrs[i].data; 4744 break; 4745 } 4746 case MSR_CORE_PERF_FIXED_CTR_CTRL: 4747 env->msr_fixed_ctr_ctrl = msrs[i].data; 4748 break; 4749 case MSR_CORE_PERF_GLOBAL_CTRL: 4750 env->msr_global_ctrl = msrs[i].data; 4751 break; 4752 case MSR_CORE_PERF_GLOBAL_STATUS: 4753 env->msr_global_status = msrs[i].data; 4754 break; 4755 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 4756 env->msr_global_ovf_ctrl = msrs[i].data; 4757 break; 4758 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: 4759 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; 4760 break; 4761 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: 4762 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; 4763 break; 4764 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: 4765 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; 4766 break; 4767 case HV_X64_MSR_HYPERCALL: 4768 env->msr_hv_hypercall = msrs[i].data; 4769 break; 4770 case HV_X64_MSR_GUEST_OS_ID: 4771 env->msr_hv_guest_os_id = msrs[i].data; 4772 break; 4773 case HV_X64_MSR_APIC_ASSIST_PAGE: 4774 env->msr_hv_vapic = msrs[i].data; 4775 break; 4776 case HV_X64_MSR_REFERENCE_TSC: 4777 env->msr_hv_tsc = msrs[i].data; 4778 break; 4779 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4780 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; 4781 break; 4782 case HV_X64_MSR_VP_RUNTIME: 4783 env->msr_hv_runtime = msrs[i].data; 4784 break; 4785 case HV_X64_MSR_SCONTROL: 4786 env->msr_hv_synic_control = msrs[i].data; 4787 break; 4788 case HV_X64_MSR_SIEFP: 4789 env->msr_hv_synic_evt_page = msrs[i].data; 4790 break; 4791 case HV_X64_MSR_SIMP: 4792 env->msr_hv_synic_msg_page = msrs[i].data; 4793 break; 4794 case HV_X64_MSR_SINT0 ... 
HV_X64_MSR_SINT15: 4795 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; 4796 break; 4797 case HV_X64_MSR_STIMER0_CONFIG: 4798 case HV_X64_MSR_STIMER1_CONFIG: 4799 case HV_X64_MSR_STIMER2_CONFIG: 4800 case HV_X64_MSR_STIMER3_CONFIG: 4801 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = 4802 msrs[i].data; 4803 break; 4804 case HV_X64_MSR_STIMER0_COUNT: 4805 case HV_X64_MSR_STIMER1_COUNT: 4806 case HV_X64_MSR_STIMER2_COUNT: 4807 case HV_X64_MSR_STIMER3_COUNT: 4808 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = 4809 msrs[i].data; 4810 break; 4811 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4812 env->msr_hv_reenlightenment_control = msrs[i].data; 4813 break; 4814 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4815 env->msr_hv_tsc_emulation_control = msrs[i].data; 4816 break; 4817 case HV_X64_MSR_TSC_EMULATION_STATUS: 4818 env->msr_hv_tsc_emulation_status = msrs[i].data; 4819 break; 4820 case HV_X64_MSR_SYNDBG_OPTIONS: 4821 env->msr_hv_syndbg_options = msrs[i].data; 4822 break; 4823 case MSR_MTRRdefType: 4824 env->mtrr_deftype = msrs[i].data; 4825 break; 4826 case MSR_MTRRfix64K_00000: 4827 env->mtrr_fixed[0] = msrs[i].data; 4828 break; 4829 case MSR_MTRRfix16K_80000: 4830 env->mtrr_fixed[1] = msrs[i].data; 4831 break; 4832 case MSR_MTRRfix16K_A0000: 4833 env->mtrr_fixed[2] = msrs[i].data; 4834 break; 4835 case MSR_MTRRfix4K_C0000: 4836 env->mtrr_fixed[3] = msrs[i].data; 4837 break; 4838 case MSR_MTRRfix4K_C8000: 4839 env->mtrr_fixed[4] = msrs[i].data; 4840 break; 4841 case MSR_MTRRfix4K_D0000: 4842 env->mtrr_fixed[5] = msrs[i].data; 4843 break; 4844 case MSR_MTRRfix4K_D8000: 4845 env->mtrr_fixed[6] = msrs[i].data; 4846 break; 4847 case MSR_MTRRfix4K_E0000: 4848 env->mtrr_fixed[7] = msrs[i].data; 4849 break; 4850 case MSR_MTRRfix4K_E8000: 4851 env->mtrr_fixed[8] = msrs[i].data; 4852 break; 4853 case MSR_MTRRfix4K_F0000: 4854 env->mtrr_fixed[9] = msrs[i].data; 4855 break; 4856 case MSR_MTRRfix4K_F8000: 4857 env->mtrr_fixed[10] = msrs[i].data; 4858 break; 4859 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): 4860 if (index & 1) { 4861 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | 4862 mtrr_top_bits; 4863 } else { 4864 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; 4865 } 4866 break; 4867 case MSR_IA32_SPEC_CTRL: 4868 env->spec_ctrl = msrs[i].data; 4869 break; 4870 case MSR_AMD64_TSC_RATIO: 4871 env->amd_tsc_scale_msr = msrs[i].data; 4872 break; 4873 case MSR_IA32_TSX_CTRL: 4874 env->tsx_ctrl = msrs[i].data; 4875 break; 4876 case MSR_VIRT_SSBD: 4877 env->virt_ssbd = msrs[i].data; 4878 break; 4879 case MSR_IA32_RTIT_CTL: 4880 env->msr_rtit_ctrl = msrs[i].data; 4881 break; 4882 case MSR_IA32_RTIT_STATUS: 4883 env->msr_rtit_status = msrs[i].data; 4884 break; 4885 case MSR_IA32_RTIT_OUTPUT_BASE: 4886 env->msr_rtit_output_base = msrs[i].data; 4887 break; 4888 case MSR_IA32_RTIT_OUTPUT_MASK: 4889 env->msr_rtit_output_mask = msrs[i].data; 4890 break; 4891 case MSR_IA32_RTIT_CR3_MATCH: 4892 env->msr_rtit_cr3_match = msrs[i].data; 4893 break; 4894 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 4895 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; 4896 break; 4897 case MSR_IA32_SGXLEPUBKEYHASH0 ... 
MSR_IA32_SGXLEPUBKEYHASH3: 4898 env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = 4899 msrs[i].data; 4900 break; 4901 case MSR_IA32_XFD: 4902 env->msr_xfd = msrs[i].data; 4903 break; 4904 case MSR_IA32_XFD_ERR: 4905 env->msr_xfd_err = msrs[i].data; 4906 break; 4907 case MSR_ARCH_LBR_CTL: 4908 env->msr_lbr_ctl = msrs[i].data; 4909 break; 4910 case MSR_ARCH_LBR_DEPTH: 4911 env->msr_lbr_depth = msrs[i].data; 4912 break; 4913 case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31: 4914 env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data; 4915 break; 4916 case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31: 4917 env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data; 4918 break; 4919 case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31: 4920 env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; 4921 break; 4922 } 4923 } 4924 4925 return 0; 4926 } 4927 4928 static int kvm_put_mp_state(X86CPU *cpu) 4929 { 4930 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; 4931 4932 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); 4933 } 4934 4935 static int kvm_get_mp_state(X86CPU *cpu) 4936 { 4937 CPUState *cs = CPU(cpu); 4938 CPUX86State *env = &cpu->env; 4939 struct kvm_mp_state mp_state; 4940 int ret; 4941 4942 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); 4943 if (ret < 0) { 4944 return ret; 4945 } 4946 env->mp_state = mp_state.mp_state; 4947 if (kvm_irqchip_in_kernel()) { 4948 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); 4949 } 4950 return 0; 4951 } 4952 4953 static int kvm_get_apic(X86CPU *cpu) 4954 { 4955 DeviceState *apic = cpu->apic_state; 4956 struct kvm_lapic_state kapic; 4957 int ret; 4958 4959 if (apic && kvm_irqchip_in_kernel()) { 4960 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic); 4961 if (ret < 0) { 4962 return ret; 4963 } 4964 4965 kvm_get_apic_state(apic, &kapic); 4966 } 4967 return 0; 4968 } 4969 4970 static int kvm_put_vcpu_events(X86CPU *cpu, int level) 4971 { 4972 CPUState *cs = CPU(cpu); 4973 CPUX86State *env = &cpu->env; 4974 struct kvm_vcpu_events events = {}; 4975 4976 events.flags = 0; 4977 4978 if (has_exception_payload) { 4979 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 4980 events.exception.pending = env->exception_pending; 4981 events.exception_has_payload = env->exception_has_payload; 4982 events.exception_payload = env->exception_payload; 4983 } 4984 events.exception.nr = env->exception_nr; 4985 events.exception.injected = env->exception_injected; 4986 events.exception.has_error_code = env->has_error_code; 4987 events.exception.error_code = env->error_code; 4988 4989 events.interrupt.injected = (env->interrupt_injected >= 0); 4990 events.interrupt.nr = env->interrupt_injected; 4991 events.interrupt.soft = env->soft_interrupt; 4992 4993 events.nmi.injected = env->nmi_injected; 4994 events.nmi.pending = env->nmi_pending; 4995 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); 4996 4997 events.sipi_vector = env->sipi_vector; 4998 4999 if (has_msr_smbase) { 5000 events.flags |= KVM_VCPUEVENT_VALID_SMM; 5001 events.smi.smm = !!(env->hflags & HF_SMM_MASK); 5002 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); 5003 if (kvm_irqchip_in_kernel()) { 5004 /* As soon as these are moved to the kernel, remove them 5005 * from cs->interrupt_request. 
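             * Until then, latching SMI/INIT into the SMM event state here
             * and clearing the corresponding interrupt_request bits keeps
             * a pending SMI or latched INIT from being delivered twice,
             * once by KVM and once by the userspace loop.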
5006 */ 5007 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; 5008 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; 5009 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); 5010 } else { 5011 /* Keep these in cs->interrupt_request. */ 5012 events.smi.pending = 0; 5013 events.smi.latched_init = 0; 5014 } 5015 } 5016 5017 if (level >= KVM_PUT_RESET_STATE) { 5018 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; 5019 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 5020 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR; 5021 } 5022 } 5023 5024 if (has_triple_fault_event) { 5025 events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5026 events.triple_fault.pending = env->triple_fault_pending; 5027 } 5028 5029 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); 5030 } 5031 5032 static int kvm_get_vcpu_events(X86CPU *cpu) 5033 { 5034 CPUX86State *env = &cpu->env; 5035 struct kvm_vcpu_events events; 5036 int ret; 5037 5038 memset(&events, 0, sizeof(events)); 5039 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); 5040 if (ret < 0) { 5041 return ret; 5042 } 5043 5044 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5045 env->exception_pending = events.exception.pending; 5046 env->exception_has_payload = events.exception_has_payload; 5047 env->exception_payload = events.exception_payload; 5048 } else { 5049 env->exception_pending = 0; 5050 env->exception_has_payload = false; 5051 } 5052 env->exception_injected = events.exception.injected; 5053 env->exception_nr = 5054 (env->exception_pending || env->exception_injected) ? 5055 events.exception.nr : -1; 5056 env->has_error_code = events.exception.has_error_code; 5057 env->error_code = events.exception.error_code; 5058 5059 env->interrupt_injected = 5060 events.interrupt.injected ? 
events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
        env->triple_fault_pending = events.triple_fault.pending;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    memset(&dbgregs, 0, sizeof(dbgregs));
    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    /*
     * Copy flags that are affected by reset from env->hflags and env->hflags2.
     */
    if (env->hflags & HF_GUEST_MASK) {
        env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
    }

    /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
    if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
        env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}

static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports.
     * We preserve the migration origin's nested_state->hdr.size for
     * the call to KVM_SET_NESTED_STATE, but want our next call to
     * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
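     * (For KVM_GET_NESTED_STATE, nested_state->size tells the kernel how
     * large our buffer is; the kernel stores back the size it actually
     * used, so advertising the maximum loses nothing.)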
*/
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    /*
     * Copy flags that are affected by reset to env->hflags and env->hflags2.
     */
    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
    if (cpu_has_svm(env)) {
        if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
            env->hflags2 |= HF2_GIF_MASK;
        } else {
            env->hflags2 &= ~HF2_GIF_MASK;
        }
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /*
     * Put MSR_IA32_FEATURE_CONTROL first: this ensures the VM gets out of VMX
     * root operation upon vCPU reset. kvm_put_msr_feature_control() should
     * also precede kvm_put_nested_state() when 'real' nested state is set.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    /* must be before kvm_put_nested_state so that EFER.SVME is set */
    ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /*
         * We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
        ret = kvm_put_xen_state(cpu);
        if (ret < 0) {
            return ret;
        }
    }
#endif

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MP_STATE can modify CS and RIP; call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
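     * (Reading the MP state lets the in-kernel APIC deliver a pending
     * INIT or SIPI first, which rewrites the vCPU's CS:RIP.)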
5315 */ 5316 ret = kvm_get_mp_state(cpu); 5317 if (ret < 0) { 5318 goto out; 5319 } 5320 ret = kvm_getput_regs(cpu, 0); 5321 if (ret < 0) { 5322 goto out; 5323 } 5324 ret = kvm_get_xsave(cpu); 5325 if (ret < 0) { 5326 goto out; 5327 } 5328 ret = kvm_get_xcrs(cpu); 5329 if (ret < 0) { 5330 goto out; 5331 } 5332 ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu); 5333 if (ret < 0) { 5334 goto out; 5335 } 5336 ret = kvm_get_msrs(cpu); 5337 if (ret < 0) { 5338 goto out; 5339 } 5340 ret = kvm_get_apic(cpu); 5341 if (ret < 0) { 5342 goto out; 5343 } 5344 ret = kvm_get_debugregs(cpu); 5345 if (ret < 0) { 5346 goto out; 5347 } 5348 ret = kvm_get_nested_state(cpu); 5349 if (ret < 0) { 5350 goto out; 5351 } 5352 #ifdef CONFIG_XEN_EMU 5353 if (xen_mode == XEN_EMULATE) { 5354 ret = kvm_get_xen_state(cs); 5355 if (ret < 0) { 5356 goto out; 5357 } 5358 } 5359 #endif 5360 ret = 0; 5361 out: 5362 cpu_sync_bndcs_hflags(&cpu->env); 5363 return ret; 5364 } 5365 5366 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) 5367 { 5368 X86CPU *x86_cpu = X86_CPU(cpu); 5369 CPUX86State *env = &x86_cpu->env; 5370 int ret; 5371 5372 /* Inject NMI */ 5373 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { 5374 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { 5375 bql_lock(); 5376 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; 5377 bql_unlock(); 5378 DPRINTF("injected NMI\n"); 5379 ret = kvm_vcpu_ioctl(cpu, KVM_NMI); 5380 if (ret < 0) { 5381 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n", 5382 strerror(-ret)); 5383 } 5384 } 5385 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { 5386 bql_lock(); 5387 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; 5388 bql_unlock(); 5389 DPRINTF("injected SMI\n"); 5390 ret = kvm_vcpu_ioctl(cpu, KVM_SMI); 5391 if (ret < 0) { 5392 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n", 5393 strerror(-ret)); 5394 } 5395 } 5396 } 5397 5398 if (!kvm_pic_in_kernel()) { 5399 bql_lock(); 5400 } 5401 5402 /* Force the VCPU out of its inner loop to process any INIT requests 5403 * or (for userspace APIC, but it is cheap to combine the checks here) 5404 * pending TPR access reports. 5405 */ 5406 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { 5407 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 5408 !(env->hflags & HF_SMM_MASK)) { 5409 cpu->exit_request = 1; 5410 } 5411 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 5412 cpu->exit_request = 1; 5413 } 5414 } 5415 5416 if (!kvm_pic_in_kernel()) { 5417 /* Try to inject an interrupt if the guest can accept it */ 5418 if (run->ready_for_interrupt_injection && 5419 (cpu->interrupt_request & CPU_INTERRUPT_HARD) && 5420 (env->eflags & IF_MASK)) { 5421 int irq; 5422 5423 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; 5424 irq = cpu_get_pic_interrupt(env); 5425 if (irq >= 0) { 5426 struct kvm_interrupt intr; 5427 5428 intr.irq = irq; 5429 DPRINTF("injected interrupt %d\n", irq); 5430 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr); 5431 if (ret < 0) { 5432 fprintf(stderr, 5433 "KVM: injection failed, interrupt lost (%s)\n", 5434 strerror(-ret)); 5435 } 5436 } 5437 } 5438 5439 /* If we have an interrupt but the guest is not ready to receive an 5440 * interrupt, request an interrupt window exit. This will 5441 * cause a return to userspace as soon as the guest is ready to 5442 * receive interrupts. 
*/ 5443 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { 5444 run->request_interrupt_window = 1; 5445 } else { 5446 run->request_interrupt_window = 0; 5447 } 5448 5449 DPRINTF("setting tpr\n"); 5450 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); 5451 5452 bql_unlock(); 5453 } 5454 } 5455 5456 static void kvm_rate_limit_on_bus_lock(void) 5457 { 5458 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1); 5459 5460 if (delay_ns) { 5461 g_usleep(delay_ns / SCALE_US); 5462 } 5463 } 5464 5465 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) 5466 { 5467 X86CPU *x86_cpu = X86_CPU(cpu); 5468 CPUX86State *env = &x86_cpu->env; 5469 5470 if (run->flags & KVM_RUN_X86_SMM) { 5471 env->hflags |= HF_SMM_MASK; 5472 } else { 5473 env->hflags &= ~HF_SMM_MASK; 5474 } 5475 if (run->if_flag) { 5476 env->eflags |= IF_MASK; 5477 } else { 5478 env->eflags &= ~IF_MASK; 5479 } 5480 if (run->flags & KVM_RUN_X86_BUS_LOCK) { 5481 kvm_rate_limit_on_bus_lock(); 5482 } 5483 5484 #ifdef CONFIG_XEN_EMU 5485 /* 5486 * If the callback is asserted as a GSI (or PCI INTx) then check if 5487 * vcpu_info->evtchn_upcall_pending has been cleared, and deassert 5488 * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC 5489 * EOI and only resample then, exactly how the VFIO eventfd pairs 5490 * are designed to work for level triggered interrupts. 5491 */ 5492 if (x86_cpu->env.xen_callback_asserted) { 5493 kvm_xen_maybe_deassert_callback(cpu); 5494 } 5495 #endif 5496 5497 /* We need to protect the apic state against concurrent accesses from 5498 * different threads in case the userspace irqchip is used. */ 5499 if (!kvm_irqchip_in_kernel()) { 5500 bql_lock(); 5501 } 5502 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); 5503 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); 5504 if (!kvm_irqchip_in_kernel()) { 5505 bql_unlock(); 5506 } 5507 return cpu_get_mem_attrs(env); 5508 } 5509 5510 int kvm_arch_process_async_events(CPUState *cs) 5511 { 5512 X86CPU *cpu = X86_CPU(cs); 5513 CPUX86State *env = &cpu->env; 5514 5515 if (cs->interrupt_request & CPU_INTERRUPT_MCE) { 5516 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. 
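         * env->mcg_cap is only non-zero when MCE support was negotiated
         * with KVM at vCPU setup time, hence the assertion below.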
*/ 5517 assert(env->mcg_cap); 5518 5519 cs->interrupt_request &= ~CPU_INTERRUPT_MCE; 5520 5521 kvm_cpu_synchronize_state(cs); 5522 5523 if (env->exception_nr == EXCP08_DBLE) { 5524 /* this means triple fault */ 5525 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 5526 cs->exit_request = 1; 5527 return 0; 5528 } 5529 kvm_queue_exception(env, EXCP12_MCHK, 0, 0); 5530 env->has_error_code = 0; 5531 5532 cs->halted = 0; 5533 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) { 5534 env->mp_state = KVM_MP_STATE_RUNNABLE; 5535 } 5536 } 5537 5538 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && 5539 !(env->hflags & HF_SMM_MASK)) { 5540 kvm_cpu_synchronize_state(cs); 5541 do_cpu_init(cpu); 5542 } 5543 5544 if (kvm_irqchip_in_kernel()) { 5545 return 0; 5546 } 5547 5548 if (cs->interrupt_request & CPU_INTERRUPT_POLL) { 5549 cs->interrupt_request &= ~CPU_INTERRUPT_POLL; 5550 apic_poll_irq(cpu->apic_state); 5551 } 5552 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && 5553 (env->eflags & IF_MASK)) || 5554 (cs->interrupt_request & CPU_INTERRUPT_NMI)) { 5555 cs->halted = 0; 5556 } 5557 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { 5558 kvm_cpu_synchronize_state(cs); 5559 do_cpu_sipi(cpu); 5560 } 5561 if (cs->interrupt_request & CPU_INTERRUPT_TPR) { 5562 cs->interrupt_request &= ~CPU_INTERRUPT_TPR; 5563 kvm_cpu_synchronize_state(cs); 5564 apic_handle_tpr_access_report(cpu->apic_state, env->eip, 5565 env->tpr_access_type); 5566 } 5567 5568 return cs->halted; 5569 } 5570 5571 static int kvm_handle_halt(X86CPU *cpu) 5572 { 5573 CPUState *cs = CPU(cpu); 5574 CPUX86State *env = &cpu->env; 5575 5576 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && 5577 (env->eflags & IF_MASK)) && 5578 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { 5579 cs->halted = 1; 5580 return EXCP_HLT; 5581 } 5582 5583 return 0; 5584 } 5585 5586 static int kvm_handle_tpr_access(X86CPU *cpu) 5587 { 5588 CPUState *cs = CPU(cpu); 5589 struct kvm_run *run = cs->kvm_run; 5590 5591 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip, 5592 run->tpr_access.is_write ? 
TPR_ACCESS_WRITE 5593 : TPR_ACCESS_READ); 5594 return 1; 5595 } 5596 5597 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 5598 { 5599 static const uint8_t int3 = 0xcc; 5600 5601 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || 5602 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { 5603 return -EINVAL; 5604 } 5605 return 0; 5606 } 5607 5608 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 5609 { 5610 uint8_t int3; 5611 5612 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) { 5613 return -EINVAL; 5614 } 5615 if (int3 != 0xcc) { 5616 return 0; 5617 } 5618 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { 5619 return -EINVAL; 5620 } 5621 return 0; 5622 } 5623 5624 static struct { 5625 target_ulong addr; 5626 int len; 5627 int type; 5628 } hw_breakpoint[4]; 5629 5630 static int nb_hw_breakpoint; 5631 5632 static int find_hw_breakpoint(target_ulong addr, int len, int type) 5633 { 5634 int n; 5635 5636 for (n = 0; n < nb_hw_breakpoint; n++) { 5637 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && 5638 (hw_breakpoint[n].len == len || len == -1)) { 5639 return n; 5640 } 5641 } 5642 return -1; 5643 } 5644 5645 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) 5646 { 5647 switch (type) { 5648 case GDB_BREAKPOINT_HW: 5649 len = 1; 5650 break; 5651 case GDB_WATCHPOINT_WRITE: 5652 case GDB_WATCHPOINT_ACCESS: 5653 switch (len) { 5654 case 1: 5655 break; 5656 case 2: 5657 case 4: 5658 case 8: 5659 if (addr & (len - 1)) { 5660 return -EINVAL; 5661 } 5662 break; 5663 default: 5664 return -EINVAL; 5665 } 5666 break; 5667 default: 5668 return -ENOSYS; 5669 } 5670 5671 if (nb_hw_breakpoint == 4) { 5672 return -ENOBUFS; 5673 } 5674 if (find_hw_breakpoint(addr, len, type) >= 0) { 5675 return -EEXIST; 5676 } 5677 hw_breakpoint[nb_hw_breakpoint].addr = addr; 5678 hw_breakpoint[nb_hw_breakpoint].len = len; 5679 hw_breakpoint[nb_hw_breakpoint].type = type; 5680 nb_hw_breakpoint++; 5681 5682 return 0; 5683 } 5684 5685 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) 5686 { 5687 int n; 5688 5689 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 
1 : len, type); 5690 if (n < 0) { 5691 return -ENOENT; 5692 } 5693 nb_hw_breakpoint--; 5694 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; 5695 5696 return 0; 5697 } 5698 5699 void kvm_arch_remove_all_hw_breakpoints(void) 5700 { 5701 nb_hw_breakpoint = 0; 5702 } 5703 5704 static CPUWatchpoint hw_watchpoint; 5705 5706 static int kvm_handle_debug(X86CPU *cpu, 5707 struct kvm_debug_exit_arch *arch_info) 5708 { 5709 CPUState *cs = CPU(cpu); 5710 CPUX86State *env = &cpu->env; 5711 int ret = 0; 5712 int n; 5713 5714 if (arch_info->exception == EXCP01_DB) { 5715 if (arch_info->dr6 & DR6_BS) { 5716 if (cs->singlestep_enabled) { 5717 ret = EXCP_DEBUG; 5718 } 5719 } else { 5720 for (n = 0; n < 4; n++) { 5721 if (arch_info->dr6 & (1 << n)) { 5722 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { 5723 case 0x0: 5724 ret = EXCP_DEBUG; 5725 break; 5726 case 0x1: 5727 ret = EXCP_DEBUG; 5728 cs->watchpoint_hit = &hw_watchpoint; 5729 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 5730 hw_watchpoint.flags = BP_MEM_WRITE; 5731 break; 5732 case 0x3: 5733 ret = EXCP_DEBUG; 5734 cs->watchpoint_hit = &hw_watchpoint; 5735 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 5736 hw_watchpoint.flags = BP_MEM_ACCESS; 5737 break; 5738 } 5739 } 5740 } 5741 } 5742 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { 5743 ret = EXCP_DEBUG; 5744 } 5745 if (ret == 0) { 5746 cpu_synchronize_state(cs); 5747 assert(env->exception_nr == -1); 5748 5749 /* pass to guest */ 5750 kvm_queue_exception(env, arch_info->exception, 5751 arch_info->exception == EXCP01_DB, 5752 arch_info->dr6); 5753 env->has_error_code = 0; 5754 } 5755 5756 return ret; 5757 } 5758 5759 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) 5760 { 5761 const uint8_t type_code[] = { 5762 [GDB_BREAKPOINT_HW] = 0x0, 5763 [GDB_WATCHPOINT_WRITE] = 0x1, 5764 [GDB_WATCHPOINT_ACCESS] = 0x3 5765 }; 5766 const uint8_t len_code[] = { 5767 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 5768 }; 5769 int n; 5770 5771 if (kvm_sw_breakpoints_active(cpu)) { 5772 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 5773 } 5774 if (nb_hw_breakpoint > 0) { 5775 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 5776 dbg->arch.debugreg[7] = 0x0600; 5777 for (n = 0; n < nb_hw_breakpoint; n++) { 5778 dbg->arch.debugreg[n] = hw_breakpoint[n].addr; 5779 dbg->arch.debugreg[7] |= (2 << (n * 2)) | 5780 (type_code[hw_breakpoint[n].type] << (16 + n*4)) | 5781 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); 5782 } 5783 } 5784 } 5785 5786 static bool kvm_install_msr_filters(KVMState *s) 5787 { 5788 uint64_t zero = 0; 5789 struct kvm_msr_filter filter = { 5790 .flags = KVM_MSR_FILTER_DEFAULT_ALLOW, 5791 }; 5792 int r, i, j = 0; 5793 5794 for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) { 5795 KVMMSRHandlers *handler = &msr_handlers[i]; 5796 if (handler->msr) { 5797 struct kvm_msr_filter_range *range = &filter.ranges[j++]; 5798 5799 *range = (struct kvm_msr_filter_range) { 5800 .flags = 0, 5801 .nmsrs = 1, 5802 .base = handler->msr, 5803 .bitmap = (__u8 *)&zero, 5804 }; 5805 5806 if (handler->rdmsr) { 5807 range->flags |= KVM_MSR_FILTER_READ; 5808 } 5809 5810 if (handler->wrmsr) { 5811 range->flags |= KVM_MSR_FILTER_WRITE; 5812 } 5813 } 5814 } 5815 5816 r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter); 5817 if (r) { 5818 return false; 5819 } 5820 5821 return true; 5822 } 5823 5824 static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, 5825 QEMUWRMSRHandler *wrmsr) 5826 { 5827 int i; 5828 5829 for (i = 0; i < 
ARRAY_SIZE(msr_handlers); i++) {
        if (!msr_handlers[i].msr) {
            msr_handlers[i] = (KVMMSRHandlers) {
                .msr = msr,
                .rdmsr = rdmsr,
                .wrmsr = wrmsr,
            };

            if (!kvm_install_msr_filters(s)) {
                msr_handlers[i] = (KVMMSRHandlers) { };
                return false;
            }

            return true;
        }
    }

    return false;
}

static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
{
    int i;
    bool r;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->rdmsr) {
                r = handler->rdmsr(cpu, handler->msr,
                                   (uint64_t *)&run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    g_assert_not_reached();
}

static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
{
    int i;
    bool r;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->wrmsr) {
                r = handler->wrmsr(cpu, handler->msr, run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    g_assert_not_reached();
}

static bool has_sgx_provisioning;

static bool __kvm_enable_sgx_provisioning(KVMState *s)
{
    int fd, ret;

    if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
        return false;
    }

    fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
    if (fd < 0) {
        return false;
    }

    ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
    if (ret) {
        error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
        exit(1);
    }
    close(fd);
    return true;
}

bool kvm_enable_sgx_provisioning(KVMState *s)
{
    return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

/*
 * Currently the handling here only supports use of KVM_HC_MAP_GPA_RANGE
 * to service guest-initiated memory attribute update requests so that
 * KVM_SET_MEMORY_ATTRIBUTES can update whether or not a page should be
 * backed by the private memory pool provided by guest_memfd, and as such
 * is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
 *
 * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as for SEV live
 * migration, are not implemented here currently.
 *
 * For the guest_memfd use-case, these exits will generally be synthesized
 * by KVM based on platform-specific hypercalls, like GHCB requests in the
 * case of SEV-SNP, and not issued directly within the guest through the
 * KVM_HC_MAP_GPA_RANGE hypercall. So in this case, KVM_HC_MAP_GPA_RANGE is
 * not actually advertised to guests via the KVM CPUID feature bit, as
 * opposed to SEV live migration where it would be. Since it is unlikely the
 * SEV live migration use-case would be useful for guest_memfd-backed
 * guests, because private/shared page tracking is already provided through
 * other means, these two use-cases should be treated as mutually exclusive.
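 *
 * For reference, the hypercall arguments decoded below are:
 * args[0] = GPA of the start of the range, args[1] = number of pages,
 * args[2] = attribute flags (KVM_MAP_GPA_RANGE_ENCRYPTED among them).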
/*
 * Currently the handling here only supports use of KVM_HC_MAP_GPA_RANGE
 * to service guest-initiated memory attribute update requests, so that
 * KVM_SET_MEMORY_ATTRIBUTES can update whether or not a page should be
 * backed by the private memory pool provided by guest_memfd; as such, it
 * is only applicable to guest_memfd-backed guests (e.g. SNP/TDX).
 *
 * Other use-cases for KVM_HC_MAP_GPA_RANGE, such as SEV live migration,
 * are not implemented here currently.
 *
 * For the guest_memfd use-case, these exits will generally be synthesized
 * by KVM based on platform-specific hypercalls, like GHCB requests in the
 * case of SEV-SNP, and not issued directly within the guest through the
 * KVM_HC_MAP_GPA_RANGE hypercall. So in this case, KVM_HC_MAP_GPA_RANGE is
 * not actually advertised to guests via the KVM CPUID feature bit, as
 * opposed to SEV live migration where it would be. Since it is unlikely the
 * SEV live migration use-case would be useful for guest_memfd-backed
 * guests, because private/shared page tracking is already provided through
 * other means, these two use-cases should be treated as mutually exclusive.
 */
static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
{
    uint64_t gpa, size, attributes;

    if (!machine_require_guest_memfd(current_machine)) {
        return -EINVAL;
    }

    gpa = run->hypercall.args[0];
    size = run->hypercall.args[1] * TARGET_PAGE_SIZE;
    attributes = run->hypercall.args[2];

    trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);

    return kvm_convert_memory(gpa, size,
                              attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
}

static int kvm_handle_hypercall(struct kvm_run *run)
{
    if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) {
        return kvm_handle_hc_map_gpa_range(run);
    }

    return -EINVAL;
}
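/*
 * Return-value convention for kvm_arch_handle_exit() below, as consumed
 * by kvm_cpu_exec() in accel/kvm/kvm-all.c: 0 means the exit was fully
 * handled and the vCPU should re-enter the guest, a positive value such
 * as EXCP_DEBUG is passed back to the vCPU loop (e.g. to enter
 * guest-debug handling), and a negative value stops the VM with an
 * internal error.
 */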
" 6044 "The guest could be misbehaving."); 6045 ret = 0; 6046 } 6047 break; 6048 case KVM_EXIT_X86_RDMSR: 6049 /* We only enable MSR filtering, any other exit is bogus */ 6050 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); 6051 ret = kvm_handle_rdmsr(cpu, run); 6052 break; 6053 case KVM_EXIT_X86_WRMSR: 6054 /* We only enable MSR filtering, any other exit is bogus */ 6055 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); 6056 ret = kvm_handle_wrmsr(cpu, run); 6057 break; 6058 #ifdef CONFIG_XEN_EMU 6059 case KVM_EXIT_XEN: 6060 ret = kvm_xen_handle_exit(cpu, &run->xen); 6061 break; 6062 #endif 6063 case KVM_EXIT_HYPERCALL: 6064 ret = kvm_handle_hypercall(run); 6065 break; 6066 default: 6067 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); 6068 ret = -1; 6069 break; 6070 } 6071 6072 return ret; 6073 } 6074 6075 bool kvm_arch_stop_on_emulation_error(CPUState *cs) 6076 { 6077 X86CPU *cpu = X86_CPU(cs); 6078 CPUX86State *env = &cpu->env; 6079 6080 kvm_cpu_synchronize_state(cs); 6081 return !(env->cr[0] & CR0_PE_MASK) || 6082 ((env->segs[R_CS].selector & 3) != 3); 6083 } 6084 6085 void kvm_arch_init_irq_routing(KVMState *s) 6086 { 6087 /* We know at this point that we're using the in-kernel 6088 * irqchip, so we can use irqfds, and on x86 we know 6089 * we can use msi via irqfd and GSI routing. 6090 */ 6091 kvm_msi_via_irqfd_allowed = true; 6092 kvm_gsi_routing_allowed = true; 6093 6094 if (kvm_irqchip_is_split()) { 6095 KVMRouteChange c = kvm_irqchip_begin_route_changes(s); 6096 int i; 6097 6098 /* If the ioapic is in QEMU and the lapics are in KVM, reserve 6099 MSI routes for signaling interrupts to the local apics. */ 6100 for (i = 0; i < IOAPIC_NUM_PINS; i++) { 6101 if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) { 6102 error_report("Could not enable split IRQ mode."); 6103 exit(1); 6104 } 6105 } 6106 kvm_irqchip_commit_route_changes(&c); 6107 } 6108 } 6109 6110 int kvm_arch_irqchip_create(KVMState *s) 6111 { 6112 int ret; 6113 if (kvm_kernel_irqchip_split()) { 6114 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24); 6115 if (ret) { 6116 error_report("Could not enable split irqchip mode: %s", 6117 strerror(-ret)); 6118 exit(1); 6119 } else { 6120 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n"); 6121 kvm_split_irqchip = true; 6122 return 1; 6123 } 6124 } else { 6125 return 0; 6126 } 6127 } 6128 6129 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address) 6130 { 6131 CPUX86State *env; 6132 uint64_t ext_id; 6133 6134 if (!first_cpu) { 6135 return address; 6136 } 6137 env = &X86_CPU(first_cpu)->env; 6138 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) { 6139 return address; 6140 } 6141 6142 /* 6143 * If the remappable format bit is set, or the upper bits are 6144 * already set in address_hi, or the low extended bits aren't 6145 * there anyway, do nothing. 
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
{
    CPUX86State *env;
    uint64_t ext_id;

    if (!first_cpu) {
        return address;
    }
    env = &X86_CPU(first_cpu)->env;
    if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
        return address;
    }

    /*
     * If the remappable format bit is set, or the upper bits are
     * already set in address_hi, or the low extended bits aren't
     * there anyway, do nothing.
     */
    ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
    if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
        return address;
    }

    address &= ~ext_id;
    address |= ext_id << 35;
    return address;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);

        if (class->int_remap) {
            int ret;
            MSIMessage src, dst;

            src.address = route->u.msi.address_hi;
            src.address <<= VTD_MSI_ADDR_HI_SHIFT;
            src.address |= route->u.msi.address_lo;
            src.data = route->u.msi.data;

            ret = class->int_remap(iommu, &src, &dst,
                                   dev ? pci_requester_id(dev)
                                       : X86_IOMMU_SID_INVALID);
            if (ret) {
                trace_kvm_x86_fixup_msi_error(route->gsi);
                return 1;
            }

            /*
             * Handle an untranslated compatibility-format interrupt with
             * an extended destination ID in address bits 11:5.
             */
            dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);

            route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
            route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
            route->u.msi.data = dst.data;
            return 0;
        }
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE) {
        int handled = xen_evtchn_translate_pirq_msi(route, address, data);

        /*
         * If it was a PIRQ and successfully routed (handled == 0) or it was
         * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
         */
        if (handled <= 0) {
            return handled;
        }
    }
#endif

    address = kvm_swizzle_msi_ext_dest_id(address);
    route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
    route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
    return 0;
}

typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);
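/*
 * Refresh the kernel's MSI routes for every PCI device tracked in
 * msi_route_list. kvm_arch_add_msi_route_post() below registers this as
 * an IOMMU interrupt-entry-cache (IEC) notifier, so it runs whenever
 * interrupt-remapping invalidations may have changed effective routes.
 */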
void kvm_update_msi_routes_all(void *private, bool global,
                               uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out. Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /*
         * These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only.
         */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /*
         * The first time we add a route, register ourselves on the
         * IOMMU's IEC notify list if there is an IOMMU.
         */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}
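/*
 * Illustrative example: before any vCPU is created, a guest configured
 * with AMX needs permission for the dynamically-enabled XTILEDATA state
 * component (XSTATE bit 18). Assuming the XSTATE_XTILE_DATA_MASK constant
 * from cpu.h, a caller would do something like:
 *
 *     kvm_request_xsave_components(cpu, XSTATE_XTILE_DATA_MASK);
 *
 * kvm_request_xsave_components() below masks the request down to the
 * dynamic components that KVM actually reports in CPUID[EAX=0xD,ECX=0]
 * and then issues one arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM, bit) per
 * remaining bit.
 */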
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025

void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
{
    KVMState *s = kvm_state;
    uint64_t supported;

    mask &= XSTATE_DYNAMIC_MASK;
    if (!mask) {
        return;
    }
    /*
     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
     * about them already because they are not supported features.
     */
    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
    mask &= supported;

    while (mask) {
        int bit = ctz64(mask);
        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
        if (rc) {
            /*
             * Older kernel versions (<5.17) do not support
             * ARCH_REQ_XCOMP_GUEST_PERM, but they also do not return
             * any dynamic features from kvm_arch_get_supported_cpuid.
             */
            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
                        "for feature bit %d", bit);
        }
        mask &= ~BIT_ULL(bit);
    }
}

static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
{
    KVMState *s = KVM_STATE(obj);

    return s->notify_vmexit;
}

static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
{
    KVMState *s = KVM_STATE(obj);

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    s->notify_vmexit = value;
}

static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->notify_window;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->notify_window = value;
}

static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->xen_version;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint32_t value;

    visit_type_uint32(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_version = value;
    if (value && xen_mode == XEN_DISABLED) {
        xen_mode = XEN_EMULATE;
    }
}

static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_gnttab_max_frames;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_gnttab_max_frames = value;
}

static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_evtchn_max_pirq;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_evtchn_max_pirq = value;
}
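/*
 * The accessors above back the '-accel kvm' properties registered in
 * kvm_arch_accel_class_init() below. Illustrative command lines (the
 * notify-vmexit values come from the NotifyVmexitOption QAPI enum:
 * "run", "internal-error" and "disable"):
 *
 *     qemu-system-x86_64 -accel kvm,notify-vmexit=run,notify-window=100000
 *     qemu-system-x86_64 -accel kvm,xen-version=0x4000a,xen-gnttab-max-frames=64
 *
 * Note that the notify-vmexit and notify-window setters reject changes
 * once the accelerator has been initialized (s->fd is open).
 */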
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
                                   &NotifyVmexitOption_lookup,
                                   kvm_arch_get_notify_vmexit,
                                   kvm_arch_set_notify_vmexit);
    object_class_property_set_description(oc, "notify-vmexit",
                                          "Enable notify VM exit");

    object_class_property_add(oc, "notify-window", "uint32",
                              kvm_arch_get_notify_window,
                              kvm_arch_set_notify_window,
                              NULL, NULL);
    object_class_property_set_description(oc, "notify-window",
                                          "Clock cycles without an event window "
                                          "after which a notification VM exit occurs");

    object_class_property_add(oc, "xen-version", "uint32",
                              kvm_arch_get_xen_version,
                              kvm_arch_set_xen_version,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-version",
                                          "Xen version to be emulated "
                                          "(in XENVER_version form "
                                          "e.g. 0x4000a for 4.10)");

    object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
                              kvm_arch_get_xen_gnttab_max_frames,
                              kvm_arch_set_xen_gnttab_max_frames,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-gnttab-max-frames",
                                          "Maximum number of grant table frames");

    object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
                              kvm_arch_get_xen_evtchn_max_pirq,
                              kvm_arch_set_xen_evtchn_max_pirq,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-evtchn-max-pirq",
                                          "Maximum number of Xen PIRQs");
}

/*
 * Tell KVM the highest APIC ID the board will ever use, so that it can
 * size its per-VM data structures accordingly.
 */
void kvm_set_max_apic_id(uint32_t max_apic_id)
{
    kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
}