/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/topology.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

#include CONFIG_DEVICES

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_INFO(SIGNAL_MSI),
    KVM_CAP_INFO(IRQ_ROUTING),
    KVM_CAP_INFO(DEBUGREGS),
    KVM_CAP_INFO(XSAVE),
    KVM_CAP_INFO(VCPU_EVENTS),
    KVM_CAP_INFO(X86_ROBUST_SINGLESTEP),
    KVM_CAP_INFO(MCE),
    KVM_CAP_INFO(ADJUST_CLOCK),
    KVM_CAP_INFO(SET_IDENTITY_MAP_ADDR),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave2;
static int has_xcrs;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

static const char *vm_type_name[] = {
    [KVM_X86_DEFAULT_VM] = "default",
    [KVM_X86_SEV_VM] = "SEV",
    [KVM_X86_SEV_ES_VM] = "SEV-ES",
    [KVM_X86_SNP_VM] = "SEV-SNP",
};

bool kvm_is_vm_type_supported(int type)
{
    uint32_t machine_types;

    /*
     * old KVM doesn't support KVM_CAP_VM_TYPES but KVM_X86_DEFAULT_VM
     * is always supported
     */
    if (type == KVM_X86_DEFAULT_VM) {
        return true;
    }

    machine_types = kvm_check_extension(KVM_STATE(current_machine->accelerator),
                                        KVM_CAP_VM_TYPES);
    return !!(machine_types & BIT(type));
}
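/*
 * Illustrative sketch (comment only, not part of the build): an accel
 * setup path could use kvm_is_vm_type_supported() to pick a VM type,
 * e.g. with a hypothetical helper my_pick_vm_type():
 *
 *   static int my_pick_vm_type(MachineState *ms)
 *   {
 *       if (ms->cgs && kvm_is_vm_type_supported(KVM_X86_SNP_VM)) {
 *           return KVM_X86_SNP_VM;
 *       }
 *       return KVM_X86_DEFAULT_VM;  // always accepted, see above
 *   }
 *
 * kvm_get_vm_type() below is the real selection logic used by QEMU.
 */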
int kvm_get_vm_type(MachineState *ms)
{
    int kvm_type = KVM_X86_DEFAULT_VM;

    if (ms->cgs) {
        if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) {
            error_report("configuration type %s not supported for x86 guests",
                         object_get_typename(OBJECT(ms->cgs)));
            exit(1);
        }
        kvm_type = x86_confidential_guest_kvm_type(
            X86_CONFIDENTIAL_GUEST(ms->cgs));
    }

    if (!kvm_is_vm_type_supported(kvm_type)) {
        error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]);
        exit(1);
    }

    return kvm_type;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
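/*
 * Usage sketch (illustrative): MEMORIZE() evaluates 'fn' once and caches
 * the result in '_result' across calls. Because the macro body contains a
 * 'return', it must be expanded inside a function returning bool, e.g. a
 * hypothetical probe:
 *
 *   bool kvm_enable_foo(void)
 *   {
 *       return MEMORIZE(kvm_x2apic_api_set_flags(0), has_foo_api);
 *   }
 *
 * where only the first invocation issues the underlying ioctl.
 */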
static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
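/*
 * Combined lookup sketch (illustrative): fetch one register of one leaf,
 * falling back to 0 when the leaf is absent; my_get_leaf_reg() is a
 * hypothetical helper, kvm_arch_get_supported_cpuid() below is the real
 * consumer of these two primitives.
 *
 *   static uint32_t my_get_leaf_reg(struct kvm_cpuid2 *cpuid,
 *                                   uint32_t fn, uint32_t idx, int reg)
 *   {
 *       struct kvm_cpuid_entry2 *e = cpuid_find_entry(cpuid, fn, idx);
 *       return e ? cpuid_entry_get_reg(e, reg) : 0;
 *   }
 */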
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support it when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization. */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization. */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization. */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call. QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}
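/*
 * Caller-side sketch (illustrative): test whether KVM can expose a given
 * CPUID feature bit, here x2APIC from leaf 1 ECX:
 *
 *   bool ok = kvm_arch_get_supported_cpuid(kvm_state, 1, 0, R_ECX) &
 *             CPUID_EXT_X2APIC;
 */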
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
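/*
 * Worked example (illustrative) for the TRUE_*_CTLS transformation above:
 * a raw MSR value of 0x0000001e00000016 yields must_be_one = 0x00000016
 * and can_be_one = 0x0000001e, so the function reports 0x00000008, i.e.
 * only the bit that may be one but is not required to be.
 */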
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    *max_banks = kvm_check_extension(s, KVM_CAP_MCE);
    return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
           ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
            cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}
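/*
 * Worked example (illustrative): with freq = 1000000 kHz (a 1 GHz TSC),
 * 250 ppm gives min_freq = 999750 and max_freq = 1000250, so a requested
 * 1000200 kHz is considered correctable while 1001000 kHz is not.
 */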
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
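/*
 * Encoding note (illustrative): DEFAULT_EVMCS_VERSION packs the exposed
 * eVMCS range as (max << 8) | min, so ((1 << 8) | 1) advertises versions
 * [1..1]; evmcs_version_supported() later in this file unpacks it the
 * same way.
 */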
static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}
/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}
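/*
 * Usage sketch (illustrative): read the host's Hyper-V features leaf and
 * test a bit, e.g. the reference TSC page:
 *
 *   uint32_t eax = hv_cpuid_get_host(cs, HV_CPUID_FEATURES, R_EAX);
 *   bool has_tsc_page = eax & HV_REFERENCE_TSC_AVAILABLE;
 */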
static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}
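/*
 * Example (illustrative): per the table above, hv-stimer depends on both
 * hv-synic and hv-time, so hv_feature_check_deps(cpu, HYPERV_FEAT_STIMER,
 * &err) fails with "Hyper-V synthetic timers (hv-stimer) requires
 * Hyper-V ..." unless both dependencies are enabled on the CPU.
 */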
/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and that the configuration is sane
 * (all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu))
        return true;

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
        return true;

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}
/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
                                  HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
             cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
             (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
                  HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
            HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;
    }

    return cpuid_i;
}
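/*
 * Consumer sketch (illustrative): the entries built here are handed to
 * KVM from kvm_arch_init_vcpu() below, roughly
 *
 *   cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
 *   ...
 *   cpuid_data.cpuid.nent = cpuid_i;
 *   // followed by KVM_SET_CPUID2 on the vCPU fd
 */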
static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}

static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration,"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    /* Skip SynIC and VP_INDEX since they are hard deps already */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_STIMER) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        hyperv_x86_set_vmbus_recommended_features_enabled();
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100
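/*
 * Worked example (illustrative) for evmcs_version_supported() above:
 * evmcs_version = 0x0101 (range [1..1]) is accepted when KVM reports
 * supported_evmcs_version = 0x0201 ([1..2]), but rejected against
 * 0x0202 ([2..2]) because min 1 < 2.
 *
 * Sizing note (illustrative) for kvm_init_xsave() below: has_xsave2
 * holds the buffer size reported by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2);
 * e.g. a reported 4416 bytes is rounded up to 8192 by
 * QEMU_ALIGN_UP(..., 4096).
 */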
static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}

static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}

static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
                                    struct kvm_cpuid_entry2 *entries,
                                    uint32_t cpuid_i)
{
    uint32_t limit, i, j;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];
        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (!x86_has_extended_topo(env->avail_cpu_topo)) {
                cpuid_i--;
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x12:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (j > 1 && (c->eax & 0xf) != 1) {
                    break;
                }

                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        case 0x7:
        case 0x14:
        case 0x1d:
        case 0x1e: {
            uint32_t times;

            c->function = i;
            c->index = 0;
            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax;

            for (j = 1; j <= times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
                c->function = i;
                c->index = j;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        j = 0;
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            goto full;
        }
        c = &entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    goto full;
                }
                c = &entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            j = 0;
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                goto full;
            }
            c = &entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    return cpuid_i;

full:
    fprintf(stderr, "cpuid_data is full, no space for "
            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
    abort();
}
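/*
 * Caller sketch (illustrative): kvm_arch_init_vcpu() below seeds
 * cpuid_data.entries with the paravirt leaves (Hyper-V, Xen or KVM)
 * first, then appends the architectural leaves via
 *
 *   cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
 *
 * bounded by KVM_MAX_CPUID_ENTRIES.
 */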
2024 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : 2025 -ENOTSUP; 2026 if (r > 0) { 2027 env->tsc_khz = r; 2028 } 2029 } 2030 2031 env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY; 2032 2033 /* 2034 * kvm_hyperv_expand_features() is called here for the second time in case 2035 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle 2036 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to 2037 * check which Hyper-V enlightenments are supported and which are not, we 2038 * can still proceed and check/expand Hyper-V enlightenments here so legacy 2039 * behavior is preserved. 2040 */ 2041 if (!kvm_hyperv_expand_features(cpu, &local_err)) { 2042 error_report_err(local_err); 2043 return -ENOSYS; 2044 } 2045 2046 if (hyperv_enabled(cpu)) { 2047 r = hyperv_init_vcpu(cpu); 2048 if (r) { 2049 return r; 2050 } 2051 2052 cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries); 2053 kvm_base = KVM_CPUID_SIGNATURE_NEXT; 2054 has_msr_hv_hypercall = true; 2055 } 2056 2057 if (cs->kvm_state->xen_version) { 2058 #ifdef CONFIG_XEN_EMU 2059 struct kvm_cpuid_entry2 *xen_max_leaf; 2060 2061 memcpy(signature, "XenVMMXenVMM", 12); 2062 2063 xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; 2064 c->function = kvm_base + XEN_CPUID_SIGNATURE; 2065 c->eax = kvm_base + XEN_CPUID_TIME; 2066 c->ebx = signature[0]; 2067 c->ecx = signature[1]; 2068 c->edx = signature[2]; 2069 2070 c = &cpuid_data.entries[cpuid_i++]; 2071 c->function = kvm_base + XEN_CPUID_VENDOR; 2072 c->eax = cs->kvm_state->xen_version; 2073 c->ebx = 0; 2074 c->ecx = 0; 2075 c->edx = 0; 2076 2077 c = &cpuid_data.entries[cpuid_i++]; 2078 c->function = kvm_base + XEN_CPUID_HVM_MSR; 2079 /* Number of hypercall-transfer pages */ 2080 c->eax = 1; 2081 /* Hypercall MSR base address */ 2082 if (hyperv_enabled(cpu)) { 2083 c->ebx = XEN_HYPERCALL_MSR_HYPERV; 2084 kvm_xen_init(cs->kvm_state, c->ebx); 2085 } else { 2086 c->ebx = XEN_HYPERCALL_MSR; 2087 } 2088 c->ecx = 0; 2089 c->edx = 0; 2090 2091 c = &cpuid_data.entries[cpuid_i++]; 2092 c->function = kvm_base + XEN_CPUID_TIME; 2093 c->eax = ((!!tsc_is_stable_and_known(env) << 1) | 2094 (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); 2095 /* default=0 (emulate if necessary) */ 2096 c->ebx = 0; 2097 /* guest tsc frequency */ 2098 c->ecx = env->user_tsc_khz; 2099 /* guest tsc incarnation (migration count) */ 2100 c->edx = 0; 2101 2102 c = &cpuid_data.entries[cpuid_i++]; 2103 c->function = kvm_base + XEN_CPUID_HVM; 2104 xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; 2105 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { 2106 c->function = kvm_base + XEN_CPUID_HVM; 2107 2108 if (cpu->xen_vapic) { 2109 c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; 2110 c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; 2111 } 2112 2113 c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; 2114 2115 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { 2116 c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; 2117 c->ebx = cs->cpu_index; 2118 } 2119 2120 if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { 2121 c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; 2122 } 2123 } 2124 2125 r = kvm_xen_init_vcpu(cs); 2126 if (r) { 2127 return r; 2128 } 2129 2130 kvm_base += 0x100; 2131 #else /* CONFIG_XEN_EMU */ 2132 /* This should never happen as kvm_arch_init() would have died first. 
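* (kvm_arch_init() rejects a nonzero xen_version with -ENOTSUP when CONFIG_XEN_EMU is not built in, so this path is unreachable in practice.)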
*/ 2133 fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n"); 2134 abort(); 2135 #endif 2136 } else if (cpu->expose_kvm) { 2137 memcpy(signature, "KVMKVMKVM\0\0\0", 12); 2138 c = &cpuid_data.entries[cpuid_i++]; 2139 c->function = KVM_CPUID_SIGNATURE | kvm_base; 2140 c->eax = KVM_CPUID_FEATURES | kvm_base; 2141 c->ebx = signature[0]; 2142 c->ecx = signature[1]; 2143 c->edx = signature[2]; 2144 2145 c = &cpuid_data.entries[cpuid_i++]; 2146 c->function = KVM_CPUID_FEATURES | kvm_base; 2147 c->eax = env->features[FEAT_KVM]; 2148 c->edx = env->features[FEAT_KVM_HINTS]; 2149 } 2150 2151 if (cpu->kvm_pv_enforce_cpuid) { 2152 r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1); 2153 if (r < 0) { 2154 fprintf(stderr, 2155 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s\n", 2156 strerror(-r)); 2157 abort(); 2158 } 2159 } 2160 2161 cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i); 2162 cpuid_data.cpuid.nent = cpuid_i; 2163 2164 if (((env->cpuid_version >> 8) & 0xF) >= 6 2165 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 2166 (CPUID_MCE | CPUID_MCA)) { 2167 uint64_t mcg_cap, unsupported_caps; 2168 int banks; 2169 int ret; 2170 2171 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks); 2172 if (ret < 0) { 2173 fprintf(stderr, "kvm_get_mce_cap_supported: %s\n", strerror(-ret)); 2174 return ret; 2175 } 2176 2177 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) { 2178 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)", 2179 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks); 2180 return -ENOTSUP; 2181 } 2182 2183 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK); 2184 if (unsupported_caps) { 2185 if (unsupported_caps & MCG_LMCE_P) { 2186 error_report("kvm: LMCE not supported"); 2187 return -ENOTSUP; 2188 } 2189 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64, 2190 unsupported_caps); 2191 } 2192 2193 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK; 2194 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap); 2195 if (ret < 0) { 2196 fprintf(stderr, "KVM_X86_SETUP_MCE: %s\n", strerror(-ret)); 2197 return ret; 2198 } 2199 } 2200 2201 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env); 2202 2203 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0); 2204 if (c) { 2205 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) || 2206 !!(c->ecx & CPUID_EXT_SMX); 2207 } 2208 2209 c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0); 2210 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { 2211 has_msr_feature_control = true; 2212 } 2213 2214 if (env->mcg_cap & MCG_LMCE_P) { 2215 has_msr_mcg_ext_ctl = has_msr_feature_control = true; 2216 } 2217 2218 if (!env->user_tsc_khz) { 2219 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) && 2220 invtsc_mig_blocker == NULL) { 2221 error_setg(&invtsc_mig_blocker, 2222 "State blocked by non-migratable CPU device" 2223 " (invtsc flag)"); 2224 r = migrate_add_blocker(&invtsc_mig_blocker, &local_err); 2225 if (r < 0) { 2226 error_report_err(local_err); 2227 return r; 2228 } 2229 } 2230 } 2231 2232 if (cpu->vmware_cpuid_freq 2233 /* Guests depend on 0x40000000 to detect this feature, so only expose 2234 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ 2235 && cpu->expose_kvm 2236 && kvm_base == KVM_CPUID_SIGNATURE 2237 /* TSC clock must be stable and known for this feature.
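* Leaf 0x40000010, filled in below, advertises the TSC frequency in EAX and the APIC bus frequency in EBX (both in kHz), so an unstable or unknown TSC would make the advertised values meaningless.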
*/ 2238 && tsc_is_stable_and_known(env)) { 2239 2240 c = &cpuid_data.entries[cpuid_i++]; 2241 c->function = KVM_CPUID_SIGNATURE | 0x10; 2242 c->eax = env->tsc_khz; 2243 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */ 2244 c->ecx = c->edx = 0; 2245 2246 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0); 2247 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); 2248 } 2249 2250 cpuid_data.cpuid.nent = cpuid_i; 2251 2252 cpuid_data.cpuid.padding = 0; 2253 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data); 2254 if (r) { 2255 goto fail; 2256 } 2257 kvm_init_xsave(env); 2258 2259 max_nested_state_len = kvm_max_nested_state_length(); 2260 if (max_nested_state_len > 0) { 2261 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); 2262 2263 if (cpu_has_vmx(env) || cpu_has_svm(env)) { 2264 env->nested_state = g_malloc0(max_nested_state_len); 2265 env->nested_state->size = max_nested_state_len; 2266 2267 kvm_init_nested_state(env); 2268 } 2269 } 2270 2271 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); 2272 2273 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) { 2274 has_msr_tsc_aux = false; 2275 } 2276 2277 kvm_init_msrs(cpu); 2278 2279 return 0; 2280 2281 fail: 2282 migrate_del_blocker(&invtsc_mig_blocker); 2283 2284 return r; 2285 } 2286 2287 int kvm_arch_destroy_vcpu(CPUState *cs) 2288 { 2289 X86CPU *cpu = X86_CPU(cs); 2290 CPUX86State *env = &cpu->env; 2291 2292 g_free(env->xsave_buf); 2293 2294 g_free(cpu->kvm_msr_buf); 2295 cpu->kvm_msr_buf = NULL; 2296 2297 g_free(env->nested_state); 2298 env->nested_state = NULL; 2299 2300 qemu_del_vm_change_state_handler(cpu->vmsentry); 2301 2302 return 0; 2303 } 2304 2305 void kvm_arch_reset_vcpu(X86CPU *cpu) 2306 { 2307 CPUX86State *env = &cpu->env; 2308 2309 env->xcr0 = 1; 2310 if (kvm_irqchip_in_kernel()) { 2311 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE : 2312 KVM_MP_STATE_UNINITIALIZED; 2313 } else { 2314 env->mp_state = KVM_MP_STATE_RUNNABLE; 2315 } 2316 2317 /* enabled by default */ 2318 env->poll_control_msr = 1; 2319 2320 kvm_init_nested_state(env); 2321 2322 sev_es_set_reset_vector(CPU(cpu)); 2323 } 2324 2325 void kvm_arch_after_reset_vcpu(X86CPU *cpu) 2326 { 2327 CPUX86State *env = &cpu->env; 2328 int i; 2329 2330 /* 2331 * Reset SynIC after all other devices have been reset to let them remove 2332 * their SINT routes first. 2333 */ 2334 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 2335 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { 2336 env->msr_hv_synic_sint[i] = HV_SINT_MASKED; 2337 } 2338 2339 hyperv_x86_synic_reset(cpu); 2340 } 2341 } 2342 2343 void kvm_arch_do_init_vcpu(X86CPU *cpu) 2344 { 2345 CPUX86State *env = &cpu->env; 2346 2347 /* APs get directly into wait-for-SIPI state. 
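* (KVM_MP_STATE_INIT_RECEIVED is KVM's wait-for-SIPI state: the AP has accepted INIT and now waits for the SIPI that supplies its start vector.)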
*/ 2348 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { 2349 env->mp_state = KVM_MP_STATE_INIT_RECEIVED; 2350 } 2351 } 2352 2353 static int kvm_get_supported_feature_msrs(KVMState *s) 2354 { 2355 int ret = 0; 2356 2357 if (kvm_feature_msrs != NULL) { 2358 return 0; 2359 } 2360 2361 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) { 2362 return 0; 2363 } 2364 2365 struct kvm_msr_list msr_list; 2366 2367 msr_list.nmsrs = 0; 2368 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list); 2369 if (ret < 0 && ret != -E2BIG) { 2370 error_report("Fetch KVM feature MSR list failed: %s", 2371 strerror(-ret)); 2372 return ret; 2373 } 2374 2375 assert(msr_list.nmsrs > 0); 2376 kvm_feature_msrs = g_malloc0(sizeof(msr_list) + 2377 msr_list.nmsrs * sizeof(msr_list.indices[0])); 2378 2379 kvm_feature_msrs->nmsrs = msr_list.nmsrs; 2380 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs); 2381 2382 if (ret < 0) { 2383 error_report("Fetch KVM feature MSR list failed: %s", 2384 strerror(-ret)); 2385 g_free(kvm_feature_msrs); 2386 kvm_feature_msrs = NULL; 2387 return ret; 2388 } 2389 2390 return 0; 2391 } 2392 2393 static int kvm_get_supported_msrs(KVMState *s) 2394 { 2395 int ret = 0; 2396 struct kvm_msr_list msr_list, *kvm_msr_list; 2397 2398 /* 2399 * Obtain MSR list from KVM. These are the MSRs that we must 2400 * save/restore. 2401 */ 2402 msr_list.nmsrs = 0; 2403 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); 2404 if (ret < 0 && ret != -E2BIG) { 2405 return ret; 2406 } 2407 /* 2408 * Old kernel modules had a bug and could write beyond the provided 2409 * memory. Allocate at least a safe amount of 1K. 2410 */ 2411 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + 2412 msr_list.nmsrs * 2413 sizeof(msr_list.indices[0]))); 2414 2415 kvm_msr_list->nmsrs = msr_list.nmsrs; 2416 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); 2417 if (ret >= 0) { 2418 int i; 2419 2420 for (i = 0; i < kvm_msr_list->nmsrs; i++) { 2421 switch (kvm_msr_list->indices[i]) { 2422 case MSR_STAR: 2423 has_msr_star = true; 2424 break; 2425 case MSR_VM_HSAVE_PA: 2426 has_msr_hsave_pa = true; 2427 break; 2428 case MSR_TSC_AUX: 2429 has_msr_tsc_aux = true; 2430 break; 2431 case MSR_TSC_ADJUST: 2432 has_msr_tsc_adjust = true; 2433 break; 2434 case MSR_IA32_TSCDEADLINE: 2435 has_msr_tsc_deadline = true; 2436 break; 2437 case MSR_IA32_SMBASE: 2438 has_msr_smbase = true; 2439 break; 2440 case MSR_SMI_COUNT: 2441 has_msr_smi_count = true; 2442 break; 2443 case MSR_IA32_MISC_ENABLE: 2444 has_msr_misc_enable = true; 2445 break; 2446 case MSR_IA32_BNDCFGS: 2447 has_msr_bndcfgs = true; 2448 break; 2449 case MSR_IA32_XSS: 2450 has_msr_xss = true; 2451 break; 2452 case MSR_IA32_UMWAIT_CONTROL: 2453 has_msr_umwait = true; 2454 break; 2455 case HV_X64_MSR_CRASH_CTL: 2456 has_msr_hv_crash = true; 2457 break; 2458 case HV_X64_MSR_RESET: 2459 has_msr_hv_reset = true; 2460 break; 2461 case HV_X64_MSR_VP_INDEX: 2462 has_msr_hv_vpindex = true; 2463 break; 2464 case HV_X64_MSR_VP_RUNTIME: 2465 has_msr_hv_runtime = true; 2466 break; 2467 case HV_X64_MSR_SCONTROL: 2468 has_msr_hv_synic = true; 2469 break; 2470 case HV_X64_MSR_STIMER0_CONFIG: 2471 has_msr_hv_stimer = true; 2472 break; 2473 case HV_X64_MSR_TSC_FREQUENCY: 2474 has_msr_hv_frequencies = true; 2475 break; 2476 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 2477 has_msr_hv_reenlightenment = true; 2478 break; 2479 case HV_X64_MSR_SYNDBG_OPTIONS: 2480 has_msr_hv_syndbg_options = true; 2481 break; 2482 case MSR_IA32_SPEC_CTRL: 2483 has_msr_spec_ctrl = true; 
2484 break; 2485 case MSR_AMD64_TSC_RATIO: 2486 has_tsc_scale_msr = true; 2487 break; 2488 case MSR_IA32_TSX_CTRL: 2489 has_msr_tsx_ctrl = true; 2490 break; 2491 case MSR_VIRT_SSBD: 2492 has_msr_virt_ssbd = true; 2493 break; 2494 case MSR_IA32_ARCH_CAPABILITIES: 2495 has_msr_arch_capabs = true; 2496 break; 2497 case MSR_IA32_CORE_CAPABILITY: 2498 has_msr_core_capabs = true; 2499 break; 2500 case MSR_IA32_PERF_CAPABILITIES: 2501 has_msr_perf_capabs = true; 2502 break; 2503 case MSR_IA32_VMX_VMFUNC: 2504 has_msr_vmx_vmfunc = true; 2505 break; 2506 case MSR_IA32_UCODE_REV: 2507 has_msr_ucode_rev = true; 2508 break; 2509 case MSR_IA32_VMX_PROCBASED_CTLS2: 2510 has_msr_vmx_procbased_ctls2 = true; 2511 break; 2512 case MSR_IA32_PKRS: 2513 has_msr_pkrs = true; 2514 break; 2515 } 2516 } 2517 } 2518 2519 g_free(kvm_msr_list); 2520 2521 return ret; 2522 } 2523 2524 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr, 2525 uint64_t *val) 2526 { 2527 CPUState *cs = CPU(cpu); 2528 2529 *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */ 2530 *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */ 2531 2532 return true; 2533 } 2534 2535 static Notifier smram_machine_done; 2536 static KVMMemoryListener smram_listener; 2537 static AddressSpace smram_address_space; 2538 static MemoryRegion smram_as_root; 2539 static MemoryRegion smram_as_mem; 2540 2541 static void register_smram_listener(Notifier *n, void *unused) 2542 { 2543 MemoryRegion *smram = 2544 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 2545 2546 /* Outer container... */ 2547 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull); 2548 memory_region_set_enabled(&smram_as_root, true); 2549 2550 /* ... with two regions inside: normal system memory with low 2551 * priority, and... 2552 */ 2553 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram", 2554 get_system_memory(), 0, ~0ull); 2555 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); 2556 memory_region_set_enabled(&smram_as_mem, true); 2557 2558 if (smram) { 2559 /* ... SMRAM with higher priority */ 2560 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10); 2561 memory_region_set_enabled(smram, true); 2562 } 2563 2564 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); 2565 kvm_memory_listener_register(kvm_state, &smram_listener, 2566 &smram_address_space, 1, "kvm-smram"); 2567 } 2568 2569 int kvm_arch_get_default_type(MachineState *ms) 2570 { 2571 return 0; 2572 } 2573 2574 int kvm_arch_init(MachineState *ms, KVMState *s) 2575 { 2576 uint64_t identity_base = 0xfffbc000; 2577 uint64_t shadow_mem; 2578 int ret; 2579 struct utsname utsname; 2580 Error *local_err = NULL; 2581 2582 /* 2583 * Initialize SEV context, if required 2584 * 2585 * If no memory encryption is requested (ms->cgs == NULL) this is 2586 * a no-op. 2587 * 2588 * It's also a no-op if a non-SEV confidential guest support 2589 * mechanism is selected. SEV is the only mechanism available to 2590 * select on x86 at present, so this doesn't arise, but if new 2591 * mechanisms are supported in future (e.g. TDX), they'll need 2592 * their own initialization either here or elsewhere. 
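* (confidential_guest_kvm_init() is expected to dispatch to the kvm_init hook of whichever ConfidentialGuestSupport object was selected; see ../confidential-guest.h.)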
*/ 2594 if (ms->cgs) { 2595 ret = confidential_guest_kvm_init(ms->cgs, &local_err); 2596 if (ret < 0) { 2597 error_report_err(local_err); 2598 return ret; 2599 } 2600 } 2601 2602 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); 2603 has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0; 2604 2605 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); 2606 2607 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); 2608 if (has_exception_payload) { 2609 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); 2610 if (ret < 0) { 2611 error_report("kvm: Failed to enable exception payload cap: %s", 2612 strerror(-ret)); 2613 return ret; 2614 } 2615 } 2616 2617 has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT); 2618 if (has_triple_fault_event) { 2619 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); 2620 if (ret < 0) { 2621 error_report("kvm: Failed to enable triple fault event cap: %s", 2622 strerror(-ret)); 2623 return ret; 2624 } 2625 } 2626 2627 if (s->xen_version) { 2628 #ifdef CONFIG_XEN_EMU 2629 if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) { 2630 error_report("kvm: Xen support only available in PC machine"); 2631 return -ENOTSUP; 2632 } 2633 /* hyperv_enabled() doesn't work yet. */ 2634 uint32_t msr = XEN_HYPERCALL_MSR; 2635 ret = kvm_xen_init(s, msr); 2636 if (ret < 0) { 2637 return ret; 2638 } 2639 #else 2640 error_report("kvm: Xen support not enabled in qemu"); 2641 return -ENOTSUP; 2642 #endif 2643 } 2644 2645 ret = kvm_get_supported_msrs(s); 2646 if (ret < 0) { 2647 return ret; 2648 } 2649 2650 kvm_get_supported_feature_msrs(s); 2651 2652 uname(&utsname); 2653 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; 2654 2655 /* 2656 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. 2657 * In order to use vm86 mode, an EPT identity map and a TSS are needed. 2658 * Since these must be part of guest physical memory, we need to allocate 2659 * them, both by setting their start addresses in the kernel and by 2660 * creating a corresponding e820 entry. We need 4 pages before the BIOS, 2661 * so this value allows BIOS images of up to 16 MB. 2662 */ 2663 identity_base = 0xfeffc000; 2664 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); 2665 if (ret < 0) { 2666 return ret; 2667 } 2668 2669 /* Set TSS base one page after EPT identity map. */ 2670 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); 2671 if (ret < 0) { 2672 return ret; 2673 } 2674 2675 /* Tell fw_cfg to notify the BIOS to reserve the range. */ 2676 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED); 2677 if (ret < 0) { 2678 fprintf(stderr, "e820_add_entry() table is full\n"); 2679 return ret; 2680 } 2681 2682 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort); 2683 if (shadow_mem != -1) { 2684 shadow_mem /= 4096; 2685 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); 2686 if (ret < 0) { 2687 return ret; 2688 } 2689 } 2690 2691 if (kvm_check_extension(s, KVM_CAP_X86_SMM) && 2692 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) && 2693 x86_machine_is_smm_enabled(X86_MACHINE(ms))) { 2694 smram_machine_done.notify = register_smram_listener; 2695 qemu_add_machine_init_done_notifier(&smram_machine_done); 2696 } 2697 2698 if (enable_cpu_pm) { 2699 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); 2700 /* Workaround for a kernel header with a typo. TODO: fix header and drop.
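* (Older headers spell KVM_X86_DISABLE_EXITS_HLT as KVM_X86_DISABLE_EXITS_HTL; when only the misspelled name is defined, the correct name is defined in terms of it below.)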
*/ 2701 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) 2702 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL 2703 #endif 2704 if (disable_exits) { 2705 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | 2706 KVM_X86_DISABLE_EXITS_HLT | 2707 KVM_X86_DISABLE_EXITS_PAUSE | 2708 KVM_X86_DISABLE_EXITS_CSTATE); 2709 } 2710 2711 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, 2712 disable_exits); 2713 if (ret < 0) { 2714 error_report("kvm: guest stopping CPU not supported: %s", 2715 strerror(-ret)); 2716 } 2717 } 2718 2719 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) { 2720 X86MachineState *x86ms = X86_MACHINE(ms); 2721 2722 if (x86ms->bus_lock_ratelimit > 0) { 2723 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); 2724 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { 2725 error_report("kvm: bus lock detection unsupported"); 2726 return -ENOTSUP; 2727 } 2728 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, 2729 KVM_BUS_LOCK_DETECTION_EXIT); 2730 if (ret < 0) { 2731 error_report("kvm: Failed to enable bus lock detection cap: %s", 2732 strerror(-ret)); 2733 return ret; 2734 } 2735 ratelimit_init(&bus_lock_ratelimit_ctrl); 2736 ratelimit_set_speed(&bus_lock_ratelimit_ctrl, 2737 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME); 2738 } 2739 } 2740 2741 if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE && 2742 kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { 2743 uint64_t notify_window_flags = 2744 ((uint64_t)s->notify_window << 32) | 2745 KVM_X86_NOTIFY_VMEXIT_ENABLED | 2746 KVM_X86_NOTIFY_VMEXIT_USER; 2747 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, 2748 notify_window_flags); 2749 if (ret < 0) { 2750 error_report("kvm: Failed to enable notify vmexit cap: %s", 2751 strerror(-ret)); 2752 return ret; 2753 } 2754 } 2755 if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { 2756 bool r; 2757 2758 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, 2759 KVM_MSR_EXIT_REASON_FILTER); 2760 if (ret) { 2761 error_report("Could not enable user space MSRs: %s", 2762 strerror(-ret)); 2763 exit(1); 2764 } 2765 2766 r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, 2767 kvm_rdmsr_core_thread_count, NULL); 2768 if (!r) { 2769 /* kvm_filter_msr() returns a bool; 'ret' still holds the earlier enable-cap result, so strerror(-ret) would misleadingly print "Success" here. */ 2770 error_report("Could not install MSR_CORE_THREAD_COUNT handler"); 2771 exit(1); 2772 } 2773 } 2774 2775 return 0; 2776 } 2777 2778 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs) 2779 { 2780 lhs->selector = rhs->selector; 2781 lhs->base = rhs->base; 2782 lhs->limit = rhs->limit; 2783 lhs->type = 3; 2784 lhs->present = 1; 2785 lhs->dpl = 3; 2786 lhs->db = 0; 2787 lhs->s = 1; 2788 lhs->l = 0; 2789 lhs->g = 0; 2790 lhs->avl = 0; 2791 lhs->unusable = 0; 2792 } 2793 2794 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) 2795 { 2796 unsigned flags = rhs->flags; 2797 lhs->selector = rhs->selector; 2798 lhs->base = rhs->base; 2799 lhs->limit = rhs->limit; 2800 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; 2801 lhs->present = (flags & DESC_P_MASK) != 0; 2802 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3; 2803 lhs->db = (flags >> DESC_B_SHIFT) & 1; 2804 lhs->s = (flags & DESC_S_MASK) != 0; 2805 lhs->l = (flags >> DESC_L_SHIFT) & 1; 2806 lhs->g = (flags & DESC_G_MASK) != 0; 2807 lhs->avl = (flags & DESC_AVL_MASK) != 0; 2808 lhs->unusable = !lhs->present; 2809 lhs->padding = 0; 2810 } 2811 2812 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) 2813 { 2814 lhs->selector = rhs->selector; 2815 lhs->base = rhs->base; 2816 lhs->limit
= rhs->limit; 2817 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | 2818 ((rhs->present && !rhs->unusable) * DESC_P_MASK) | 2819 (rhs->dpl << DESC_DPL_SHIFT) | 2820 (rhs->db << DESC_B_SHIFT) | 2821 (rhs->s * DESC_S_MASK) | 2822 (rhs->l << DESC_L_SHIFT) | 2823 (rhs->g * DESC_G_MASK) | 2824 (rhs->avl * DESC_AVL_MASK); 2825 } 2826 2827 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) 2828 { 2829 if (set) { 2830 *kvm_reg = *qemu_reg; 2831 } else { 2832 *qemu_reg = *kvm_reg; 2833 } 2834 } 2835 2836 static int kvm_getput_regs(X86CPU *cpu, int set) 2837 { 2838 CPUX86State *env = &cpu->env; 2839 struct kvm_regs regs; 2840 int ret = 0; 2841 2842 if (!set) { 2843 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs); 2844 if (ret < 0) { 2845 return ret; 2846 } 2847 } 2848 2849 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set); 2850 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set); 2851 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set); 2852 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set); 2853 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set); 2854 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set); 2855 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set); 2856 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set); 2857 #ifdef TARGET_X86_64 2858 kvm_getput_reg(&regs.r8, &env->regs[8], set); 2859 kvm_getput_reg(&regs.r9, &env->regs[9], set); 2860 kvm_getput_reg(&regs.r10, &env->regs[10], set); 2861 kvm_getput_reg(&regs.r11, &env->regs[11], set); 2862 kvm_getput_reg(&regs.r12, &env->regs[12], set); 2863 kvm_getput_reg(&regs.r13, &env->regs[13], set); 2864 kvm_getput_reg(&regs.r14, &env->regs[14], set); 2865 kvm_getput_reg(&regs.r15, &env->regs[15], set); 2866 #endif 2867 2868 kvm_getput_reg(&regs.rflags, &env->eflags, set); 2869 kvm_getput_reg(&regs.rip, &env->eip, set); 2870 2871 if (set) { 2872 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs); 2873 } 2874 2875 return ret; 2876 } 2877 2878 static int kvm_put_xsave(X86CPU *cpu) 2879 { 2880 CPUX86State *env = &cpu->env; 2881 void *xsave = env->xsave_buf; 2882 2883 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len); 2884 2885 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); 2886 } 2887 2888 static int kvm_put_xcrs(X86CPU *cpu) 2889 { 2890 CPUX86State *env = &cpu->env; 2891 struct kvm_xcrs xcrs = {}; 2892 2893 if (!has_xcrs) { 2894 return 0; 2895 } 2896 2897 xcrs.nr_xcrs = 1; 2898 xcrs.flags = 0; 2899 xcrs.xcrs[0].xcr = 0; 2900 xcrs.xcrs[0].value = env->xcr0; 2901 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs); 2902 } 2903 2904 static int kvm_put_sregs(X86CPU *cpu) 2905 { 2906 CPUX86State *env = &cpu->env; 2907 struct kvm_sregs sregs; 2908 2909 /* 2910 * The interrupt_bitmap is ignored because KVM_SET_SREGS is 2911 * always followed by KVM_SET_VCPU_EVENTS.
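* (KVM_SET_VCPU_EVENTS carries the authoritative pending-interrupt state, so the bitmap is zeroed below merely to keep the ioctl argument fully initialized.)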
2912 */ 2913 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); 2914 2915 if ((env->eflags & VM_MASK)) { 2916 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 2917 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 2918 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 2919 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 2920 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 2921 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 2922 } else { 2923 set_seg(&sregs.cs, &env->segs[R_CS]); 2924 set_seg(&sregs.ds, &env->segs[R_DS]); 2925 set_seg(&sregs.es, &env->segs[R_ES]); 2926 set_seg(&sregs.fs, &env->segs[R_FS]); 2927 set_seg(&sregs.gs, &env->segs[R_GS]); 2928 set_seg(&sregs.ss, &env->segs[R_SS]); 2929 } 2930 2931 set_seg(&sregs.tr, &env->tr); 2932 set_seg(&sregs.ldt, &env->ldt); 2933 2934 sregs.idt.limit = env->idt.limit; 2935 sregs.idt.base = env->idt.base; 2936 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 2937 sregs.gdt.limit = env->gdt.limit; 2938 sregs.gdt.base = env->gdt.base; 2939 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 2940 2941 sregs.cr0 = env->cr[0]; 2942 sregs.cr2 = env->cr[2]; 2943 sregs.cr3 = env->cr[3]; 2944 sregs.cr4 = env->cr[4]; 2945 2946 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 2947 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 2948 2949 sregs.efer = env->efer; 2950 2951 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); 2952 } 2953 2954 static int kvm_put_sregs2(X86CPU *cpu) 2955 { 2956 CPUX86State *env = &cpu->env; 2957 struct kvm_sregs2 sregs; 2958 int i; 2959 2960 sregs.flags = 0; 2961 2962 if ((env->eflags & VM_MASK)) { 2963 set_v8086_seg(&sregs.cs, &env->segs[R_CS]); 2964 set_v8086_seg(&sregs.ds, &env->segs[R_DS]); 2965 set_v8086_seg(&sregs.es, &env->segs[R_ES]); 2966 set_v8086_seg(&sregs.fs, &env->segs[R_FS]); 2967 set_v8086_seg(&sregs.gs, &env->segs[R_GS]); 2968 set_v8086_seg(&sregs.ss, &env->segs[R_SS]); 2969 } else { 2970 set_seg(&sregs.cs, &env->segs[R_CS]); 2971 set_seg(&sregs.ds, &env->segs[R_DS]); 2972 set_seg(&sregs.es, &env->segs[R_ES]); 2973 set_seg(&sregs.fs, &env->segs[R_FS]); 2974 set_seg(&sregs.gs, &env->segs[R_GS]); 2975 set_seg(&sregs.ss, &env->segs[R_SS]); 2976 } 2977 2978 set_seg(&sregs.tr, &env->tr); 2979 set_seg(&sregs.ldt, &env->ldt); 2980 2981 sregs.idt.limit = env->idt.limit; 2982 sregs.idt.base = env->idt.base; 2983 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); 2984 sregs.gdt.limit = env->gdt.limit; 2985 sregs.gdt.base = env->gdt.base; 2986 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); 2987 2988 sregs.cr0 = env->cr[0]; 2989 sregs.cr2 = env->cr[2]; 2990 sregs.cr3 = env->cr[3]; 2991 sregs.cr4 = env->cr[4]; 2992 2993 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); 2994 sregs.apic_base = cpu_get_apic_base(cpu->apic_state); 2995 2996 sregs.efer = env->efer; 2997 2998 if (env->pdptrs_valid) { 2999 for (i = 0; i < 4; i++) { 3000 sregs.pdptrs[i] = env->pdptrs[i]; 3001 } 3002 sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 3003 } 3004 3005 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs); 3006 } 3007 3008 3009 static void kvm_msr_buf_reset(X86CPU *cpu) 3010 { 3011 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); 3012 } 3013 3014 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) 3015 { 3016 struct kvm_msrs *msrs = cpu->kvm_msr_buf; 3017 void *limit = ((void *)msrs) + MSR_BUF_SIZE; 3018 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; 3019 3020 assert((void *)(entry + 1) <= limit); 3021 3022 entry->index = index; 3023 entry->reserved = 0; 3024 entry->data = value; 3025 
msrs->nmsrs++; 3026 } 3027 3028 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) 3029 { 3030 kvm_msr_buf_reset(cpu); 3031 kvm_msr_entry_add(cpu, index, value); 3032 3033 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 3034 } 3035 3036 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value) 3037 { 3038 int ret; 3039 struct { 3040 struct kvm_msrs info; 3041 struct kvm_msr_entry entries[1]; 3042 } msr_data = { 3043 .info.nmsrs = 1, 3044 .entries[0].index = index, 3045 }; 3046 3047 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); 3048 if (ret < 0) { 3049 return ret; 3050 } 3051 assert(ret == 1); 3052 *value = msr_data.entries[0].data; 3053 return ret; 3054 } 3055 void kvm_put_apicbase(X86CPU *cpu, uint64_t value) 3056 { 3057 int ret; 3058 3059 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); 3060 assert(ret == 1); 3061 } 3062 3063 static int kvm_put_tscdeadline_msr(X86CPU *cpu) 3064 { 3065 CPUX86State *env = &cpu->env; 3066 int ret; 3067 3068 if (!has_msr_tsc_deadline) { 3069 return 0; 3070 } 3071 3072 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); 3073 if (ret < 0) { 3074 return ret; 3075 } 3076 3077 assert(ret == 1); 3078 return 0; 3079 } 3080 3081 /* 3082 * Provide a separate write service for the feature control MSR in order to 3083 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done 3084 * before writing any other state because forcibly leaving nested mode 3085 * invalidates the VCPU state. 3086 */ 3087 static int kvm_put_msr_feature_control(X86CPU *cpu) 3088 { 3089 int ret; 3090 3091 if (!has_msr_feature_control) { 3092 return 0; 3093 } 3094 3095 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, 3096 cpu->env.msr_ia32_feature_control); 3097 if (ret < 0) { 3098 return ret; 3099 } 3100 3101 assert(ret == 1); 3102 return 0; 3103 } 3104 3105 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features) 3106 { 3107 uint32_t default1, can_be_one, can_be_zero; 3108 uint32_t must_be_one; 3109 3110 switch (index) { 3111 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 3112 default1 = 0x00000016; 3113 break; 3114 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 3115 default1 = 0x0401e172; 3116 break; 3117 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 3118 default1 = 0x000011ff; 3119 break; 3120 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 3121 default1 = 0x00036dff; 3122 break; 3123 case MSR_IA32_VMX_PROCBASED_CTLS2: 3124 default1 = 0; 3125 break; 3126 default: 3127 abort(); 3128 } 3129 3130 /* If a feature bit is set, the control can be either set or clear. 3131 * Otherwise the value is limited to either 0 or 1 by default1. 3132 */ 3133 can_be_one = features | default1; 3134 can_be_zero = features | ~default1; 3135 must_be_one = ~can_be_zero; 3136 3137 /* 3138 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one). 3139 * Bit 32:63 -> 1 if the control bit can be one. 3140 */ 3141 return must_be_one | (((uint64_t)can_be_one) << 32); 3142 } 3143 3144 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) 3145 { 3146 uint64_t kvm_vmx_basic = 3147 kvm_arch_get_supported_msr_feature(kvm_state, 3148 MSR_IA32_VMX_BASIC); 3149 3150 if (!kvm_vmx_basic) { 3151 /* If the kernel doesn't support VMX feature (kvm_intel.nested=0), 3152 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail. 
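* Returning early simply leaves the VMX capability MSRs out of the buffer instead of failing vCPU setup.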
3153 */ 3154 return; 3155 } 3156 3157 uint64_t kvm_vmx_misc = 3158 kvm_arch_get_supported_msr_feature(kvm_state, 3159 MSR_IA32_VMX_MISC); 3160 uint64_t kvm_vmx_ept_vpid = 3161 kvm_arch_get_supported_msr_feature(kvm_state, 3162 MSR_IA32_VMX_EPT_VPID_CAP); 3163 3164 /* 3165 * If the guest is 64-bit, a value of 1 is allowed for the host address 3166 * space size vmexit control. 3167 */ 3168 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM 3169 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0; 3170 3171 /* 3172 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should 3173 * not change them for backwards compatibility. 3174 */ 3175 uint64_t fixed_vmx_basic = kvm_vmx_basic & 3176 (MSR_VMX_BASIC_VMCS_REVISION_MASK | 3177 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK | 3178 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK); 3179 3180 /* 3181 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can 3182 * change in the future but are always zero for now, clear them to be 3183 * future proof. Bits 32-63 in theory could change, though KVM does 3184 * not support dual-monitor treatment and probably never will; mask 3185 * them out as well. 3186 */ 3187 uint64_t fixed_vmx_misc = kvm_vmx_misc & 3188 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK | 3189 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK); 3190 3191 /* 3192 * EPT memory types should not change either, so we do not bother 3193 * adding features for them. 3194 */ 3195 uint64_t fixed_vmx_ept_mask = 3196 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ? 3197 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0); 3198 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask; 3199 3200 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 3201 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 3202 f[FEAT_VMX_PROCBASED_CTLS])); 3203 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS, 3204 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS, 3205 f[FEAT_VMX_PINBASED_CTLS])); 3206 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS, 3207 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS, 3208 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit); 3209 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS, 3210 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS, 3211 f[FEAT_VMX_ENTRY_CTLS])); 3212 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2, 3213 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2, 3214 f[FEAT_VMX_SECONDARY_CTLS])); 3215 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP, 3216 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid); 3217 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC, 3218 f[FEAT_VMX_BASIC] | fixed_vmx_basic); 3219 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC, 3220 f[FEAT_VMX_MISC] | fixed_vmx_misc); 3221 if (has_msr_vmx_vmfunc) { 3222 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]); 3223 } 3224 3225 /* 3226 * Just to be safe, write these with constant values. The CRn_FIXED1 3227 * MSRs are generated by KVM based on the vCPU's CPUID. 3228 */ 3229 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0, 3230 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK); 3231 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, 3232 CR4_VMXE_MASK); 3233 3234 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { 3235 /* TSC multiplier (0x2032). */ 3236 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); 3237 } else { 3238 /* Preemption timer (0x482E). 
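* (MSR_IA32_VMX_VMCS_ENUM reports the highest VMCS field index the guest may enumerate; index 0x2E covers the preemption-timer field (0x482E) and 0x32 the TSC-multiplier field (0x2032).)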
*/ 3239 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E); 3240 } 3241 } 3242 3243 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f) 3244 { 3245 uint64_t kvm_perf_cap = 3246 kvm_arch_get_supported_msr_feature(kvm_state, 3247 MSR_IA32_PERF_CAPABILITIES); 3248 3249 if (kvm_perf_cap) { 3250 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES, 3251 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]); 3252 } 3253 } 3254 3255 static int kvm_buf_set_msrs(X86CPU *cpu) 3256 { 3257 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); 3258 if (ret < 0) { 3259 return ret; 3260 } 3261 3262 if (ret < cpu->kvm_msr_buf->nmsrs) { 3263 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 3264 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, 3265 (uint32_t)e->index, (uint64_t)e->data); 3266 } 3267 3268 assert(ret == cpu->kvm_msr_buf->nmsrs); 3269 return 0; 3270 } 3271 3272 static void kvm_init_msrs(X86CPU *cpu) 3273 { 3274 CPUX86State *env = &cpu->env; 3275 3276 kvm_msr_buf_reset(cpu); 3277 if (has_msr_arch_capabs) { 3278 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, 3279 env->features[FEAT_ARCH_CAPABILITIES]); 3280 } 3281 3282 if (has_msr_core_capabs) { 3283 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, 3284 env->features[FEAT_CORE_CAPABILITY]); 3285 } 3286 3287 if (has_msr_perf_capabs && cpu->enable_pmu) { 3288 kvm_msr_entry_add_perf(cpu, env->features); 3289 } 3290 3291 if (has_msr_ucode_rev) { 3292 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); 3293 } 3294 3295 /* 3296 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but 3297 * all kernels with MSR features should have them. 3298 */ 3299 if (kvm_feature_msrs && cpu_has_vmx(env)) { 3300 kvm_msr_entry_add_vmx(cpu, env->features); 3301 } 3302 3303 assert(kvm_buf_set_msrs(cpu) == 0); 3304 } 3305 3306 static int kvm_put_msrs(X86CPU *cpu, int level) 3307 { 3308 CPUX86State *env = &cpu->env; 3309 int i; 3310 3311 kvm_msr_buf_reset(cpu); 3312 3313 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); 3314 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); 3315 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); 3316 kvm_msr_entry_add(cpu, MSR_PAT, env->pat); 3317 if (has_msr_star) { 3318 kvm_msr_entry_add(cpu, MSR_STAR, env->star); 3319 } 3320 if (has_msr_hsave_pa) { 3321 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); 3322 } 3323 if (has_msr_tsc_aux) { 3324 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); 3325 } 3326 if (has_msr_tsc_adjust) { 3327 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); 3328 } 3329 if (has_msr_misc_enable) { 3330 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 3331 env->msr_ia32_misc_enable); 3332 } 3333 if (has_msr_smbase) { 3334 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); 3335 } 3336 if (has_msr_smi_count) { 3337 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); 3338 } 3339 if (has_msr_pkrs) { 3340 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs); 3341 } 3342 if (has_msr_bndcfgs) { 3343 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); 3344 } 3345 if (has_msr_xss) { 3346 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); 3347 } 3348 if (has_msr_umwait) { 3349 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait); 3350 } 3351 if (has_msr_spec_ctrl) { 3352 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); 3353 } 3354 if (has_tsc_scale_msr) { 3355 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); 3356 } 3357 3358 if (has_msr_tsx_ctrl) 
{ 3359 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); 3360 } 3361 if (has_msr_virt_ssbd) { 3362 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); 3363 } 3364 3365 #ifdef TARGET_X86_64 3366 if (lm_capable_kernel) { 3367 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); 3368 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); 3369 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); 3370 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); 3371 } 3372 #endif 3373 3374 /* 3375 * The following MSRs have side effects on the guest or are too heavy 3376 * for normal writeback. Limit them to reset or full state updates. 3377 */ 3378 if (level >= KVM_PUT_RESET_STATE) { 3379 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); 3380 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); 3381 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); 3382 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3383 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr); 3384 } 3385 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3386 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); 3387 } 3388 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3389 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); 3390 } 3391 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3392 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); 3393 } 3394 3395 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3396 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); 3397 } 3398 3399 if (has_architectural_pmu_version > 0) { 3400 if (has_architectural_pmu_version > 1) { 3401 /* Stop the counter. */ 3402 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3403 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3404 } 3405 3406 /* Set the counter values. */ 3407 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3408 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 3409 env->msr_fixed_counters[i]); 3410 } 3411 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3412 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 3413 env->msr_gp_counters[i]); 3414 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 3415 env->msr_gp_evtsel[i]); 3416 } 3417 if (has_architectural_pmu_version > 1) { 3418 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 3419 env->msr_global_status); 3420 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 3421 env->msr_global_ovf_ctrl); 3422 3423 /* Now start the PMU. 
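* Restoring FIXED_CTR_CTRL and GLOBAL_CTRL only after the counter values are in place mirrors the stop/load/start sequence that began above.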
*/ 3424 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 3425 env->msr_fixed_ctr_ctrl); 3426 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 3427 env->msr_global_ctrl); 3428 } 3429 } 3430 /* 3431 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, 3432 * only sync them to KVM on the first cpu 3433 */ 3434 if (current_cpu == first_cpu) { 3435 if (has_msr_hv_hypercall) { 3436 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 3437 env->msr_hv_guest_os_id); 3438 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 3439 env->msr_hv_hypercall); 3440 } 3441 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 3442 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 3443 env->msr_hv_tsc); 3444 } 3445 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 3446 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 3447 env->msr_hv_reenlightenment_control); 3448 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 3449 env->msr_hv_tsc_emulation_control); 3450 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 3451 env->msr_hv_tsc_emulation_status); 3452 } 3453 #ifdef CONFIG_SYNDBG 3454 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) && 3455 has_msr_hv_syndbg_options) { 3456 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 3457 hyperv_syndbg_query_options()); 3458 } 3459 #endif 3460 } 3461 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 3462 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 3463 env->msr_hv_vapic); 3464 } 3465 if (has_msr_hv_crash) { 3466 int j; 3467 3468 for (j = 0; j < HV_CRASH_PARAMS; j++) 3469 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 3470 env->msr_hv_crash_params[j]); 3471 3472 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); 3473 } 3474 if (has_msr_hv_runtime) { 3475 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); 3476 } 3477 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) 3478 && hv_vpindex_settable) { 3479 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, 3480 hyperv_vp_index(CPU(cpu))); 3481 } 3482 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3483 int j; 3484 3485 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); 3486 3487 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 3488 env->msr_hv_synic_control); 3489 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 3490 env->msr_hv_synic_evt_page); 3491 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 3492 env->msr_hv_synic_msg_page); 3493 3494 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { 3495 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, 3496 env->msr_hv_synic_sint[j]); 3497 } 3498 } 3499 if (has_msr_hv_stimer) { 3500 int j; 3501 3502 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { 3503 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, 3504 env->msr_hv_stimer_config[j]); 3505 } 3506 3507 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { 3508 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, 3509 env->msr_hv_stimer_count[j]); 3510 } 3511 } 3512 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3513 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); 3514 3515 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); 3516 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); 3517 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); 3518 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); 3519 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); 3520 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); 3521 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 
env->mtrr_fixed[5]); 3522 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); 3523 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); 3524 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); 3525 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); 3526 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); 3527 for (i = 0; i < MSR_MTRRcap_VCNT; i++) { 3528 /* The CPU GPs if we write to a bit above the physical limit of 3529 * the host CPU (and KVM emulates that) 3530 */ 3531 uint64_t mask = env->mtrr_var[i].mask; 3532 mask &= phys_mask; 3533 3534 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 3535 env->mtrr_var[i].base); 3536 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); 3537 } 3538 } 3539 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { 3540 int addr_num = kvm_arch_get_supported_cpuid(kvm_state, 3541 0x14, 1, R_EAX) & 0x7; 3542 3543 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 3544 env->msr_rtit_ctrl); 3545 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 3546 env->msr_rtit_status); 3547 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 3548 env->msr_rtit_output_base); 3549 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 3550 env->msr_rtit_output_mask); 3551 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 3552 env->msr_rtit_cr3_match); 3553 for (i = 0; i < addr_num; i++) { 3554 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 3555 env->msr_rtit_addrs[i]); 3556 } 3557 } 3558 3559 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { 3560 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 3561 env->msr_ia32_sgxlepubkeyhash[0]); 3562 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 3563 env->msr_ia32_sgxlepubkeyhash[1]); 3564 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 3565 env->msr_ia32_sgxlepubkeyhash[2]); 3566 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 3567 env->msr_ia32_sgxlepubkeyhash[3]); 3568 } 3569 3570 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { 3571 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 3572 env->msr_xfd); 3573 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 3574 env->msr_xfd_err); 3575 } 3576 3577 if (kvm_enabled() && cpu->enable_pmu && 3578 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { 3579 uint64_t depth; 3580 int ret; 3581 3582 /* 3583 * Only migrate Arch LBR state when the host's Arch LBR depth 3584 * matches the source guest's; a guest/host mismatch in this 3585 * MSR's configuration would otherwise lead to unexpected 3586 * misbehavior. 3587 */ 3588 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth); 3589 3590 if (ret == 1 && !!depth && depth == env->msr_lbr_depth) { 3591 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl); 3592 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth); 3593 3594 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) { 3595 if (!env->lbr_records[i].from) { 3596 continue; 3597 } 3598 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 3599 env->lbr_records[i].from); 3600 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 3601 env->lbr_records[i].to); 3602 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 3603 env->lbr_records[i].info); 3604 } 3605 } 3606 } 3607 3608 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see 3609 * kvm_put_msr_feature_control.
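* It must reach KVM before any other nested-virtualization state, because clearing it can force the vCPU out of VMX operation.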
*/ 3610 } 3611 3612 if (env->mcg_cap) { 3613 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); 3614 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); 3615 if (has_msr_mcg_ext_ctl) { 3616 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); 3617 } 3618 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3619 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); 3620 } 3621 } 3622 3623 return kvm_buf_set_msrs(cpu); 3624 } 3625 3626 3627 static int kvm_get_xsave(X86CPU *cpu) 3628 { 3629 CPUX86State *env = &cpu->env; 3630 void *xsave = env->xsave_buf; 3631 int type, ret; 3632 3633 type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; 3634 ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); 3635 if (ret < 0) { 3636 return ret; 3637 } 3638 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len); 3639 3640 return 0; 3641 } 3642 3643 static int kvm_get_xcrs(X86CPU *cpu) 3644 { 3645 CPUX86State *env = &cpu->env; 3646 int i, ret; 3647 struct kvm_xcrs xcrs; 3648 3649 if (!has_xcrs) { 3650 return 0; 3651 } 3652 3653 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); 3654 if (ret < 0) { 3655 return ret; 3656 } 3657 3658 for (i = 0; i < xcrs.nr_xcrs; i++) { 3659 /* Only support xcr0 now */ 3660 if (xcrs.xcrs[i].xcr == 0) { 3661 env->xcr0 = xcrs.xcrs[i].value; 3662 break; 3663 } 3664 } 3665 return 0; 3666 } 3667 3668 static int kvm_get_sregs(X86CPU *cpu) 3669 { 3670 CPUX86State *env = &cpu->env; 3671 struct kvm_sregs sregs; 3672 int ret; 3673 3674 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); 3675 if (ret < 0) { 3676 return ret; 3677 } 3678 3679 /* 3680 * The interrupt_bitmap is ignored because KVM_GET_SREGS is 3681 * always preceded by KVM_GET_VCPU_EVENTS. 3682 */ 3683 3684 get_seg(&env->segs[R_CS], &sregs.cs); 3685 get_seg(&env->segs[R_DS], &sregs.ds); 3686 get_seg(&env->segs[R_ES], &sregs.es); 3687 get_seg(&env->segs[R_FS], &sregs.fs); 3688 get_seg(&env->segs[R_GS], &sregs.gs); 3689 get_seg(&env->segs[R_SS], &sregs.ss); 3690 3691 get_seg(&env->tr, &sregs.tr); 3692 get_seg(&env->ldt, &sregs.ldt); 3693 3694 env->idt.limit = sregs.idt.limit; 3695 env->idt.base = sregs.idt.base; 3696 env->gdt.limit = sregs.gdt.limit; 3697 env->gdt.base = sregs.gdt.base; 3698 3699 env->cr[0] = sregs.cr0; 3700 env->cr[2] = sregs.cr2; 3701 env->cr[3] = sregs.cr3; 3702 env->cr[4] = sregs.cr4; 3703 3704 env->efer = sregs.efer; 3705 if (sev_es_enabled() && env->efer & MSR_EFER_LME && 3706 env->cr[0] & CR0_PG_MASK) { 3707 env->efer |= MSR_EFER_LMA; 3708 } 3709 3710 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 3711 x86_update_hflags(env); 3712 3713 return 0; 3714 } 3715 3716 static int kvm_get_sregs2(X86CPU *cpu) 3717 { 3718 CPUX86State *env = &cpu->env; 3719 struct kvm_sregs2 sregs; 3720 int i, ret; 3721 3722 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs); 3723 if (ret < 0) { 3724 return ret; 3725 } 3726 3727 get_seg(&env->segs[R_CS], &sregs.cs); 3728 get_seg(&env->segs[R_DS], &sregs.ds); 3729 get_seg(&env->segs[R_ES], &sregs.es); 3730 get_seg(&env->segs[R_FS], &sregs.fs); 3731 get_seg(&env->segs[R_GS], &sregs.gs); 3732 get_seg(&env->segs[R_SS], &sregs.ss); 3733 3734 get_seg(&env->tr, &sregs.tr); 3735 get_seg(&env->ldt, &sregs.ldt); 3736 3737 env->idt.limit = sregs.idt.limit; 3738 env->idt.base = sregs.idt.base; 3739 env->gdt.limit = sregs.gdt.limit; 3740 env->gdt.base = sregs.gdt.base; 3741 3742 env->cr[0] = sregs.cr0; 3743 env->cr[2] = sregs.cr2; 3744 env->cr[3] = sregs.cr3; 3745 env->cr[4] = sregs.cr4; 3746 3747 env->efer = sregs.efer; 3748 if (sev_es_enabled() && 
env->efer & MSR_EFER_LME && 3749 env->cr[0] & CR0_PG_MASK) { 3750 env->efer |= MSR_EFER_LMA; 3751 } 3752 3753 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID; 3754 3755 if (env->pdptrs_valid) { 3756 for (i = 0; i < 4; i++) { 3757 env->pdptrs[i] = sregs.pdptrs[i]; 3758 } 3759 } 3760 3761 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ 3762 x86_update_hflags(env); 3763 3764 return 0; 3765 } 3766 3767 static int kvm_get_msrs(X86CPU *cpu) 3768 { 3769 CPUX86State *env = &cpu->env; 3770 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; 3771 int ret, i; 3772 uint64_t mtrr_top_bits; 3773 3774 kvm_msr_buf_reset(cpu); 3775 3776 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); 3777 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); 3778 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); 3779 kvm_msr_entry_add(cpu, MSR_PAT, 0); 3780 if (has_msr_star) { 3781 kvm_msr_entry_add(cpu, MSR_STAR, 0); 3782 } 3783 if (has_msr_hsave_pa) { 3784 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); 3785 } 3786 if (has_msr_tsc_aux) { 3787 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); 3788 } 3789 if (has_msr_tsc_adjust) { 3790 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); 3791 } 3792 if (has_msr_tsc_deadline) { 3793 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); 3794 } 3795 if (has_msr_misc_enable) { 3796 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); 3797 } 3798 if (has_msr_smbase) { 3799 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); 3800 } 3801 if (has_msr_smi_count) { 3802 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0); 3803 } 3804 if (has_msr_feature_control) { 3805 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); 3806 } 3807 if (has_msr_pkrs) { 3808 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0); 3809 } 3810 if (has_msr_bndcfgs) { 3811 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); 3812 } 3813 if (has_msr_xss) { 3814 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); 3815 } 3816 if (has_msr_umwait) { 3817 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0); 3818 } 3819 if (has_msr_spec_ctrl) { 3820 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); 3821 } 3822 if (has_tsc_scale_msr) { 3823 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0); 3824 } 3825 3826 if (has_msr_tsx_ctrl) { 3827 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); 3828 } 3829 if (has_msr_virt_ssbd) { 3830 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0); 3831 } 3832 if (!env->tsc_valid) { 3833 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); 3834 env->tsc_valid = !runstate_is_running(); 3835 } 3836 3837 #ifdef TARGET_X86_64 3838 if (lm_capable_kernel) { 3839 kvm_msr_entry_add(cpu, MSR_CSTAR, 0); 3840 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); 3841 kvm_msr_entry_add(cpu, MSR_FMASK, 0); 3842 kvm_msr_entry_add(cpu, MSR_LSTAR, 0); 3843 } 3844 #endif 3845 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); 3846 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); 3847 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) { 3848 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0); 3849 } 3850 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { 3851 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); 3852 } 3853 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { 3854 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); 3855 } 3856 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { 3857 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); 3858 } 3859 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { 3860 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); 3861 } 3862 if (has_architectural_pmu_version > 0) { 3863 if (has_architectural_pmu_version 
> 1) { 3864 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); 3865 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); 3866 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); 3867 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); 3868 } 3869 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { 3870 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); 3871 } 3872 for (i = 0; i < num_architectural_pmu_gp_counters; i++) { 3873 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); 3874 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); 3875 } 3876 } 3877 3878 if (env->mcg_cap) { 3879 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); 3880 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); 3881 if (has_msr_mcg_ext_ctl) { 3882 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); 3883 } 3884 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { 3885 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0); 3886 } 3887 } 3888 3889 if (has_msr_hv_hypercall) { 3890 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); 3891 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); 3892 } 3893 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { 3894 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); 3895 } 3896 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { 3897 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); 3898 } 3899 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { 3900 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); 3901 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); 3902 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); 3903 } 3904 if (has_msr_hv_syndbg_options) { 3905 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0); 3906 } 3907 if (has_msr_hv_crash) { 3908 int j; 3909 3910 for (j = 0; j < HV_CRASH_PARAMS; j++) { 3911 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); 3912 } 3913 } 3914 if (has_msr_hv_runtime) { 3915 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); 3916 } 3917 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { 3918 uint32_t msr; 3919 3920 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); 3921 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); 3922 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); 3923 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { 3924 kvm_msr_entry_add(cpu, msr, 0); 3925 } 3926 } 3927 if (has_msr_hv_stimer) { 3928 uint32_t msr; 3929 3930 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; 3931 msr++) { 3932 kvm_msr_entry_add(cpu, msr, 0); 3933 } 3934 } 3935 if (env->features[FEAT_1_EDX] & CPUID_MTRR) { 3936 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); 3937 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); 3938 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); 3939 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); 3940 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); 3941 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); 3942 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); 3943 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); 3944 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0); 3945 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0); 3946 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0); 3947 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0); 3948 for (i = 0; i < MSR_MTRRcap_VCNT; i++) { 3949 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0); 3950 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0); 3951 } 3952 } 3953 3954 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { 3955 int addr_num = 3956 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7; 3957 3958 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0); 3959 
kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0); 3960 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0); 3961 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0); 3962 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0); 3963 for (i = 0; i < addr_num; i++) { 3964 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0); 3965 } 3966 } 3967 3968 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { 3969 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0); 3970 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0); 3971 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0); 3972 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); 3973 } 3974 3975 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { 3976 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0); 3977 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); 3978 } 3979 3980 if (kvm_enabled() && cpu->enable_pmu && 3981 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { 3982 uint64_t depth; 3983 3984 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth); 3985 if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) { 3986 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0); 3987 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0); 3988 3989 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) { 3990 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0); 3991 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0); 3992 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0); 3993 } 3994 } 3995 } 3996 3997 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); 3998 if (ret < 0) { 3999 return ret; 4000 } 4001 4002 if (ret < cpu->kvm_msr_buf->nmsrs) { 4003 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; 4004 error_report("error: failed to get MSR 0x%" PRIx32, 4005 (uint32_t)e->index); 4006 } 4007 4008 assert(ret == cpu->kvm_msr_buf->nmsrs); 4009 /* 4010 * MTRR masks: each mask consists of 5 parts: 4011 * a 10..0: must be zero 4012 * b 11 : valid bit 4013 * c n-1..12: actual mask bits 4014 * d 51..n: reserved, must be zero 4015 * e 63..52: reserved, must be zero 4016 * 4017 * 'n' is the number of physical bits supported by the CPU and is 4018 * apparently always <= 52. We know our 'n' but don't know what 4019 * the destination's 'n' is; it might be smaller, in which case 4020 * it masks (c) on loading. It might be larger, in which case 4021 * we fill 'd' so that d..c is consistent irrespective of the 'n' 4022 * we're migrating to.
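 * Illustrative example (the phys_bits value is assumed, not taken from this code): with cpu->phys_bits == 46, mtrr_top_bits below becomes MAKE_64BIT_MASK(46, 52 - 46), i.e. bits 51..46 set, so part (d) is already filled in on the masks before they are migrated.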
4023 */ 4024 4025 if (cpu->fill_mtrr_mask) { 4026 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52); 4027 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); 4028 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); 4029 } else { 4030 mtrr_top_bits = 0; 4031 } 4032 4033 for (i = 0; i < ret; i++) { 4034 uint32_t index = msrs[i].index; 4035 switch (index) { 4036 case MSR_IA32_SYSENTER_CS: 4037 env->sysenter_cs = msrs[i].data; 4038 break; 4039 case MSR_IA32_SYSENTER_ESP: 4040 env->sysenter_esp = msrs[i].data; 4041 break; 4042 case MSR_IA32_SYSENTER_EIP: 4043 env->sysenter_eip = msrs[i].data; 4044 break; 4045 case MSR_PAT: 4046 env->pat = msrs[i].data; 4047 break; 4048 case MSR_STAR: 4049 env->star = msrs[i].data; 4050 break; 4051 #ifdef TARGET_X86_64 4052 case MSR_CSTAR: 4053 env->cstar = msrs[i].data; 4054 break; 4055 case MSR_KERNELGSBASE: 4056 env->kernelgsbase = msrs[i].data; 4057 break; 4058 case MSR_FMASK: 4059 env->fmask = msrs[i].data; 4060 break; 4061 case MSR_LSTAR: 4062 env->lstar = msrs[i].data; 4063 break; 4064 #endif 4065 case MSR_IA32_TSC: 4066 env->tsc = msrs[i].data; 4067 break; 4068 case MSR_TSC_AUX: 4069 env->tsc_aux = msrs[i].data; 4070 break; 4071 case MSR_TSC_ADJUST: 4072 env->tsc_adjust = msrs[i].data; 4073 break; 4074 case MSR_IA32_TSCDEADLINE: 4075 env->tsc_deadline = msrs[i].data; 4076 break; 4077 case MSR_VM_HSAVE_PA: 4078 env->vm_hsave = msrs[i].data; 4079 break; 4080 case MSR_KVM_SYSTEM_TIME: 4081 env->system_time_msr = msrs[i].data; 4082 break; 4083 case MSR_KVM_WALL_CLOCK: 4084 env->wall_clock_msr = msrs[i].data; 4085 break; 4086 case MSR_MCG_STATUS: 4087 env->mcg_status = msrs[i].data; 4088 break; 4089 case MSR_MCG_CTL: 4090 env->mcg_ctl = msrs[i].data; 4091 break; 4092 case MSR_MCG_EXT_CTL: 4093 env->mcg_ext_ctl = msrs[i].data; 4094 break; 4095 case MSR_IA32_MISC_ENABLE: 4096 env->msr_ia32_misc_enable = msrs[i].data; 4097 break; 4098 case MSR_IA32_SMBASE: 4099 env->smbase = msrs[i].data; 4100 break; 4101 case MSR_SMI_COUNT: 4102 env->msr_smi_count = msrs[i].data; 4103 break; 4104 case MSR_IA32_FEATURE_CONTROL: 4105 env->msr_ia32_feature_control = msrs[i].data; 4106 break; 4107 case MSR_IA32_BNDCFGS: 4108 env->msr_bndcfgs = msrs[i].data; 4109 break; 4110 case MSR_IA32_XSS: 4111 env->xss = msrs[i].data; 4112 break; 4113 case MSR_IA32_UMWAIT_CONTROL: 4114 env->umwait = msrs[i].data; 4115 break; 4116 case MSR_IA32_PKRS: 4117 env->pkrs = msrs[i].data; 4118 break; 4119 default: 4120 if (msrs[i].index >= MSR_MC0_CTL && 4121 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { 4122 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; 4123 } 4124 break; 4125 case MSR_KVM_ASYNC_PF_EN: 4126 env->async_pf_en_msr = msrs[i].data; 4127 break; 4128 case MSR_KVM_ASYNC_PF_INT: 4129 env->async_pf_int_msr = msrs[i].data; 4130 break; 4131 case MSR_KVM_PV_EOI_EN: 4132 env->pv_eoi_en_msr = msrs[i].data; 4133 break; 4134 case MSR_KVM_STEAL_TIME: 4135 env->steal_time_msr = msrs[i].data; 4136 break; 4137 case MSR_KVM_POLL_CONTROL: { 4138 env->poll_control_msr = msrs[i].data; 4139 break; 4140 } 4141 case MSR_CORE_PERF_FIXED_CTR_CTRL: 4142 env->msr_fixed_ctr_ctrl = msrs[i].data; 4143 break; 4144 case MSR_CORE_PERF_GLOBAL_CTRL: 4145 env->msr_global_ctrl = msrs[i].data; 4146 break; 4147 case MSR_CORE_PERF_GLOBAL_STATUS: 4148 env->msr_global_status = msrs[i].data; 4149 break; 4150 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 4151 env->msr_global_ovf_ctrl = msrs[i].data; 4152 break; 4153 case MSR_CORE_PERF_FIXED_CTR0 ... 
MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: 4154 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; 4155 break; 4156 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: 4157 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; 4158 break; 4159 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: 4160 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; 4161 break; 4162 case HV_X64_MSR_HYPERCALL: 4163 env->msr_hv_hypercall = msrs[i].data; 4164 break; 4165 case HV_X64_MSR_GUEST_OS_ID: 4166 env->msr_hv_guest_os_id = msrs[i].data; 4167 break; 4168 case HV_X64_MSR_APIC_ASSIST_PAGE: 4169 env->msr_hv_vapic = msrs[i].data; 4170 break; 4171 case HV_X64_MSR_REFERENCE_TSC: 4172 env->msr_hv_tsc = msrs[i].data; 4173 break; 4174 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4175 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; 4176 break; 4177 case HV_X64_MSR_VP_RUNTIME: 4178 env->msr_hv_runtime = msrs[i].data; 4179 break; 4180 case HV_X64_MSR_SCONTROL: 4181 env->msr_hv_synic_control = msrs[i].data; 4182 break; 4183 case HV_X64_MSR_SIEFP: 4184 env->msr_hv_synic_evt_page = msrs[i].data; 4185 break; 4186 case HV_X64_MSR_SIMP: 4187 env->msr_hv_synic_msg_page = msrs[i].data; 4188 break; 4189 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: 4190 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; 4191 break; 4192 case HV_X64_MSR_STIMER0_CONFIG: 4193 case HV_X64_MSR_STIMER1_CONFIG: 4194 case HV_X64_MSR_STIMER2_CONFIG: 4195 case HV_X64_MSR_STIMER3_CONFIG: 4196 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = 4197 msrs[i].data; 4198 break; 4199 case HV_X64_MSR_STIMER0_COUNT: 4200 case HV_X64_MSR_STIMER1_COUNT: 4201 case HV_X64_MSR_STIMER2_COUNT: 4202 case HV_X64_MSR_STIMER3_COUNT: 4203 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = 4204 msrs[i].data; 4205 break; 4206 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4207 env->msr_hv_reenlightenment_control = msrs[i].data; 4208 break; 4209 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4210 env->msr_hv_tsc_emulation_control = msrs[i].data; 4211 break; 4212 case HV_X64_MSR_TSC_EMULATION_STATUS: 4213 env->msr_hv_tsc_emulation_status = msrs[i].data; 4214 break; 4215 case HV_X64_MSR_SYNDBG_OPTIONS: 4216 env->msr_hv_syndbg_options = msrs[i].data; 4217 break; 4218 case MSR_MTRRdefType: 4219 env->mtrr_deftype = msrs[i].data; 4220 break; 4221 case MSR_MTRRfix64K_00000: 4222 env->mtrr_fixed[0] = msrs[i].data; 4223 break; 4224 case MSR_MTRRfix16K_80000: 4225 env->mtrr_fixed[1] = msrs[i].data; 4226 break; 4227 case MSR_MTRRfix16K_A0000: 4228 env->mtrr_fixed[2] = msrs[i].data; 4229 break; 4230 case MSR_MTRRfix4K_C0000: 4231 env->mtrr_fixed[3] = msrs[i].data; 4232 break; 4233 case MSR_MTRRfix4K_C8000: 4234 env->mtrr_fixed[4] = msrs[i].data; 4235 break; 4236 case MSR_MTRRfix4K_D0000: 4237 env->mtrr_fixed[5] = msrs[i].data; 4238 break; 4239 case MSR_MTRRfix4K_D8000: 4240 env->mtrr_fixed[6] = msrs[i].data; 4241 break; 4242 case MSR_MTRRfix4K_E0000: 4243 env->mtrr_fixed[7] = msrs[i].data; 4244 break; 4245 case MSR_MTRRfix4K_E8000: 4246 env->mtrr_fixed[8] = msrs[i].data; 4247 break; 4248 case MSR_MTRRfix4K_F0000: 4249 env->mtrr_fixed[9] = msrs[i].data; 4250 break; 4251 case MSR_MTRRfix4K_F8000: 4252 env->mtrr_fixed[10] = msrs[i].data; 4253 break; 4254 case MSR_MTRRphysBase(0) ... 
MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): 4255 if (index & 1) { 4256 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | 4257 mtrr_top_bits; 4258 } else { 4259 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; 4260 } 4261 break; 4262 case MSR_IA32_SPEC_CTRL: 4263 env->spec_ctrl = msrs[i].data; 4264 break; 4265 case MSR_AMD64_TSC_RATIO: 4266 env->amd_tsc_scale_msr = msrs[i].data; 4267 break; 4268 case MSR_IA32_TSX_CTRL: 4269 env->tsx_ctrl = msrs[i].data; 4270 break; 4271 case MSR_VIRT_SSBD: 4272 env->virt_ssbd = msrs[i].data; 4273 break; 4274 case MSR_IA32_RTIT_CTL: 4275 env->msr_rtit_ctrl = msrs[i].data; 4276 break; 4277 case MSR_IA32_RTIT_STATUS: 4278 env->msr_rtit_status = msrs[i].data; 4279 break; 4280 case MSR_IA32_RTIT_OUTPUT_BASE: 4281 env->msr_rtit_output_base = msrs[i].data; 4282 break; 4283 case MSR_IA32_RTIT_OUTPUT_MASK: 4284 env->msr_rtit_output_mask = msrs[i].data; 4285 break; 4286 case MSR_IA32_RTIT_CR3_MATCH: 4287 env->msr_rtit_cr3_match = msrs[i].data; 4288 break; 4289 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 4290 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; 4291 break; 4292 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: 4293 env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = 4294 msrs[i].data; 4295 break; 4296 case MSR_IA32_XFD: 4297 env->msr_xfd = msrs[i].data; 4298 break; 4299 case MSR_IA32_XFD_ERR: 4300 env->msr_xfd_err = msrs[i].data; 4301 break; 4302 case MSR_ARCH_LBR_CTL: 4303 env->msr_lbr_ctl = msrs[i].data; 4304 break; 4305 case MSR_ARCH_LBR_DEPTH: 4306 env->msr_lbr_depth = msrs[i].data; 4307 break; 4308 case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31: 4309 env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data; 4310 break; 4311 case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31: 4312 env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data; 4313 break; 4314 case MSR_ARCH_LBR_INFO_0 ... 
MSR_ARCH_LBR_INFO_0 + 31: 4315 env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; 4316 break; 4317 } 4318 } 4319 4320 return 0; 4321 } 4322 4323 static int kvm_put_mp_state(X86CPU *cpu) 4324 { 4325 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; 4326 4327 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); 4328 } 4329 4330 static int kvm_get_mp_state(X86CPU *cpu) 4331 { 4332 CPUState *cs = CPU(cpu); 4333 CPUX86State *env = &cpu->env; 4334 struct kvm_mp_state mp_state; 4335 int ret; 4336 4337 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); 4338 if (ret < 0) { 4339 return ret; 4340 } 4341 env->mp_state = mp_state.mp_state; 4342 if (kvm_irqchip_in_kernel()) { 4343 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); 4344 } 4345 return 0; 4346 } 4347 4348 static int kvm_get_apic(X86CPU *cpu) 4349 { 4350 DeviceState *apic = cpu->apic_state; 4351 struct kvm_lapic_state kapic; 4352 int ret; 4353 4354 if (apic && kvm_irqchip_in_kernel()) { 4355 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic); 4356 if (ret < 0) { 4357 return ret; 4358 } 4359 4360 kvm_get_apic_state(apic, &kapic); 4361 } 4362 return 0; 4363 } 4364 4365 static int kvm_put_vcpu_events(X86CPU *cpu, int level) 4366 { 4367 CPUState *cs = CPU(cpu); 4368 CPUX86State *env = &cpu->env; 4369 struct kvm_vcpu_events events = {}; 4370 4371 events.flags = 0; 4372 4373 if (has_exception_payload) { 4374 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 4375 events.exception.pending = env->exception_pending; 4376 events.exception_has_payload = env->exception_has_payload; 4377 events.exception_payload = env->exception_payload; 4378 } 4379 events.exception.nr = env->exception_nr; 4380 events.exception.injected = env->exception_injected; 4381 events.exception.has_error_code = env->has_error_code; 4382 events.exception.error_code = env->error_code; 4383 4384 events.interrupt.injected = (env->interrupt_injected >= 0); 4385 events.interrupt.nr = env->interrupt_injected; 4386 events.interrupt.soft = env->soft_interrupt; 4387 4388 events.nmi.injected = env->nmi_injected; 4389 events.nmi.pending = env->nmi_pending; 4390 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); 4391 4392 events.sipi_vector = env->sipi_vector; 4393 4394 if (has_msr_smbase) { 4395 events.smi.smm = !!(env->hflags & HF_SMM_MASK); 4396 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); 4397 if (kvm_irqchip_in_kernel()) { 4398 /* As soon as these are moved to the kernel, remove them 4399 * from cs->interrupt_request. 4400 */ 4401 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; 4402 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; 4403 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); 4404 } else { 4405 /* Keep these in cs->interrupt_request. */ 4406 events.smi.pending = 0; 4407 events.smi.latched_init = 0; 4408 } 4409 /* Stop SMI delivery on old machine types to avoid a reboot 4410 * on an inward migration of an old VM. 
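 * (Omitting KVM_VCPUEVENT_VALID_SMM below makes KVM ignore the events.smi fields and keep its current SMM state.)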
4411 */ 4412 if (!cpu->kvm_no_smi_migration) { 4413 events.flags |= KVM_VCPUEVENT_VALID_SMM; 4414 } 4415 } 4416 4417 if (level >= KVM_PUT_RESET_STATE) { 4418 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; 4419 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 4420 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR; 4421 } 4422 } 4423 4424 if (has_triple_fault_event) { 4425 events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 4426 events.triple_fault.pending = env->triple_fault_pending; 4427 } 4428 4429 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); 4430 } 4431 4432 static int kvm_get_vcpu_events(X86CPU *cpu) 4433 { 4434 CPUX86State *env = &cpu->env; 4435 struct kvm_vcpu_events events; 4436 int ret; 4437 4438 memset(&events, 0, sizeof(events)); 4439 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); 4440 if (ret < 0) { 4441 return ret; 4442 } 4443 4444 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 4445 env->exception_pending = events.exception.pending; 4446 env->exception_has_payload = events.exception_has_payload; 4447 env->exception_payload = events.exception_payload; 4448 } else { 4449 env->exception_pending = 0; 4450 env->exception_has_payload = false; 4451 } 4452 env->exception_injected = events.exception.injected; 4453 env->exception_nr = 4454 (env->exception_pending || env->exception_injected) ? 4455 events.exception.nr : -1; 4456 env->has_error_code = events.exception.has_error_code; 4457 env->error_code = events.exception.error_code; 4458 4459 env->interrupt_injected = 4460 events.interrupt.injected ? events.interrupt.nr : -1; 4461 env->soft_interrupt = events.interrupt.soft; 4462 4463 env->nmi_injected = events.nmi.injected; 4464 env->nmi_pending = events.nmi.pending; 4465 if (events.nmi.masked) { 4466 env->hflags2 |= HF2_NMI_MASK; 4467 } else { 4468 env->hflags2 &= ~HF2_NMI_MASK; 4469 } 4470 4471 if (events.flags & KVM_VCPUEVENT_VALID_SMM) { 4472 if (events.smi.smm) { 4473 env->hflags |= HF_SMM_MASK; 4474 } else { 4475 env->hflags &= ~HF_SMM_MASK; 4476 } 4477 if (events.smi.pending) { 4478 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); 4479 } else { 4480 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); 4481 } 4482 if (events.smi.smm_inside_nmi) { 4483 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; 4484 } else { 4485 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; 4486 } 4487 if (events.smi.latched_init) { 4488 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); 4489 } else { 4490 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); 4491 } 4492 } 4493 4494 if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 4495 env->triple_fault_pending = events.triple_fault.pending; 4496 } 4497 4498 env->sipi_vector = events.sipi_vector; 4499 4500 return 0; 4501 } 4502 4503 static int kvm_put_debugregs(X86CPU *cpu) 4504 { 4505 CPUX86State *env = &cpu->env; 4506 struct kvm_debugregs dbgregs; 4507 int i; 4508 4509 memset(&dbgregs, 0, sizeof(dbgregs)); 4510 for (i = 0; i < 4; i++) { 4511 dbgregs.db[i] = env->dr[i]; 4512 } 4513 dbgregs.dr6 = env->dr[6]; 4514 dbgregs.dr7 = env->dr[7]; 4515 dbgregs.flags = 0; 4516 4517 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs); 4518 } 4519 4520 static int kvm_get_debugregs(X86CPU *cpu) 4521 { 4522 CPUX86State *env = &cpu->env; 4523 struct kvm_debugregs dbgregs; 4524 int i, ret; 4525 4526 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs); 4527 if (ret < 0) { 4528 return ret; 4529 } 4530 for (i = 0; i < 4; i++) { 4531 env->dr[i] = dbgregs.db[i]; 4532 } 4533 env->dr[4] = env->dr[6] = dbgregs.dr6; 4534 env->dr[5] = env->dr[7] = 
dbgregs.dr7; 4535 4536 return 0; 4537 } 4538 4539 static int kvm_put_nested_state(X86CPU *cpu) 4540 { 4541 CPUX86State *env = &cpu->env; 4542 int max_nested_state_len = kvm_max_nested_state_length(); 4543 4544 if (!env->nested_state) { 4545 return 0; 4546 } 4547 4548 /* 4549 * Copy flags that are affected by reset from env->hflags and env->hflags2. 4550 */ 4551 if (env->hflags & HF_GUEST_MASK) { 4552 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE; 4553 } else { 4554 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE; 4555 } 4556 4557 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */ 4558 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) { 4559 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET; 4560 } else { 4561 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET; 4562 } 4563 4564 assert(env->nested_state->size <= max_nested_state_len); 4565 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state); 4566 } 4567 4568 static int kvm_get_nested_state(X86CPU *cpu) 4569 { 4570 CPUX86State *env = &cpu->env; 4571 int max_nested_state_len = kvm_max_nested_state_length(); 4572 int ret; 4573 4574 if (!env->nested_state) { 4575 return 0; 4576 } 4577 4578 /* 4579 * It is possible that migration restored a smaller size into 4580 * nested_state->hdr.size than what our kernel supports. 4581 * We preserve the migration origin's nested_state->hdr.size for 4582 * the call to KVM_SET_NESTED_STATE, but want our next call 4583 * to KVM_GET_NESTED_STATE to use the max size our kernel supports. 4584 */ 4585 env->nested_state->size = max_nested_state_len; 4586 4587 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state); 4588 if (ret < 0) { 4589 return ret; 4590 } 4591 4592 /* 4593 * Copy flags that are affected by reset to env->hflags and env->hflags2. 4594 */ 4595 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) { 4596 env->hflags |= HF_GUEST_MASK; 4597 } else { 4598 env->hflags &= ~HF_GUEST_MASK; 4599 } 4600 4601 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */ 4602 if (cpu_has_svm(env)) { 4603 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) { 4604 env->hflags2 |= HF2_GIF_MASK; 4605 } else { 4606 env->hflags2 &= ~HF2_GIF_MASK; 4607 } 4608 } 4609 4610 return ret; 4611 } 4612 4613 int kvm_arch_put_registers(CPUState *cpu, int level) 4614 { 4615 X86CPU *x86_cpu = X86_CPU(cpu); 4616 int ret; 4617 4618 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); 4619 4620 /* 4621 * Put MSR_IA32_FEATURE_CONTROL first; this ensures the VM gets out of VMX 4622 * root operation upon vCPU reset. kvm_put_msr_feature_control() should also 4623 * precede kvm_put_nested_state() when 'real' nested state is set. 4624 */ 4625 if (level >= KVM_PUT_RESET_STATE) { 4626 ret = kvm_put_msr_feature_control(x86_cpu); 4627 if (ret < 0) { 4628 return ret; 4629 } 4630 } 4631 4632 /* must be before kvm_put_nested_state so that EFER.SVME is set */ 4633 ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu); 4634 if (ret < 0) { 4635 return ret; 4636 } 4637 4638 if (level >= KVM_PUT_RESET_STATE) { 4639 ret = kvm_put_nested_state(x86_cpu); 4640 if (ret < 0) { 4641 return ret; 4642 } 4643 } 4644 4645 if (level == KVM_PUT_FULL_STATE) { 4646 /* We don't check for kvm_arch_set_tsc_khz() errors here, 4647 * because a TSC frequency mismatch shouldn't abort migration, 4648 * unless the user explicitly asked for a stricter TSC 4649 * setting (e.g. using an explicit "tsc-freq" option).
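 * (In the strict case, a mismatch is expected to have been caught earlier, when the vCPU was initialized.)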
4650 */ 4651 kvm_arch_set_tsc_khz(cpu); 4652 } 4653 4654 #ifdef CONFIG_XEN_EMU 4655 if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) { 4656 ret = kvm_put_xen_state(cpu); 4657 if (ret < 0) { 4658 return ret; 4659 } 4660 } 4661 #endif 4662 4663 ret = kvm_getput_regs(x86_cpu, 1); 4664 if (ret < 0) { 4665 return ret; 4666 } 4667 ret = kvm_put_xsave(x86_cpu); 4668 if (ret < 0) { 4669 return ret; 4670 } 4671 ret = kvm_put_xcrs(x86_cpu); 4672 if (ret < 0) { 4673 return ret; 4674 } 4675 ret = kvm_put_msrs(x86_cpu, level); 4676 if (ret < 0) { 4677 return ret; 4678 } 4679 ret = kvm_put_vcpu_events(x86_cpu, level); 4680 if (ret < 0) { 4681 return ret; 4682 } 4683 if (level >= KVM_PUT_RESET_STATE) { 4684 ret = kvm_put_mp_state(x86_cpu); 4685 if (ret < 0) { 4686 return ret; 4687 } 4688 } 4689 4690 ret = kvm_put_tscdeadline_msr(x86_cpu); 4691 if (ret < 0) { 4692 return ret; 4693 } 4694 ret = kvm_put_debugregs(x86_cpu); 4695 if (ret < 0) { 4696 return ret; 4697 } 4698 return 0; 4699 } 4700 4701 int kvm_arch_get_registers(CPUState *cs) 4702 { 4703 X86CPU *cpu = X86_CPU(cs); 4704 int ret; 4705 4706 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs)); 4707 4708 ret = kvm_get_vcpu_events(cpu); 4709 if (ret < 0) { 4710 goto out; 4711 } 4712 /* 4713 * KVM_GET_MPSTATE can modify CS and RIP, call it before 4714 * KVM_GET_REGS and KVM_GET_SREGS. 4715 */ 4716 ret = kvm_get_mp_state(cpu); 4717 if (ret < 0) { 4718 goto out; 4719 } 4720 ret = kvm_getput_regs(cpu, 0); 4721 if (ret < 0) { 4722 goto out; 4723 } 4724 ret = kvm_get_xsave(cpu); 4725 if (ret < 0) { 4726 goto out; 4727 } 4728 ret = kvm_get_xcrs(cpu); 4729 if (ret < 0) { 4730 goto out; 4731 } 4732 ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu); 4733 if (ret < 0) { 4734 goto out; 4735 } 4736 ret = kvm_get_msrs(cpu); 4737 if (ret < 0) { 4738 goto out; 4739 } 4740 ret = kvm_get_apic(cpu); 4741 if (ret < 0) { 4742 goto out; 4743 } 4744 ret = kvm_get_debugregs(cpu); 4745 if (ret < 0) { 4746 goto out; 4747 } 4748 ret = kvm_get_nested_state(cpu); 4749 if (ret < 0) { 4750 goto out; 4751 } 4752 #ifdef CONFIG_XEN_EMU 4753 if (xen_mode == XEN_EMULATE) { 4754 ret = kvm_get_xen_state(cs); 4755 if (ret < 0) { 4756 goto out; 4757 } 4758 } 4759 #endif 4760 ret = 0; 4761 out: 4762 cpu_sync_bndcs_hflags(&cpu->env); 4763 return ret; 4764 } 4765 4766 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) 4767 { 4768 X86CPU *x86_cpu = X86_CPU(cpu); 4769 CPUX86State *env = &x86_cpu->env; 4770 int ret; 4771 4772 /* Inject NMI */ 4773 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { 4774 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { 4775 bql_lock(); 4776 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; 4777 bql_unlock(); 4778 DPRINTF("injected NMI\n"); 4779 ret = kvm_vcpu_ioctl(cpu, KVM_NMI); 4780 if (ret < 0) { 4781 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n", 4782 strerror(-ret)); 4783 } 4784 } 4785 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { 4786 bql_lock(); 4787 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; 4788 bql_unlock(); 4789 DPRINTF("injected SMI\n"); 4790 ret = kvm_vcpu_ioctl(cpu, KVM_SMI); 4791 if (ret < 0) { 4792 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n", 4793 strerror(-ret)); 4794 } 4795 } 4796 } 4797 4798 if (!kvm_pic_in_kernel()) { 4799 bql_lock(); 4800 } 4801 4802 /* Force the VCPU out of its inner loop to process any INIT requests 4803 * or (for userspace APIC, but it is cheap to combine the checks here) 4804 * pending TPR access reports. 
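 * Setting cpu->exit_request below forces a prompt return from the KVM_RUN loop, so that kvm_arch_process_async_events() can then service these requests.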
4805 */ 4806 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { 4807 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && 4808 !(env->hflags & HF_SMM_MASK)) { 4809 cpu->exit_request = 1; 4810 } 4811 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { 4812 cpu->exit_request = 1; 4813 } 4814 } 4815 4816 if (!kvm_pic_in_kernel()) { 4817 /* Try to inject an interrupt if the guest can accept it */ 4818 if (run->ready_for_interrupt_injection && 4819 (cpu->interrupt_request & CPU_INTERRUPT_HARD) && 4820 (env->eflags & IF_MASK)) { 4821 int irq; 4822 4823 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; 4824 irq = cpu_get_pic_interrupt(env); 4825 if (irq >= 0) { 4826 struct kvm_interrupt intr; 4827 4828 intr.irq = irq; 4829 DPRINTF("injected interrupt %d\n", irq); 4830 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr); 4831 if (ret < 0) { 4832 fprintf(stderr, 4833 "KVM: injection failed, interrupt lost (%s)\n", 4834 strerror(-ret)); 4835 } 4836 } 4837 } 4838 4839 /* If we have an interrupt but the guest is not ready to receive an 4840 * interrupt, request an interrupt window exit. This will 4841 * cause a return to userspace as soon as the guest is ready to 4842 * receive interrupts. */ 4843 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { 4844 run->request_interrupt_window = 1; 4845 } else { 4846 run->request_interrupt_window = 0; 4847 } 4848 4849 DPRINTF("setting tpr\n"); 4850 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); 4851 4852 bql_unlock(); 4853 } 4854 } 4855 4856 static void kvm_rate_limit_on_bus_lock(void) 4857 { 4858 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1); 4859 4860 if (delay_ns) { 4861 g_usleep(delay_ns / SCALE_US); 4862 } 4863 } 4864 4865 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) 4866 { 4867 X86CPU *x86_cpu = X86_CPU(cpu); 4868 CPUX86State *env = &x86_cpu->env; 4869 4870 if (run->flags & KVM_RUN_X86_SMM) { 4871 env->hflags |= HF_SMM_MASK; 4872 } else { 4873 env->hflags &= ~HF_SMM_MASK; 4874 } 4875 if (run->if_flag) { 4876 env->eflags |= IF_MASK; 4877 } else { 4878 env->eflags &= ~IF_MASK; 4879 } 4880 if (run->flags & KVM_RUN_X86_BUS_LOCK) { 4881 kvm_rate_limit_on_bus_lock(); 4882 } 4883 4884 #ifdef CONFIG_XEN_EMU 4885 /* 4886 * If the callback is asserted as a GSI (or PCI INTx) then check if 4887 * vcpu_info->evtchn_upcall_pending has been cleared, and deassert 4888 * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC 4889 * EOI and only resample then, exactly how the VFIO eventfd pairs 4890 * are designed to work for level triggered interrupts. 4891 */ 4892 if (x86_cpu->env.xen_callback_asserted) { 4893 kvm_xen_maybe_deassert_callback(cpu); 4894 } 4895 #endif 4896 4897 /* We need to protect the apic state against concurrent accesses from 4898 * different threads in case the userspace irqchip is used. */ 4899 if (!kvm_irqchip_in_kernel()) { 4900 bql_lock(); 4901 } 4902 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); 4903 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); 4904 if (!kvm_irqchip_in_kernel()) { 4905 bql_unlock(); 4906 } 4907 return cpu_get_mem_attrs(env); 4908 } 4909 4910 int kvm_arch_process_async_events(CPUState *cs) 4911 { 4912 X86CPU *cpu = X86_CPU(cs); 4913 CPUX86State *env = &cpu->env; 4914 4915 if (cs->interrupt_request & CPU_INTERRUPT_MCE) { 4916 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. 
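(env->mcg_cap is zero when MCE support was never exposed to the guest, hence the assert below.)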
*/ 4917 assert(env->mcg_cap); 4918 4919 cs->interrupt_request &= ~CPU_INTERRUPT_MCE; 4920 4921 kvm_cpu_synchronize_state(cs); 4922 4923 if (env->exception_nr == EXCP08_DBLE) { 4924 /* this means triple fault */ 4925 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 4926 cs->exit_request = 1; 4927 return 0; 4928 } 4929 kvm_queue_exception(env, EXCP12_MCHK, 0, 0); 4930 env->has_error_code = 0; 4931 4932 cs->halted = 0; 4933 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) { 4934 env->mp_state = KVM_MP_STATE_RUNNABLE; 4935 } 4936 } 4937 4938 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && 4939 !(env->hflags & HF_SMM_MASK)) { 4940 kvm_cpu_synchronize_state(cs); 4941 do_cpu_init(cpu); 4942 } 4943 4944 if (kvm_irqchip_in_kernel()) { 4945 return 0; 4946 } 4947 4948 if (cs->interrupt_request & CPU_INTERRUPT_POLL) { 4949 cs->interrupt_request &= ~CPU_INTERRUPT_POLL; 4950 apic_poll_irq(cpu->apic_state); 4951 } 4952 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && 4953 (env->eflags & IF_MASK)) || 4954 (cs->interrupt_request & CPU_INTERRUPT_NMI)) { 4955 cs->halted = 0; 4956 } 4957 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { 4958 kvm_cpu_synchronize_state(cs); 4959 do_cpu_sipi(cpu); 4960 } 4961 if (cs->interrupt_request & CPU_INTERRUPT_TPR) { 4962 cs->interrupt_request &= ~CPU_INTERRUPT_TPR; 4963 kvm_cpu_synchronize_state(cs); 4964 apic_handle_tpr_access_report(cpu->apic_state, env->eip, 4965 env->tpr_access_type); 4966 } 4967 4968 return cs->halted; 4969 } 4970 4971 static int kvm_handle_halt(X86CPU *cpu) 4972 { 4973 CPUState *cs = CPU(cpu); 4974 CPUX86State *env = &cpu->env; 4975 4976 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && 4977 (env->eflags & IF_MASK)) && 4978 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { 4979 cs->halted = 1; 4980 return EXCP_HLT; 4981 } 4982 4983 return 0; 4984 } 4985 4986 static int kvm_handle_tpr_access(X86CPU *cpu) 4987 { 4988 CPUState *cs = CPU(cpu); 4989 struct kvm_run *run = cs->kvm_run; 4990 4991 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip, 4992 run->tpr_access.is_write ? 
TPR_ACCESS_WRITE 4993 : TPR_ACCESS_READ); 4994 return 1; 4995 } 4996 4997 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 4998 { 4999 static const uint8_t int3 = 0xcc; 5000 5001 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || 5002 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { 5003 return -EINVAL; 5004 } 5005 return 0; 5006 } 5007 5008 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 5009 { 5010 uint8_t int3; 5011 5012 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) { 5013 return -EINVAL; 5014 } 5015 if (int3 != 0xcc) { 5016 return 0; 5017 } 5018 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { 5019 return -EINVAL; 5020 } 5021 return 0; 5022 } 5023 5024 static struct { 5025 target_ulong addr; 5026 int len; 5027 int type; 5028 } hw_breakpoint[4]; 5029 5030 static int nb_hw_breakpoint; 5031 5032 static int find_hw_breakpoint(target_ulong addr, int len, int type) 5033 { 5034 int n; 5035 5036 for (n = 0; n < nb_hw_breakpoint; n++) { 5037 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && 5038 (hw_breakpoint[n].len == len || len == -1)) { 5039 return n; 5040 } 5041 } 5042 return -1; 5043 } 5044 5045 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) 5046 { 5047 switch (type) { 5048 case GDB_BREAKPOINT_HW: 5049 len = 1; 5050 break; 5051 case GDB_WATCHPOINT_WRITE: 5052 case GDB_WATCHPOINT_ACCESS: 5053 switch (len) { 5054 case 1: 5055 break; 5056 case 2: 5057 case 4: 5058 case 8: 5059 if (addr & (len - 1)) { 5060 return -EINVAL; 5061 } 5062 break; 5063 default: 5064 return -EINVAL; 5065 } 5066 break; 5067 default: 5068 return -ENOSYS; 5069 } 5070 5071 if (nb_hw_breakpoint == 4) { 5072 return -ENOBUFS; 5073 } 5074 if (find_hw_breakpoint(addr, len, type) >= 0) { 5075 return -EEXIST; 5076 } 5077 hw_breakpoint[nb_hw_breakpoint].addr = addr; 5078 hw_breakpoint[nb_hw_breakpoint].len = len; 5079 hw_breakpoint[nb_hw_breakpoint].type = type; 5080 nb_hw_breakpoint++; 5081 5082 return 0; 5083 } 5084 5085 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) 5086 { 5087 int n; 5088 5089 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 
1 : len, type); 5090 if (n < 0) { 5091 return -ENOENT; 5092 } 5093 nb_hw_breakpoint--; 5094 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; 5095 5096 return 0; 5097 } 5098 5099 void kvm_arch_remove_all_hw_breakpoints(void) 5100 { 5101 nb_hw_breakpoint = 0; 5102 } 5103 5104 static CPUWatchpoint hw_watchpoint; 5105 5106 static int kvm_handle_debug(X86CPU *cpu, 5107 struct kvm_debug_exit_arch *arch_info) 5108 { 5109 CPUState *cs = CPU(cpu); 5110 CPUX86State *env = &cpu->env; 5111 int ret = 0; 5112 int n; 5113 5114 if (arch_info->exception == EXCP01_DB) { 5115 if (arch_info->dr6 & DR6_BS) { 5116 if (cs->singlestep_enabled) { 5117 ret = EXCP_DEBUG; 5118 } 5119 } else { 5120 for (n = 0; n < 4; n++) { 5121 if (arch_info->dr6 & (1 << n)) { 5122 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { 5123 case 0x0: 5124 ret = EXCP_DEBUG; 5125 break; 5126 case 0x1: 5127 ret = EXCP_DEBUG; 5128 cs->watchpoint_hit = &hw_watchpoint; 5129 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 5130 hw_watchpoint.flags = BP_MEM_WRITE; 5131 break; 5132 case 0x3: 5133 ret = EXCP_DEBUG; 5134 cs->watchpoint_hit = &hw_watchpoint; 5135 hw_watchpoint.vaddr = hw_breakpoint[n].addr; 5136 hw_watchpoint.flags = BP_MEM_ACCESS; 5137 break; 5138 } 5139 } 5140 } 5141 } 5142 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { 5143 ret = EXCP_DEBUG; 5144 } 5145 if (ret == 0) { 5146 cpu_synchronize_state(cs); 5147 assert(env->exception_nr == -1); 5148 5149 /* pass to guest */ 5150 kvm_queue_exception(env, arch_info->exception, 5151 arch_info->exception == EXCP01_DB, 5152 arch_info->dr6); 5153 env->has_error_code = 0; 5154 } 5155 5156 return ret; 5157 } 5158 5159 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) 5160 { 5161 const uint8_t type_code[] = { 5162 [GDB_BREAKPOINT_HW] = 0x0, 5163 [GDB_WATCHPOINT_WRITE] = 0x1, 5164 [GDB_WATCHPOINT_ACCESS] = 0x3 5165 }; 5166 const uint8_t len_code[] = { 5167 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 5168 }; 5169 int n; 5170 5171 if (kvm_sw_breakpoints_active(cpu)) { 5172 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; 5173 } 5174 if (nb_hw_breakpoint > 0) { 5175 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 5176 dbg->arch.debugreg[7] = 0x0600; 5177 for (n = 0; n < nb_hw_breakpoint; n++) { 5178 dbg->arch.debugreg[n] = hw_breakpoint[n].addr; 5179 dbg->arch.debugreg[7] |= (2 << (n * 2)) | 5180 (type_code[hw_breakpoint[n].type] << (16 + n*4)) | 5181 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); 5182 } 5183 } 5184 } 5185 5186 static bool kvm_install_msr_filters(KVMState *s) 5187 { 5188 uint64_t zero = 0; 5189 struct kvm_msr_filter filter = { 5190 .flags = KVM_MSR_FILTER_DEFAULT_ALLOW, 5191 }; 5192 int r, i, j = 0; 5193 5194 for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) { 5195 KVMMSRHandlers *handler = &msr_handlers[i]; 5196 if (handler->msr) { 5197 struct kvm_msr_filter_range *range = &filter.ranges[j++]; 5198 5199 *range = (struct kvm_msr_filter_range) { 5200 .flags = 0, 5201 .nmsrs = 1, 5202 .base = handler->msr, 5203 .bitmap = (__u8 *)&zero, 5204 }; 5205 5206 if (handler->rdmsr) { 5207 range->flags |= KVM_MSR_FILTER_READ; 5208 } 5209 5210 if (handler->wrmsr) { 5211 range->flags |= KVM_MSR_FILTER_WRITE; 5212 } 5213 } 5214 } 5215 5216 r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter); 5217 if (r) { 5218 return false; 5219 } 5220 5221 return true; 5222 } 5223 5224 bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, 5225 QEMUWRMSRHandler *wrmsr) 5226 { 5227 int i; 5228 5229 for (i = 0; i < 
ARRAY_SIZE(msr_handlers); i++) { 5230 if (!msr_handlers[i].msr) { 5231 msr_handlers[i] = (KVMMSRHandlers) { 5232 .msr = msr, 5233 .rdmsr = rdmsr, 5234 .wrmsr = wrmsr, 5235 }; 5236 5237 if (!kvm_install_msr_filters(s)) { 5238 msr_handlers[i] = (KVMMSRHandlers) { }; 5239 return false; 5240 } 5241 5242 return true; 5243 } 5244 } 5245 5246 return false; 5247 } 5248 5249 static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run) 5250 { 5251 int i; 5252 bool r; 5253 5254 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { 5255 KVMMSRHandlers *handler = &msr_handlers[i]; 5256 if (run->msr.index == handler->msr) { 5257 if (handler->rdmsr) { 5258 r = handler->rdmsr(cpu, handler->msr, 5259 (uint64_t *)&run->msr.data); 5260 run->msr.error = r ? 0 : 1; 5261 return 0; 5262 } 5263 } 5264 } 5265 5266 assert(false); 5267 } 5268 5269 static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run) 5270 { 5271 int i; 5272 bool r; 5273 5274 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) { 5275 KVMMSRHandlers *handler = &msr_handlers[i]; 5276 if (run->msr.index == handler->msr) { 5277 if (handler->wrmsr) { 5278 r = handler->wrmsr(cpu, handler->msr, run->msr.data); 5279 run->msr.error = r ? 0 : 1; 5280 return 0; 5281 } 5282 } 5283 } 5284 5285 assert(false); 5286 } 5287 5288 static bool has_sgx_provisioning; 5289 5290 static bool __kvm_enable_sgx_provisioning(KVMState *s) 5291 { 5292 int fd, ret; 5293 5294 if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) { 5295 return false; 5296 } 5297 5298 fd = qemu_open_old("/dev/sgx_provision", O_RDONLY); 5299 if (fd < 0) { 5300 return false; 5301 } 5302 5303 ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd); 5304 if (ret) { 5305 error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret)); 5306 exit(1); 5307 } 5308 close(fd); 5309 return true; 5310 } 5311 5312 bool kvm_enable_sgx_provisioning(KVMState *s) 5313 { 5314 return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning); 5315 } 5316 5317 static bool host_supports_vmx(void) 5318 { 5319 uint32_t ecx, unused; 5320 5321 host_cpuid(1, 0, &unused, &unused, &ecx, &unused); 5322 return ecx & CPUID_EXT_VMX; 5323 } 5324 5325 #define VMX_INVALID_GUEST_STATE 0x80000021 5326 5327 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 5328 { 5329 X86CPU *cpu = X86_CPU(cs); 5330 uint64_t code; 5331 int ret; 5332 bool ctx_invalid; 5333 char str[256]; 5334 KVMState *state; 5335 5336 switch (run->exit_reason) { 5337 case KVM_EXIT_HLT: 5338 DPRINTF("handle_hlt\n"); 5339 bql_lock(); 5340 ret = kvm_handle_halt(cpu); 5341 bql_unlock(); 5342 break; 5343 case KVM_EXIT_SET_TPR: 5344 ret = 0; 5345 break; 5346 case KVM_EXIT_TPR_ACCESS: 5347 bql_lock(); 5348 ret = kvm_handle_tpr_access(cpu); 5349 bql_unlock(); 5350 break; 5351 case KVM_EXIT_FAIL_ENTRY: 5352 code = run->fail_entry.hardware_entry_failure_reason; 5353 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n", 5354 code); 5355 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) { 5356 fprintf(stderr, 5357 "\nIf you're running a guest on an Intel machine without " 5358 "unrestricted mode\n" 5359 "support, the failure is most likely due to the guest " 5360 "entering an invalid\n" 5361 "state for Intel VT. For example, the guest may be running " 5362 "in big real mode,\n" 5363 "which is not supported on older Intel processors."
5364 "\n\n"); 5365 } 5366 ret = -1; 5367 break; 5368 case KVM_EXIT_EXCEPTION: 5369 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n", 5370 run->ex.exception, run->ex.error_code); 5371 ret = -1; 5372 break; 5373 case KVM_EXIT_DEBUG: 5374 DPRINTF("kvm_exit_debug\n"); 5375 bql_lock(); 5376 ret = kvm_handle_debug(cpu, &run->debug.arch); 5377 bql_unlock(); 5378 break; 5379 case KVM_EXIT_HYPERV: 5380 ret = kvm_hv_handle_exit(cpu, &run->hyperv); 5381 break; 5382 case KVM_EXIT_IOAPIC_EOI: 5383 ioapic_eoi_broadcast(run->eoi.vector); 5384 ret = 0; 5385 break; 5386 case KVM_EXIT_X86_BUS_LOCK: 5387 /* already handled in kvm_arch_post_run */ 5388 ret = 0; 5389 break; 5390 case KVM_EXIT_NOTIFY: 5391 ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID); 5392 state = KVM_STATE(current_accel()); 5393 sprintf(str, "Encountered a notify exit with %svalid context in" 5394 " the guest. The guest may be misbehaving." 5395 " Please investigate.", ctx_invalid ? "in" : ""); 5396 if (ctx_invalid || 5397 state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) { 5398 warn_report("KVM internal error: %s", str); 5399 ret = -1; 5400 } else { 5401 warn_report_once("KVM: %s", str); 5402 ret = 0; 5403 } 5404 break; 5405 case KVM_EXIT_X86_RDMSR: 5406 /* We only enable MSR filtering; any other exit is bogus */ 5407 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); 5408 ret = kvm_handle_rdmsr(cpu, run); 5409 break; 5410 case KVM_EXIT_X86_WRMSR: 5411 /* We only enable MSR filtering; any other exit is bogus */ 5412 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); 5413 ret = kvm_handle_wrmsr(cpu, run); 5414 break; 5415 #ifdef CONFIG_XEN_EMU 5416 case KVM_EXIT_XEN: 5417 ret = kvm_xen_handle_exit(cpu, &run->xen); 5418 break; 5419 #endif 5420 default: 5421 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); 5422 ret = -1; 5423 break; 5424 } 5425 5426 return ret; 5427 } 5428 5429 bool kvm_arch_stop_on_emulation_error(CPUState *cs) 5430 { 5431 X86CPU *cpu = X86_CPU(cs); 5432 CPUX86State *env = &cpu->env; 5433 5434 kvm_cpu_synchronize_state(cs); 5435 return !(env->cr[0] & CR0_PE_MASK) || 5436 ((env->segs[R_CS].selector & 3) != 3); 5437 } 5438 5439 void kvm_arch_init_irq_routing(KVMState *s) 5440 { 5441 /* We know at this point that we're using the in-kernel 5442 * irqchip, so we can use irqfds, and on x86 we know 5443 * we can use msi via irqfd and GSI routing. 5444 */ 5445 kvm_msi_via_irqfd_allowed = true; 5446 kvm_gsi_routing_allowed = true; 5447 5448 if (kvm_irqchip_is_split()) { 5449 KVMRouteChange c = kvm_irqchip_begin_route_changes(s); 5450 int i; 5451 5452 /* If the ioapic is in QEMU and the lapics are in KVM, reserve 5453 MSI routes for signaling interrupts to the local apics.
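       One MSI route per IOAPIC pin (IOAPIC_NUM_PINS in total) is reserved below.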
*/ 5454 for (i = 0; i < IOAPIC_NUM_PINS; i++) { 5455 if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) { 5456 error_report("Could not enable split IRQ mode."); 5457 exit(1); 5458 } 5459 } 5460 kvm_irqchip_commit_route_changes(&c); 5461 } 5462 } 5463 5464 int kvm_arch_irqchip_create(KVMState *s) 5465 { 5466 int ret; 5467 if (kvm_kernel_irqchip_split()) { 5468 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24); 5469 if (ret) { 5470 error_report("Could not enable split irqchip mode: %s", 5471 strerror(-ret)); 5472 exit(1); 5473 } else { 5474 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n"); 5475 kvm_split_irqchip = true; 5476 return 1; 5477 } 5478 } else { 5479 return 0; 5480 } 5481 } 5482 5483 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address) 5484 { 5485 CPUX86State *env; 5486 uint64_t ext_id; 5487 5488 if (!first_cpu) { 5489 return address; 5490 } 5491 env = &X86_CPU(first_cpu)->env; 5492 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) { 5493 return address; 5494 } 5495 5496 /* 5497 * If the remappable format bit is set, or the upper bits are 5498 * already set in address_hi, or the low extended bits aren't 5499 * there anyway, do nothing. 5500 */ 5501 ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT); 5502 if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) { 5503 return address; 5504 } 5505 5506 address &= ~ext_id; 5507 address |= ext_id << 35; 5508 return address; 5509 } 5510 5511 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 5512 uint64_t address, uint32_t data, PCIDevice *dev) 5513 { 5514 X86IOMMUState *iommu = x86_iommu_get_default(); 5515 5516 if (iommu) { 5517 X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu); 5518 5519 if (class->int_remap) { 5520 int ret; 5521 MSIMessage src, dst; 5522 5523 src.address = route->u.msi.address_hi; 5524 src.address <<= VTD_MSI_ADDR_HI_SHIFT; 5525 src.address |= route->u.msi.address_lo; 5526 src.data = route->u.msi.data; 5527 5528 ret = class->int_remap(iommu, &src, &dst, dev ? \ 5529 pci_requester_id(dev) : \ 5530 X86_IOMMU_SID_INVALID); 5531 if (ret) { 5532 trace_kvm_x86_fixup_msi_error(route->gsi); 5533 return 1; 5534 } 5535 5536 /* 5537 * Handle an untranslated compatibility-format interrupt with 5538 * the extended destination ID in low bits 11-5. */ 5539 dst.address = kvm_swizzle_msi_ext_dest_id(dst.address); 5540 5541 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT; 5542 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK; 5543 route->u.msi.data = dst.data; 5544 return 0; 5545 } 5546 } 5547 5548 #ifdef CONFIG_XEN_EMU 5549 if (xen_mode == XEN_EMULATE) { 5550 int handled = xen_evtchn_translate_pirq_msi(route, address, data); 5551 5552 /* 5553 * If it was a PIRQ and successfully routed (handled == 0) or it was 5554 * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
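 * (Per that convention, a positive value means it was not a PIRQ, and normal MSI routing continues below.)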
5555 */ 5556 if (handled <= 0) { 5557 return handled; 5558 } 5559 } 5560 #endif 5561 5562 address = kvm_swizzle_msi_ext_dest_id(address); 5563 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT; 5564 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK; 5565 return 0; 5566 } 5567 5568 typedef struct MSIRouteEntry MSIRouteEntry; 5569 5570 struct MSIRouteEntry { 5571 PCIDevice *dev; /* Device pointer */ 5572 int vector; /* MSI/MSIX vector index */ 5573 int virq; /* Virtual IRQ index */ 5574 QLIST_ENTRY(MSIRouteEntry) list; 5575 }; 5576 5577 /* List of used GSI routes */ 5578 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \ 5579 QLIST_HEAD_INITIALIZER(msi_route_list); 5580 5581 void kvm_update_msi_routes_all(void *private, bool global, 5582 uint32_t index, uint32_t mask) 5583 { 5584 int cnt = 0, vector; 5585 MSIRouteEntry *entry; 5586 MSIMessage msg; 5587 PCIDevice *dev; 5588 5589 /* TODO: explicit route update */ 5590 QLIST_FOREACH(entry, &msi_route_list, list) { 5591 cnt++; 5592 vector = entry->vector; 5593 dev = entry->dev; 5594 if (msix_enabled(dev) && !msix_is_masked(dev, vector)) { 5595 msg = msix_get_message(dev, vector); 5596 } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) { 5597 msg = msi_get_message(dev, vector); 5598 } else { 5599 /* 5600 * Either MSI/MSIX is disabled for the device, or the 5601 * specific message was masked out. Skip this one. 5602 */ 5603 continue; 5604 } 5605 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev); 5606 } 5607 kvm_irqchip_commit_routes(kvm_state); 5608 trace_kvm_x86_update_msi_routes(cnt); 5609 } 5610 5611 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 5612 int vector, PCIDevice *dev) 5613 { 5614 static bool notify_list_inited = false; 5615 MSIRouteEntry *entry; 5616 5617 if (!dev) { 5618 /* These are (possibly) IOAPIC routes only used for split 5619 * kernel irqchip mode, while what we are housekeeping are 5620 * PCI devices only. */ 5621 return 0; 5622 } 5623 5624 entry = g_new0(MSIRouteEntry, 1); 5625 entry->dev = dev; 5626 entry->vector = vector; 5627 entry->virq = route->gsi; 5628 QLIST_INSERT_HEAD(&msi_route_list, entry, list); 5629 5630 trace_kvm_x86_add_msi_route(route->gsi); 5631 5632 if (!notify_list_inited) { 5633 /* For the first time we do add route, add ourselves into 5634 * IOMMU's IEC notify list if needed. */ 5635 X86IOMMUState *iommu = x86_iommu_get_default(); 5636 if (iommu) { 5637 x86_iommu_iec_register_notifier(iommu, 5638 kvm_update_msi_routes_all, 5639 NULL); 5640 } 5641 notify_list_inited = true; 5642 } 5643 return 0; 5644 } 5645 5646 int kvm_arch_release_virq_post(int virq) 5647 { 5648 MSIRouteEntry *entry, *next; 5649 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) { 5650 if (entry->virq == virq) { 5651 trace_kvm_x86_remove_msi_route(virq); 5652 QLIST_REMOVE(entry, list); 5653 g_free(entry); 5654 break; 5655 } 5656 } 5657 return 0; 5658 } 5659 5660 int kvm_arch_msi_data_to_gsi(uint32_t data) 5661 { 5662 abort(); 5663 } 5664 5665 bool kvm_has_waitpkg(void) 5666 { 5667 return has_msr_umwait; 5668 } 5669 5670 #define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 5671 5672 void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask) 5673 { 5674 KVMState *s = kvm_state; 5675 uint64_t supported; 5676 5677 mask &= XSTATE_DYNAMIC_MASK; 5678 if (!mask) { 5679 return; 5680 } 5681 /* 5682 * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0]. 5683 * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned 5684 * about them already because they are not supported features. 
5685 */ 5686 supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); 5687 supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32; 5688 mask &= supported; 5689 5690 while (mask) { 5691 int bit = ctz64(mask); 5692 int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); 5693 if (rc) { 5694 /* 5695 * Older kernel version (<5.17) do not support 5696 * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return 5697 * any dynamic feature from kvm_arch_get_supported_cpuid. 5698 */ 5699 warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure " 5700 "for feature bit %d", bit); 5701 } 5702 mask &= ~BIT_ULL(bit); 5703 } 5704 } 5705 5706 static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp) 5707 { 5708 KVMState *s = KVM_STATE(obj); 5709 return s->notify_vmexit; 5710 } 5711 5712 static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp) 5713 { 5714 KVMState *s = KVM_STATE(obj); 5715 5716 if (s->fd != -1) { 5717 error_setg(errp, "Cannot set properties after the accelerator has been initialized"); 5718 return; 5719 } 5720 5721 s->notify_vmexit = value; 5722 } 5723 5724 static void kvm_arch_get_notify_window(Object *obj, Visitor *v, 5725 const char *name, void *opaque, 5726 Error **errp) 5727 { 5728 KVMState *s = KVM_STATE(obj); 5729 uint32_t value = s->notify_window; 5730 5731 visit_type_uint32(v, name, &value, errp); 5732 } 5733 5734 static void kvm_arch_set_notify_window(Object *obj, Visitor *v, 5735 const char *name, void *opaque, 5736 Error **errp) 5737 { 5738 KVMState *s = KVM_STATE(obj); 5739 uint32_t value; 5740 5741 if (s->fd != -1) { 5742 error_setg(errp, "Cannot set properties after the accelerator has been initialized"); 5743 return; 5744 } 5745 5746 if (!visit_type_uint32(v, name, &value, errp)) { 5747 return; 5748 } 5749 5750 s->notify_window = value; 5751 } 5752 5753 static void kvm_arch_get_xen_version(Object *obj, Visitor *v, 5754 const char *name, void *opaque, 5755 Error **errp) 5756 { 5757 KVMState *s = KVM_STATE(obj); 5758 uint32_t value = s->xen_version; 5759 5760 visit_type_uint32(v, name, &value, errp); 5761 } 5762 5763 static void kvm_arch_set_xen_version(Object *obj, Visitor *v, 5764 const char *name, void *opaque, 5765 Error **errp) 5766 { 5767 KVMState *s = KVM_STATE(obj); 5768 Error *error = NULL; 5769 uint32_t value; 5770 5771 visit_type_uint32(v, name, &value, &error); 5772 if (error) { 5773 error_propagate(errp, error); 5774 return; 5775 } 5776 5777 s->xen_version = value; 5778 if (value && xen_mode == XEN_DISABLED) { 5779 xen_mode = XEN_EMULATE; 5780 } 5781 } 5782 5783 static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v, 5784 const char *name, void *opaque, 5785 Error **errp) 5786 { 5787 KVMState *s = KVM_STATE(obj); 5788 uint16_t value = s->xen_gnttab_max_frames; 5789 5790 visit_type_uint16(v, name, &value, errp); 5791 } 5792 5793 static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v, 5794 const char *name, void *opaque, 5795 Error **errp) 5796 { 5797 KVMState *s = KVM_STATE(obj); 5798 Error *error = NULL; 5799 uint16_t value; 5800 5801 visit_type_uint16(v, name, &value, &error); 5802 if (error) { 5803 error_propagate(errp, error); 5804 return; 5805 } 5806 5807 s->xen_gnttab_max_frames = value; 5808 } 5809 5810 static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v, 5811 const char *name, void *opaque, 5812 Error **errp) 5813 { 5814 KVMState *s = KVM_STATE(obj); 5815 uint16_t value = s->xen_evtchn_max_pirq; 5816 5817 visit_type_uint16(v, name, &value, errp); 5818 } 5819 
5820 static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v, 5821 const char *name, void *opaque, 5822 Error **errp) 5823 { 5824 KVMState *s = KVM_STATE(obj); 5825 Error *error = NULL; 5826 uint16_t value; 5827 5828 visit_type_uint16(v, name, &value, &error); 5829 if (error) { 5830 error_propagate(errp, error); 5831 return; 5832 } 5833 5834 s->xen_evtchn_max_pirq = value; 5835 } 5836 5837 void kvm_arch_accel_class_init(ObjectClass *oc) 5838 { 5839 object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", 5840 &NotifyVmexitOption_lookup, 5841 kvm_arch_get_notify_vmexit, 5842 kvm_arch_set_notify_vmexit); 5843 object_class_property_set_description(oc, "notify-vmexit", 5844 "Enable notify VM exit"); 5845 5846 object_class_property_add(oc, "notify-window", "uint32", 5847 kvm_arch_get_notify_window, 5848 kvm_arch_set_notify_window, 5849 NULL, NULL); 5850 object_class_property_set_description(oc, "notify-window", 5851 "Clock cycles without an event window " 5852 "after which a notification VM exit occurs"); 5853 5854 object_class_property_add(oc, "xen-version", "uint32", 5855 kvm_arch_get_xen_version, 5856 kvm_arch_set_xen_version, 5857 NULL, NULL); 5858 object_class_property_set_description(oc, "xen-version", 5859 "Xen version to be emulated " 5860 "(in XENVER_version form " 5861 "e.g. 0x4000a for 4.10)"); 5862 5863 object_class_property_add(oc, "xen-gnttab-max-frames", "uint16", 5864 kvm_arch_get_xen_gnttab_max_frames, 5865 kvm_arch_set_xen_gnttab_max_frames, 5866 NULL, NULL); 5867 object_class_property_set_description(oc, "xen-gnttab-max-frames", 5868 "Maximum number of grant table frames"); 5869 5870 object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16", 5871 kvm_arch_get_xen_evtchn_max_pirq, 5872 kvm_arch_set_xen_evtchn_max_pirq, 5873 NULL, NULL); 5874 object_class_property_set_description(oc, "xen-evtchn-max-pirq", 5875 "Maximum number of Xen PIRQs"); 5876 } 5877 5878 void kvm_set_max_apic_id(uint32_t max_apic_id) 5879 { 5880 kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id); 5881 } 5882