// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"
#include "smm.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>
#include <linux/smp.h>

#include <trace/events/ipi.h>
#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
	.supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

static DEFINE_MUTEX(vendor_module_lock);
struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
 * adaptive tuning starting from default advancement of 1000ns.  '0' disables
 * advancement entirely.  Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF	BIT(1)
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/* Enable/disable SMT_RSB bug mitigation */
static bool __read_mostly mitigate_smt_rsb;
module_param(mitigate_smt_rsb, bool, 0444);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

u64 __read_mostly host_arch_capabilities;
EXPORT_SYMBOL_GPL(host_arch_capabilities);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_taken),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_emulate),
	STATS_DESC_COUNTER(VCPU, pf_spurious),
	STATS_DESC_COUNTER(VCPU, pf_fast),
	STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_COUNTER(VCPU, preemption_reported),
	STATS_DESC_COUNTER(VCPU, preemption_other),
	STATS_DESC_IBOOLEAN(VCPU, guest_mode),
	STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
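
/*
 * Illustrative note (not in the original source): these headers describe
 * the binary layout of the stats file returned by KVM_GET_STATS_FD.
 * Assuming KVM_STATS_NAME_SIZE is 48 (its current uAPI value), a reader
 * sees, in order: the header itself, the id string at id_offset, the
 * descriptor array at desc_offset, and the u64 stat data at data_offset,
 * with each descriptor mapping one named stat onto the data block.
 */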
u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silence this failed msr access.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		/* Mask the error */
		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);

static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}
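
/*
 * Illustrative usage sketch (not part of this file): a vendor module
 * typically claims a slot at hardware-setup time and then defers restoring
 * the host value until the CPU returns to userspace, e.g.:
 *
 *	slot = kvm_add_user_return_msr(MSR_TSC_AUX);
 *	...
 *	kvm_set_user_return_msr(slot, guest_val, -1ull);   before VM-entry
 *
 * The notifier registered by kvm_set_user_return_msr() then writes the
 * host value back via kvm_on_user_return() on the next return to
 * userspace.  MSR_TSC_AUX is just an example of an MSR that is only
 * consumed in usermode.
 */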
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
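
/*
 * Illustrative note (not in the original source): the two guest-initiated
 * checks above encode the architectural IA32_APIC_BASE mode transitions.
 * E.g. leaving x2APIC (EN=1, EXTD=1) directly for xAPIC (EN=1, EXTD=0) is
 * illegal; the guest must pass through "disabled" (EN=0, EXTD=0) first.
 * Likewise EXTD cannot be set while the APIC is disabled.  Host-initiated
 * writes (e.g. migration restore) bypass the transition checks so any
 * valid mode can be loaded directly.
 */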
/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during reboot instead the instruction is ignored.
 */
noinstr void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3
#define EXCPT_DB		4

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/*
	 * #DBs can be trap-like or fault-like, the caller must check other CPU
	 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
	 */
	if (mask & (1 << DB_VECTOR))
		return EXCPT_DB;

	if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex)
{
	if (!ex->has_payload)
		return;

	switch (ex->vector) {
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bits 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * In order to reflect the #DB exception payload in guest
		 * dr6, three components need to be considered: active-low
		 * bits, FIXED_1 bits and active-high bits (e.g. DR6_BD,
		 * DR6_BS and DR6_BT).
		 * DR6_ACTIVE_LOW contains the FIXED_1 and active-low bits.
		 * In the target guest dr6:
		 * FIXED_1 bits should always be set.
		 * Active-low bits should be cleared if set to 1 in the payload.
		 * Active-high bits should be set if set to 1 in the payload.
		 *
		 * Note, the payload is compatible with the pending debug
		 * exceptions/exit qualification under VMX, whose active-low
		 * bits are active-high in the payload.
		 * So they need to be flipped for DR6.
		 */
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= ex->payload;
		vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;

		/*
		 * The #DB payload is defined as compatible with the 'pending
		 * debug exceptions' field under VMX, not DR6.  While bit 12 is
		 * defined in the 'pending debug exceptions' field (enabled
		 * breakpoint), it is reserved and must be zero in DR6.
		 */
		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = ex->payload;
		break;
	}

	ex->has_payload = false;
	ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
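
/*
 * Worked example (illustrative, not in the original source): a single-step
 * trap is queued with payload = DR6_BS.  DR6_BS is an active-high bit, so
 * (payload & DR6_ACTIVE_LOW) is 0, the XOR is a no-op, and the result is
 *
 *	dr6 = DR6_ACTIVE_LOW | DR6_BS;
 *
 * For an RTM #DB the payload has bit 16 set; bit 16 *is* in
 * DR6_ACTIVE_LOW, so the OR sets it and the XOR then clears it, i.e. the
 * architecturally active-low DR6.RTM bit reads 0 exactly when an RTM
 * event is being reported.
 */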
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
				       bool has_error_code, u32 error_code,
				       bool has_payload, unsigned long payload)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;

	ex->vector = vector;
	ex->injected = false;
	ex->pending = true;
	ex->has_error_code = has_error_code;
	ex->error_code = error_code;
	ex->has_payload = has_payload;
	ex->payload = payload;
}

/* Forcibly leave the nested mode in cases like a vCPU reset */
static void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.nested_ops->leave_nested(vcpu);
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * If the exception is destined for L2 and isn't being reinjected,
	 * morph it to a VM-Exit if L1 wants to intercept the exception.  A
	 * previously injected exception is not checked because it was checked
	 * when it was originally queued, and re-checking is incorrect if _L1_
	 * injected the exception, in which case it's exempt from interception.
	 */
	if (!reinject && is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
		kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
					   has_payload, payload);
		return;
	}

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			/*
			 * On VM-Entry, an exception can be pending if and only
			 * if event injection was blocked by nested_run_pending.
			 * In that case, however, vcpu_enter_guest() requests an
			 * immediate exit, and the guest shouldn't proceed far
			 * enough to need reinjection.
			 */
			WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.vector = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu,
						      &vcpu->arch.exception);
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.vector;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
	    (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Synthesize #DF.  Clear the previously injected or pending
		 * exception so as not to incorrectly trigger shutdown.
		 */
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.pending = false;

		kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
	} else {
		/*
		 * Replace the previous exception with a new one in the hope
		 * that instruction re-execution will regenerate the lost
		 * exception.
		 */
		goto queue;
	}
}
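
/*
 * Illustrative examples of the promotion rules above (not in the original
 * source), following the SDM's exception class table:
 *
 *	pending #GP, new #GP  -> contributory + contributory -> #DF
 *	pending #PF, new #GP  -> #PF + contributory          -> #DF
 *	pending #PF, new #PF  -> #PF + #PF                   -> #DF
 *	pending #GP, new #PF  -> not a #DF pairing; the new exception
 *	                         replaces the old one (the "goto queue" path)
 *	pending #DF, new anything -> triple fault -> shutdown
 */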
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
				       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;

	/*
	 * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
	 * whether or not L1 wants to intercept "regular" #PF.
	 */
	if (is_guest_mode(vcpu) && fault->async_page_fault)
		kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
					   true, fault->error_code,
					   true, fault->address);
	else
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
}

void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;

	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

	/*
	 * Invalidate the TLB entry for the faulting address, if it exists,
	 * else the access will fault indefinitely (and to emulate hardware).
	 */
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
					KVM_MMU_ROOT_CURRENT);

	fault_mmu->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}

/*
 * Load the pae pdptrs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	gpa_t real_gpa;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	/*
	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
	 * to an L1 GPA.
	 */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
				     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
	if (real_gpa == INVALID_GPA)
		return 0;

	/* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
				       cr3 & GENMASK(11, 5), sizeof(pdpte));
	if (ret < 0)
		return 0;

	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			return 0;
		}
	}

	/*
	 * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
	 * Shadow page roots need to be reconstructed instead.
	 */
	if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
		kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
	vcpu->arch.pdptrs_from_userspace = false;

	return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
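
/*
 * Illustrative example (not in the original source): pdptr_rsvd_bits()
 * flags bits 1-2 and 5-8 plus the vCPU's reserved GPA bits, matching the
 * SDM's PAE PDPTE format.  So a present PDPTE of 0x1021 (P=1, bit 5 set)
 * fails the loop above and load_pdptrs() returns 0, which callers such as
 * kvm_set_cr0() and kvm_set_cr3() turn into a failed control-register
 * write.
 */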
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return false;
#endif

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return false;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return false;

	return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
}

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	/*
	 * CR0.WP is incorporated into the MMU role, but only for non-nested,
	 * indirect shadow MMUs.  If paging is disabled, no updates are needed
	 * as there are no permission bits to emulate.  If TDP is enabled, the
	 * MMU's metadata needs to be updated, e.g. so that emulating guest
	 * translations does the right thing, but there's no need to unload the
	 * root as CR0.WP doesn't affect SPTEs.
	 */
	if ((cr0 ^ old_cr0) == X86_CR0_WP) {
		if (!(cr0 & X86_CR0_PG))
			return;

		if (tdp_enabled) {
			kvm_init_mmu(vcpu);
			return;
		}
	}

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);

		/*
		 * Clearing CR0.PG is defined to flush the TLB from the guest's
		 * perspective.
		 */
		if (!(cr0 & X86_CR0_PG))
			kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);

	if (!kvm_is_valid_cr0(vcpu, cr0))
		return 1;

	cr0 |= X86_CR0_ET;

	/* Writes to CR0 reserved bits are ignored, even on Intel. */
	cr0 &= ~CR0_RESERVED_BITS;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
	    !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) &&
	    (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
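
/*
 * Illustrative note (not in the original source): LMSW only loads the low
 * four CR0 bits (PE, MP, EM, TS) and can set but never clear PE.  The
 * masking above implements exactly that: kvm_read_cr0_bits(vcpu, ~0x0eul)
 * keeps every bit except MP/EM/TS (PE included), then the OR merges in
 * the new low nibble, so e.g. lmsw with 0x0 on a protected-mode guest
 * clears MP/EM/TS but leaves PE=1.
 */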
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}

	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}

	if ((xcr0 & XFEATURE_MASK_XTILE) &&
	    ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
		return 1;

	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid_runtime(vcpu);
	return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
	/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
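
/*
 * Illustrative examples of the dependency checks above (not in the
 * original source):
 *
 *	XCR0 = 0                        -> #GP, FP (bit 0) must be set
 *	XCR0 = FP | YMM                 -> #GP, YMM requires SSE
 *	XCR0 = FP | SSE | BNDREGS only  -> #GP, BNDREGS/BNDCSR pair up
 *	XCR0 = FP | SSE | one AVX512 bit -> #GP, the three AVX512 bits must
 *	                                   be set together, and with YMM
 *	XCR0 = FP | SSE | YMM           -> accepted; the XFEATURE_MASK_EXTEND
 *	                                   change triggers
 *	                                   kvm_update_cpuid_runtime()
 */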
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return false;

	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	return __kvm_is_valid_cr4(vcpu, cr4) &&
	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
	if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	/*
	 * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
	 * according to the SDM; however, stale prev_roots could be reused
	 * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
	 * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
	 * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not
	 * flushed, so fall through.
	 */
	if (!tdp_enabled &&
	    (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
		kvm_mmu_unload(vcpu);

	/*
	 * The TLB has to be flushed for all PCIDs if any of the following
	 * (architecturally required) changes happen:
	 * - CR4.PCIDE is changed from 1 to 0
	 * - CR4.PGE is toggled
	 *
	 * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
	 */
	if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

	/*
	 * The TLB has to be flushed for the current PCID if any of the
	 * following (architecturally required) changes happen:
	 * - CR4.SMEP is changed from 0 to 1
	 * - CR4.PAE is toggled
	 */
	else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
		 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
		   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		/* PCID cannot be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	static_call(kvm_x86_set_cr4)(vcpu, cr4);

	kvm_post_set_cr4(vcpu, old_cr4, cr4);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	unsigned long roots_to_free = 0;
	int i;

	/*
	 * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
	 * this is reachable when running EPT=1 and unrestricted_guest=0, and
	 * also via the emulator.  KVM's TDP page tables are not in the scope
	 * of the invalidation, but the guest's TLB entries need to be flushed
	 * as the CPU may have cached entries in its TLB for the target PCID.
	 */
	if (unlikely(tdp_enabled)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/*
	 * If neither the current CR3 nor any of the prev_roots use the given
	 * PCID, then nothing needs to be done here because a resync will
	 * happen anyway before switching to any other CR3.
	 */
	if (kvm_get_active_pcid(vcpu) == pcid) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/*
	 * If PCID is disabled, there is no need to free prev_roots even if the
	 * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
	 * with PCIDE=0.
	 */
	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
		return;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
	unsigned long pcid = 0;
#ifdef CONFIG_X86_64
	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
		pcid = cr3 & X86_CR3_PCID_MASK;
	}
#endif

	/* PDPTRs are always reloaded for PAE paging. */
	if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
		goto handle_tlb_flush;

	/*
	 * Do not condition the GPA check on long mode, this helper is used to
	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
	 * the current vCPU mode is accurate.
	 */
	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return 1;

	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
		return 1;

	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
	/* Do not call post_set_cr3, we do not get here for confidential guests. */

handle_tlb_flush:
	/*
	 * A load of CR3 that flushes the TLB flushes only the current PCID,
	 * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
	 * moot point in the end because _disabling_ PCID will flush all PCIDs,
	 * and it's impossible to use a non-zero PCID when PCID is disabled,
	 * i.e. only PCID=0 can be relevant.
	 */
	if (!skip_tlb_flush)
		kvm_invalidate_pcid(vcpu, pcid);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
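
/*
 * Illustrative example (not in the original source): with CR4.PCIDE=1, a
 * guest write of
 *
 *	CR3 = 0x8000000000123005
 *
 * has bit 63 (X86_CR3_PCID_NOFLUSH) set, so skip_tlb_flush is true, the
 * bit is stripped, pcid = 0x005, and the switch to page-table base
 * 0x123000 happens without invalidating PCID 5's cached translations.
 * The same value with bit 63 clear instead ends in
 * kvm_invalidate_pcid(vcpu, 5).
 */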
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	static_call(kvm_x86_set_dr7)(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		fixed |= DR6_BUS_LOCK;
	return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
	case 6:
		if (!kvm_dr6_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		break;
	case 5:
	default: /* 7 */
		if (!kvm_dr7_valid(val))
			return 1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
 * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.  msrs_to_save holds MSRs that
 * require host support, i.e. should be probed via RDMSR.  emulated_msrs holds
 * MSRs that KVM emulates without strictly requiring host support.
 * msr_based_features holds MSRs that enumerate features, i.e. are effectively
 * CPUID leafs.  Note, msr_based_features isn't mutually exclusive with
 * msrs_to_save and emulated_msrs.
 */

static const u32 msrs_to_save_base[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
	MSR_IA32_UMWAIT_CONTROL,

	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};

static const u32 msrs_to_save_pmu[] = {
	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,

	/* This block of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,

	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,

	/* This block of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,

	MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
	MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
			ARRAY_SIZE(msrs_to_save_pmu)];
static unsigned num_msrs_to_save;
static const u32 emulated_msrs_all[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
	HV_X64_MSR_SYNDBG_OPTIONS,
	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
	HV_X64_MSR_SYNDBG_PENDING_BUFFER,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSC_DEADLINE,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
	MSR_AMD64_TSC_RATIO,
	MSR_IA32_POWER_CTL,
	MSR_IA32_UCODE_REV,

	/*
	 * KVM always supports the "true" VMX control MSRs, even if the host
	 * does not.  The VMX MSRs as a whole are considered "emulated" as KVM
	 * doesn't strictly require them to exist in the host (ignoring that
	 * KVM would refuse to load in the first place if the core set of MSRs
	 * aren't supported).
	 */
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_K7_HWCR,
	MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;

/*
 * List of MSRs that control the existence of MSR-based features, i.e. MSRs
 * that are effectively CPUID leafs.  VMX MSRs are also included in the set of
 * feature MSRs, but are handled separately to allow expedited lookups.
 */
static const u32 msr_based_features_all_except_vmx[] = {
	MSR_AMD64_DE_CFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
			      (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
static unsigned int num_msr_based_features;

/*
 * All feature MSRs except uCode revID, which tracks the currently loaded uCode
 * patch, are immutable once the vCPU model is defined.
 */
static bool kvm_is_immutable_feature_msr(u32 msr)
{
	int i;

	if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
		return true;

	for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
		if (msr == msr_based_features_all_except_vmx[i])
			return msr != MSR_IA32_UCODE_REV;
	}

	return false;
}
/*
 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
 * does not yet virtualize.  These include:
 *   10 - MISC_PACKAGE_CTRLS
 *   11 - ENERGY_FILTERING_CTL
 *   12 - DOITM
 *   18 - FB_CLEAR_CTRL
 *   21 - XAPIC_DISABLE_STATUS
 *   23 - OVERCLOCKING_STATUS
 */

#define KVM_SUPPORTED_ARCH_CAP \
	(ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
	 ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)

static u64 kvm_get_arch_capabilities(void)
{
	u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;

	/*
	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
	 * the nested hypervisor runs with NX huge pages.  If it is not,
	 * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
	 * L1 guests, so it need not worry about its own (L2) guests.
	 */
	data |= ARCH_CAP_PSCHANGE_MC_NO;

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		data |= ARCH_CAP_RDCL_NO;
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		data |= ARCH_CAP_SSB_NO;
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		data |= ARCH_CAP_MDS_NO;

	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		/*
		 * If RTM=0 because the kernel has disabled TSX, the host might
		 * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
		 * and therefore knows that there cannot be TAA) but keep
		 * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
		 * and we want to allow migrating those guests to tsx=off hosts.
		 */
		data &= ~ARCH_CAP_TAA_NO;
	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		data |= ARCH_CAP_TAA_NO;
	} else {
		/*
		 * Nothing to do here; we emulate TSX_CTRL if present on the
		 * host so the guest can choose between disabling TSX or
		 * using VERW to clear CPU buffers.
		 */
	}

	if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
		data |= ARCH_CAP_GDS_NO;

	return data;
}
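
/*
 * Illustrative example (not in the original source): on a host whose CPU
 * is not affected by MDS but whose firmware does not set MDS_NO in
 * IA32_ARCH_CAPABILITIES, the !boot_cpu_has_bug(X86_BUG_MDS) branch above
 * still advertises ARCH_CAP_MDS_NO to the guest: the value is derived
 * from the kernel's own vulnerability assessment, not just from the raw
 * host MSR masked by KVM_SUPPORTED_ARCH_CAP.
 */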
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_ARCH_CAPABILITIES:
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		msr->data = kvm_caps.supported_perf_cap;
		break;
	case MSR_IA32_UCODE_REV:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		return static_call(kvm_x86_get_msr_feature)(msr);
	}
	return 0;
}

static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
	int r;

	msr.index = index;
	r = kvm_get_msr_feature(&msr);

	if (r == KVM_MSR_RET_INVALID) {
		/* Unconditionally clear the output for simplicity */
		*data = 0;
		if (kvm_msr_ignored_check(index, 0, false))
			r = 0;
	}

	if (r)
		return r;

	*data = msr.data;

	return 0;
}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
		return false;

	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;

	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return false;

	if (efer & (EFER_LME | EFER_LMA) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return false;

	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
		return false;

	return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;
	int r;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	r = static_call(kvm_x86_set_efer)(vcpu, efer);
	if (r) {
		WARN_ON(r > 0);
		return r;
	}

	if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
	struct kvm_x86_msr_filter *msr_filter;
	struct msr_bitmap_range *ranges;
	struct kvm *kvm = vcpu->kvm;
	bool allowed;
	int idx;
	u32 i;

	/* x2APIC MSRs do not support filtering. */
	if (index >= 0x800 && index <= 0x8ff)
		return true;

	idx = srcu_read_lock(&kvm->srcu);

	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
	if (!msr_filter) {
		allowed = true;
		goto out;
	}

	allowed = msr_filter->default_allow;
	ranges = msr_filter->ranges;

	for (i = 0; i < msr_filter->count; i++) {
		u32 start = ranges[i].base;
		u32 end = start + ranges[i].nmsrs;
		u32 flags = ranges[i].flags;
		unsigned long *bitmap = ranges[i].bitmap;

		if ((index >= start) && (index < end) && (flags & type)) {
			allowed = test_bit(index - start, bitmap);
			break;
		}
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);

	return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
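
/*
 * Illustrative example (not in the original source; field names follow the
 * KVM_X86_SET_MSR_FILTER uAPI): userspace installs default_allow = true
 * plus one range
 *
 *	.flags = KVM_MSR_FILTER_WRITE, .base = 0xc0000080 (EFER),
 *	.nmsrs = 1, bitmap bit 0 clear
 *
 * After that, kvm_msr_allowed(vcpu, 0xc0000080, KVM_MSR_FILTER_WRITE)
 * returns false, so guest WRMSR to EFER is rejected or forwarded to
 * userspace as KVM_EXIT_X86_WRMSR with reason KVM_MSR_EXIT_REASON_FILTER,
 * while EFER reads and all other MSRs still follow default_allow.
 */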
1886 */ 1887 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) 1888 return 1; 1889 1890 data = (u32)data; 1891 break; 1892 } 1893 1894 msr.data = data; 1895 msr.index = index; 1896 msr.host_initiated = host_initiated; 1897 1898 return static_call(kvm_x86_set_msr)(vcpu, &msr); 1899 } 1900 1901 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, 1902 u32 index, u64 data, bool host_initiated) 1903 { 1904 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1905 1906 if (ret == KVM_MSR_RET_INVALID) 1907 if (kvm_msr_ignored_check(index, data, true)) 1908 ret = 0; 1909 1910 return ret; 1911 } 1912 1913 /* 1914 * Read the MSR specified by @index into @data. Select MSR specific fault 1915 * checks are bypassed if @host_initiated is %true. 1916 * Returns 0 on success, non-0 otherwise. 1917 * Assumes vcpu_load() was already called. 1918 */ 1919 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, 1920 bool host_initiated) 1921 { 1922 struct msr_data msr; 1923 int ret; 1924 1925 switch (index) { 1926 case MSR_TSC_AUX: 1927 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) 1928 return 1; 1929 1930 if (!host_initiated && 1931 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && 1932 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) 1933 return 1; 1934 break; 1935 } 1936 1937 msr.index = index; 1938 msr.host_initiated = host_initiated; 1939 1940 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1941 if (!ret) 1942 *data = msr.data; 1943 return ret; 1944 } 1945 1946 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, 1947 u32 index, u64 *data, bool host_initiated) 1948 { 1949 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); 1950 1951 if (ret == KVM_MSR_RET_INVALID) { 1952 /* Unconditionally clear *data for simplicity */ 1953 *data = 0; 1954 if (kvm_msr_ignored_check(index, 0, false)) 1955 ret = 0; 1956 } 1957 1958 return ret; 1959 } 1960 1961 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1962 { 1963 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) 1964 return KVM_MSR_RET_FILTERED; 1965 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1966 } 1967 1968 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) 1969 { 1970 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) 1971 return KVM_MSR_RET_FILTERED; 1972 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1973 } 1974 1975 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) 1976 { 1977 return kvm_get_msr_ignored_check(vcpu, index, data, false); 1978 } 1979 EXPORT_SYMBOL_GPL(kvm_get_msr); 1980 1981 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) 1982 { 1983 return kvm_set_msr_ignored_check(vcpu, index, data, false); 1984 } 1985 EXPORT_SYMBOL_GPL(kvm_set_msr); 1986 1987 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) 1988 { 1989 if (!vcpu->run->msr.error) { 1990 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); 1991 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1992 } 1993 } 1994 1995 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) 1996 { 1997 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); 1998 } 1999 2000 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) 2001 { 2002 complete_userspace_rdmsr(vcpu); 2003 return complete_emulated_msr_access(vcpu); 2004 } 2005 2006 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) 2007 { 2008 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 2009 } 2010 2011 static int complete_fast_rdmsr(struct 
kvm_vcpu *vcpu) 2012 { 2013 complete_userspace_rdmsr(vcpu); 2014 return complete_fast_msr_access(vcpu); 2015 } 2016 2017 static u64 kvm_msr_reason(int r) 2018 { 2019 switch (r) { 2020 case KVM_MSR_RET_INVALID: 2021 return KVM_MSR_EXIT_REASON_UNKNOWN; 2022 case KVM_MSR_RET_FILTERED: 2023 return KVM_MSR_EXIT_REASON_FILTER; 2024 default: 2025 return KVM_MSR_EXIT_REASON_INVAL; 2026 } 2027 } 2028 2029 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, 2030 u32 exit_reason, u64 data, 2031 int (*completion)(struct kvm_vcpu *vcpu), 2032 int r) 2033 { 2034 u64 msr_reason = kvm_msr_reason(r); 2035 2036 /* Check if the user wanted to know about this MSR fault */ 2037 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) 2038 return 0; 2039 2040 vcpu->run->exit_reason = exit_reason; 2041 vcpu->run->msr.error = 0; 2042 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); 2043 vcpu->run->msr.reason = msr_reason; 2044 vcpu->run->msr.index = index; 2045 vcpu->run->msr.data = data; 2046 vcpu->arch.complete_userspace_io = completion; 2047 2048 return 1; 2049 } 2050 2051 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) 2052 { 2053 u32 ecx = kvm_rcx_read(vcpu); 2054 u64 data; 2055 int r; 2056 2057 r = kvm_get_msr_with_filter(vcpu, ecx, &data); 2058 2059 if (!r) { 2060 trace_kvm_msr_read(ecx, data); 2061 2062 kvm_rax_write(vcpu, data & -1u); 2063 kvm_rdx_write(vcpu, (data >> 32) & -1u); 2064 } else { 2065 /* MSR read failed? See if we should ask user space */ 2066 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, 2067 complete_fast_rdmsr, r)) 2068 return 0; 2069 trace_kvm_msr_read_ex(ecx); 2070 } 2071 2072 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2073 } 2074 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2075 2076 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) 2077 { 2078 u32 ecx = kvm_rcx_read(vcpu); 2079 u64 data = kvm_read_edx_eax(vcpu); 2080 int r; 2081 2082 r = kvm_set_msr_with_filter(vcpu, ecx, data); 2083 2084 if (!r) { 2085 trace_kvm_msr_write(ecx, data); 2086 } else { 2087 /* MSR write failed? See if we should ask user space */ 2088 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, 2089 complete_fast_msr_access, r)) 2090 return 0; 2091 /* Signal all other negative errors to userspace */ 2092 if (r < 0) 2093 return r; 2094 trace_kvm_msr_write_ex(ecx, data); 2095 } 2096 2097 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 2098 } 2099 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2100 2101 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2102 { 2103 return kvm_skip_emulated_instruction(vcpu); 2104 } 2105 2106 int kvm_emulate_invd(struct kvm_vcpu *vcpu) 2107 { 2108 /* Treat an INVD instruction as a NOP and just skip it. 
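 * Honoring INVD would mean invalidating host cache lines without
 * writing them back, which is never safe to do on a guest's behalf,
 * so dropping the instruction is the only sane option.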
*/ 2109 return kvm_emulate_as_nop(vcpu); 2110 } 2111 EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2112 2113 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2114 { 2115 kvm_queue_exception(vcpu, UD_VECTOR); 2116 return 1; 2117 } 2118 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2119 2120 2121 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) 2122 { 2123 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && 2124 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2125 return kvm_handle_invalid_op(vcpu); 2126 2127 pr_warn_once("%s instruction emulated as NOP!\n", insn); 2128 return kvm_emulate_as_nop(vcpu); 2129 } 2130 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) 2131 { 2132 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2133 } 2134 EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2135 2136 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2137 { 2138 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2139 } 2140 EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2141 2142 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2143 { 2144 xfer_to_guest_mode_prepare(); 2145 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || 2146 xfer_to_guest_mode_work_pending(); 2147 } 2148 2149 /* 2150 * The fast path for frequent and performance sensitive wrmsr emulation, 2151 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces 2152 * the latency of virtual IPI by avoiding the expensive bits of transitioning 2153 * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the 2154 * other cases which must be called after interrupts are enabled on the host. 2155 */ 2156 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) 2157 { 2158 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) 2159 return 1; 2160 2161 if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) && 2162 ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) && 2163 ((data & APIC_MODE_MASK) == APIC_DM_FIXED) && 2164 ((u32)(data >> 32) != X2APIC_BROADCAST)) 2165 return kvm_x2apic_icr_write(vcpu->arch.apic, data); 2166 2167 return 1; 2168 } 2169 2170 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) 2171 { 2172 if (!kvm_can_use_hv_timer(vcpu)) 2173 return 1; 2174 2175 kvm_set_lapic_tscdeadline_msr(vcpu, data); 2176 return 0; 2177 } 2178 2179 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) 2180 { 2181 u32 msr = kvm_rcx_read(vcpu); 2182 u64 data; 2183 fastpath_t ret = EXIT_FASTPATH_NONE; 2184 2185 kvm_vcpu_srcu_read_lock(vcpu); 2186 2187 switch (msr) { 2188 case APIC_BASE_MSR + (APIC_ICR >> 4): 2189 data = kvm_read_edx_eax(vcpu); 2190 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { 2191 kvm_skip_emulated_instruction(vcpu); 2192 ret = EXIT_FASTPATH_EXIT_HANDLED; 2193 } 2194 break; 2195 case MSR_IA32_TSC_DEADLINE: 2196 data = kvm_read_edx_eax(vcpu); 2197 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { 2198 kvm_skip_emulated_instruction(vcpu); 2199 ret = EXIT_FASTPATH_REENTER_GUEST; 2200 } 2201 break; 2202 default: 2203 break; 2204 } 2205 2206 if (ret != EXIT_FASTPATH_NONE) 2207 trace_kvm_msr_write(msr, data); 2208 2209 kvm_vcpu_srcu_read_unlock(vcpu); 2210 2211 return ret; 2212 } 2213 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff); 2214 2215 /* 2216 * Adapt set_msr() to msr_io()'s calling convention 2217 */ 2218 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2219 { 2220 return kvm_get_msr_ignored_check(vcpu, index, data, true); 2221 } 2222 2223 static int 
do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 2224 { 2225 u64 val; 2226 2227 /* 2228 * Disallow writes to immutable feature MSRs after KVM_RUN. KVM does 2229 * not support modifying the guest vCPU model on the fly, e.g. changing 2230 * the nVMX capabilities while L2 is running is nonsensical. Ignore 2231 * writes of the same value, e.g. to allow userspace to blindly stuff 2232 * all MSRs when emulating RESET. 2233 */ 2234 if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) { 2235 if (do_get_msr(vcpu, index, &val) || *data != val) 2236 return -EINVAL; 2237 2238 return 0; 2239 } 2240 2241 return kvm_set_msr_ignored_check(vcpu, index, *data, true); 2242 } 2243 2244 #ifdef CONFIG_X86_64 2245 struct pvclock_clock { 2246 int vclock_mode; 2247 u64 cycle_last; 2248 u64 mask; 2249 u32 mult; 2250 u32 shift; 2251 u64 base_cycles; 2252 u64 offset; 2253 }; 2254 2255 struct pvclock_gtod_data { 2256 seqcount_t seq; 2257 2258 struct pvclock_clock clock; /* extract of a clocksource struct */ 2259 struct pvclock_clock raw_clock; /* extract of a clocksource struct */ 2260 2261 ktime_t offs_boot; 2262 u64 wall_time_sec; 2263 }; 2264 2265 static struct pvclock_gtod_data pvclock_gtod_data; 2266 2267 static void update_pvclock_gtod(struct timekeeper *tk) 2268 { 2269 struct pvclock_gtod_data *vdata = &pvclock_gtod_data; 2270 2271 write_seqcount_begin(&vdata->seq); 2272 2273 /* copy pvclock gtod data */ 2274 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; 2275 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 2276 vdata->clock.mask = tk->tkr_mono.mask; 2277 vdata->clock.mult = tk->tkr_mono.mult; 2278 vdata->clock.shift = tk->tkr_mono.shift; 2279 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; 2280 vdata->clock.offset = tk->tkr_mono.base; 2281 2282 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; 2283 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; 2284 vdata->raw_clock.mask = tk->tkr_raw.mask; 2285 vdata->raw_clock.mult = tk->tkr_raw.mult; 2286 vdata->raw_clock.shift = tk->tkr_raw.shift; 2287 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; 2288 vdata->raw_clock.offset = tk->tkr_raw.base; 2289 2290 vdata->wall_time_sec = tk->xtime_sec; 2291 2292 vdata->offs_boot = tk->offs_boot; 2293 2294 write_seqcount_end(&vdata->seq); 2295 } 2296 2297 static s64 get_kvmclock_base_ns(void) 2298 { 2299 /* Count up from boot time, but with the frequency of the raw clock. */ 2300 return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot)); 2301 } 2302 #else 2303 static s64 get_kvmclock_base_ns(void) 2304 { 2305 /* Master clock not used, so we can just use CLOCK_BOOTTIME. */ 2306 return ktime_get_boottime_ns(); 2307 } 2308 #endif 2309 2310 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs) 2311 { 2312 int version; 2313 int r; 2314 struct pvclock_wall_clock wc; 2315 u32 wc_sec_hi; 2316 u64 wall_nsec; 2317 2318 if (!wall_clock) 2319 return; 2320 2321 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); 2322 if (r) 2323 return; 2324 2325 if (version & 1) 2326 ++version; /* first time write, random junk */ 2327 2328 ++version; 2329 2330 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version))) 2331 return; 2332 2333 /* 2334 * The guest calculates current wall clock time by adding 2335 * system time (updated by kvm_guest_time_update below) to the 2336 * wall clock specified here. We do the reverse here. 
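 *
 * Illustrative arithmetic (made-up values): if host real time is
 * 1000s at a moment when the VM's kvmclock reads 25s, the stored
 * wall clock base is 975s; a guest that later reads kvmclock = 30s
 * computes 975s + 30s = 1005s, the real time at that later moment.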
2337 */ 2338 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); 2339 2340 wc.nsec = do_div(wall_nsec, 1000000000); 2341 wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */ 2342 wc.version = version; 2343 2344 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); 2345 2346 if (sec_hi_ofs) { 2347 wc_sec_hi = wall_nsec >> 32; 2348 kvm_write_guest(kvm, wall_clock + sec_hi_ofs, 2349 &wc_sec_hi, sizeof(wc_sec_hi)); 2350 } 2351 2352 version++; 2353 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 2354 } 2355 2356 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, 2357 bool old_msr, bool host_initiated) 2358 { 2359 struct kvm_arch *ka = &vcpu->kvm->arch; 2360 2361 if (vcpu->vcpu_id == 0 && !host_initiated) { 2362 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) 2363 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2364 2365 ka->boot_vcpu_runs_old_kvmclock = old_msr; 2366 } 2367 2368 vcpu->arch.time = system_time; 2369 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 2370 2371 /* we verify if the enable bit is set... */ 2372 if (system_time & 1) 2373 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, 2374 sizeof(struct pvclock_vcpu_time_info)); 2375 else 2376 kvm_gpc_deactivate(&vcpu->arch.pv_time); 2377 2378 return; 2379 } 2380 2381 static uint32_t div_frac(uint32_t dividend, uint32_t divisor) 2382 { 2383 do_shl32_div32(dividend, divisor); 2384 return dividend; 2385 } 2386 2387 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz, 2388 s8 *pshift, u32 *pmultiplier) 2389 { 2390 uint64_t scaled64; 2391 int32_t shift = 0; 2392 uint64_t tps64; 2393 uint32_t tps32; 2394 2395 tps64 = base_hz; 2396 scaled64 = scaled_hz; 2397 while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { 2398 tps64 >>= 1; 2399 shift--; 2400 } 2401 2402 tps32 = (uint32_t)tps64; 2403 while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { 2404 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) 2405 scaled64 >>= 1; 2406 else 2407 tps32 <<= 1; 2408 shift++; 2409 } 2410 2411 *pshift = shift; 2412 *pmultiplier = div_frac(scaled64, tps32); 2413 } 2414 2415 #ifdef CONFIG_X86_64 2416 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); 2417 #endif 2418 2419 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 2420 static unsigned long max_tsc_khz; 2421 2422 static u32 adjust_tsc_khz(u32 khz, s32 ppm) 2423 { 2424 u64 v = (u64)khz * (1000000 + ppm); 2425 do_div(v, 1000000); 2426 return v; 2427 } 2428 2429 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier); 2430 2431 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) 2432 { 2433 u64 ratio; 2434 2435 /* Guest TSC same frequency as host TSC? */ 2436 if (!scale) { 2437 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2438 return 0; 2439 } 2440 2441 /* TSC scaling supported? 
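 * If not, a guest TSC frequency above the host's can still be
 * emulated via catchup mode, which periodically nudges the TSC
 * offset forward, but a slower guest TSC would require intercepting
 * RDTSC, so that case is refused below.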
*/ 2442 if (!kvm_caps.has_tsc_control) { 2443 if (user_tsc_khz > tsc_khz) { 2444 vcpu->arch.tsc_catchup = 1; 2445 vcpu->arch.tsc_always_catchup = 1; 2446 return 0; 2447 } else { 2448 pr_warn_ratelimited("user requested TSC rate below hardware speed\n"); 2449 return -1; 2450 } 2451 } 2452 2453 /* TSC scaling required - calculate ratio */ 2454 ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits, 2455 user_tsc_khz, tsc_khz); 2456 2457 if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) { 2458 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 2459 user_tsc_khz); 2460 return -1; 2461 } 2462 2463 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); 2464 return 0; 2465 } 2466 2467 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) 2468 { 2469 u32 thresh_lo, thresh_hi; 2470 int use_scaling = 0; 2471 2472 /* tsc_khz can be zero if TSC calibration fails */ 2473 if (user_tsc_khz == 0) { 2474 /* set tsc_scaling_ratio to a safe value */ 2475 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); 2476 return -1; 2477 } 2478 2479 /* Compute a scale to convert nanoseconds in TSC cycles */ 2480 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, 2481 &vcpu->arch.virtual_tsc_shift, 2482 &vcpu->arch.virtual_tsc_mult); 2483 vcpu->arch.virtual_tsc_khz = user_tsc_khz; 2484 2485 /* 2486 * Compute the variation in TSC rate which is acceptable 2487 * within the range of tolerance and decide if the 2488 * rate being applied is within those bounds of the hardware 2489 * rate. If so, no scaling or compensation need be done. 2490 */ 2491 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2492 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2493 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2494 pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n", 2495 user_tsc_khz, thresh_lo, thresh_hi); 2496 use_scaling = 1; 2497 } 2498 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); 2499 } 2500 2501 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) 2502 { 2503 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, 2504 vcpu->arch.virtual_tsc_mult, 2505 vcpu->arch.virtual_tsc_shift); 2506 tsc += vcpu->arch.this_tsc_write; 2507 return tsc; 2508 } 2509 2510 #ifdef CONFIG_X86_64 2511 static inline int gtod_is_based_on_tsc(int mode) 2512 { 2513 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK; 2514 } 2515 #endif 2516 2517 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) 2518 { 2519 #ifdef CONFIG_X86_64 2520 bool vcpus_matched; 2521 struct kvm_arch *ka = &vcpu->kvm->arch; 2522 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2523 2524 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2525 atomic_read(&vcpu->kvm->online_vcpus)); 2526 2527 /* 2528 * Once the masterclock is enabled, always perform a request in 2529 * order to update it. 2530 * 2531 * In order to enable the masterclock, the host clocksource must be TSC 2532 * and the vcpus need to have matched TSCs. When that happens, 2533 * perform a request to enable the masterclock. 2534 */ 2535 if (ka->use_master_clock || 2536 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) 2537 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); 2538 2539 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, 2540 atomic_read(&vcpu->kvm->online_vcpus), 2541 ka->use_master_clock, gtod->clock.vclock_mode); 2542 #endif 2543 } 2544 2545 /* 2546 * Multiply tsc by a fixed point number represented by ratio.
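 *
 * Worked example (illustrative, assuming 48 fractional bits, the VMX
 * value): a ratio of 3ULL << 47 encodes 1.5, so a host TSC delta of
 * 1000 cycles scales to mul_u64_u64_shr(1000, 3ULL << 47, 48) = 1500
 * guest cycles.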
2547 * 2548 * The most significant 64-N bits (mult) of ratio represent the 2549 * integral part of the fixed point number; the remaining N bits 2550 * (frac) represent the fractional part, i.e. ratio represents a fixed 2551 * point number (mult + frac * 2^(-N)). 2552 * 2553 * N equals kvm_caps.tsc_scaling_ratio_frac_bits. 2554 */ 2555 static inline u64 __scale_tsc(u64 ratio, u64 tsc) 2556 { 2557 return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits); 2558 } 2559 2560 u64 kvm_scale_tsc(u64 tsc, u64 ratio) 2561 { 2562 u64 _tsc = tsc; 2563 2564 if (ratio != kvm_caps.default_tsc_scaling_ratio) 2565 _tsc = __scale_tsc(ratio, tsc); 2566 2567 return _tsc; 2568 } 2569 2570 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) 2571 { 2572 u64 tsc; 2573 2574 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); 2575 2576 return target_tsc - tsc; 2577 } 2578 2579 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 2580 { 2581 return vcpu->arch.l1_tsc_offset + 2582 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2583 } 2584 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2585 2586 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2587 { 2588 u64 nested_offset; 2589 2590 if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio) 2591 nested_offset = l1_offset; 2592 else 2593 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier, 2594 kvm_caps.tsc_scaling_ratio_frac_bits); 2595 2596 nested_offset += l2_offset; 2597 return nested_offset; 2598 } 2599 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2600 2601 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2602 { 2603 if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio) 2604 return mul_u64_u64_shr(l1_multiplier, l2_multiplier, 2605 kvm_caps.tsc_scaling_ratio_frac_bits); 2606 2607 return l1_multiplier; 2608 } 2609 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2610 2611 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2612 { 2613 trace_kvm_write_tsc_offset(vcpu->vcpu_id, 2614 vcpu->arch.l1_tsc_offset, 2615 l1_offset); 2616 2617 vcpu->arch.l1_tsc_offset = l1_offset; 2618 2619 /* 2620 * If we are here because L1 chose not to trap WRMSR to TSC then 2621 * according to the spec this should set L1's TSC (as opposed to 2622 * setting L1's offset for L2). 2623 */ 2624 if (is_guest_mode(vcpu)) 2625 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( 2626 l1_offset, 2627 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), 2628 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2629 else 2630 vcpu->arch.tsc_offset = l1_offset; 2631 2632 static_call(kvm_x86_write_tsc_offset)(vcpu); 2633 } 2634 2635 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) 2636 { 2637 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; 2638 2639 /* Userspace is changing the multiplier while L2 is active */ 2640 if (is_guest_mode(vcpu)) 2641 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( 2642 l1_multiplier, 2643 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); 2644 else 2645 vcpu->arch.tsc_scaling_ratio = l1_multiplier; 2646 2647 if (kvm_caps.has_tsc_control) 2648 static_call(kvm_x86_write_tsc_multiplier)(vcpu); 2649 } 2650 2651 static inline bool kvm_check_tsc_unstable(void) 2652 { 2653 #ifdef CONFIG_X86_64 2654 /* 2655 * TSC is marked unstable when we're running on Hyper-V, 2656 * but the 'TSC page' clocksource is still good.
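 * The TSC page already carries the hypervisor's scale and offset
 * corrections (e.g. across migration), so HVCLOCK can be treated as
 * stable even though the raw TSC was marked unstable.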
*/ 2658 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK) 2659 return false; 2660 #endif 2661 return check_tsc_unstable(); 2662 } 2663 2664 /* 2665 * Infers attempts to synchronize the guest's tsc from host writes. Sets the 2666 * offset for the vcpu and tracks the TSC matching generation that the vcpu 2667 * participates in. 2668 */ 2669 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, 2670 u64 ns, bool matched) 2671 { 2672 struct kvm *kvm = vcpu->kvm; 2673 2674 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2675 2676 /* 2677 * We also track the most recent recorded KHZ, write and time to 2678 * allow the matching interval to be extended at each write. 2679 */ 2680 kvm->arch.last_tsc_nsec = ns; 2681 kvm->arch.last_tsc_write = tsc; 2682 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; 2683 kvm->arch.last_tsc_offset = offset; 2684 2685 vcpu->arch.last_guest_tsc = tsc; 2686 2687 kvm_vcpu_write_tsc_offset(vcpu, offset); 2688 2689 if (!matched) { 2690 /* 2691 * We split periods of matched TSC writes into generations. 2692 * For each generation, we track the original measured 2693 * nanosecond time, offset, and write, so if TSCs are in 2694 * sync, we can match exact offset, and if not, we can match 2695 * exact software computation in compute_guest_tsc(). 2696 * 2697 * These values are tracked in kvm->arch.cur_xxx variables. 2698 */ 2699 kvm->arch.cur_tsc_generation++; 2700 kvm->arch.cur_tsc_nsec = ns; 2701 kvm->arch.cur_tsc_write = tsc; 2702 kvm->arch.cur_tsc_offset = offset; 2703 kvm->arch.nr_vcpus_matched_tsc = 0; 2704 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { 2705 kvm->arch.nr_vcpus_matched_tsc++; 2706 } 2707 2708 /* Keep track of which generation this VCPU has synchronized to */ 2709 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; 2710 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; 2711 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; 2712 2713 kvm_track_tsc_matching(vcpu); 2714 } 2715 2716 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) 2717 { 2718 struct kvm *kvm = vcpu->kvm; 2719 u64 offset, ns, elapsed; 2720 unsigned long flags; 2721 bool matched = false; 2722 bool synchronizing = false; 2723 2724 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 2725 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2726 ns = get_kvmclock_base_ns(); 2727 elapsed = ns - kvm->arch.last_tsc_nsec; 2728 2729 if (vcpu->arch.virtual_tsc_khz) { 2730 if (data == 0) { 2731 /* 2732 * detection of vcpu initialization -- need to sync 2733 * with other vCPUs. This particularly helps to keep 2734 * kvm_clock stable after CPU hotplug 2735 */ 2736 synchronizing = true; 2737 } else { 2738 u64 tsc_exp = kvm->arch.last_tsc_write + 2739 nsec_to_cycles(vcpu, elapsed); 2740 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; 2741 /* 2742 * Special case: TSC write with a small delta (1 second) 2743 * of virtual cycle time against real time is 2744 * interpreted as an attempt to synchronize the CPU. 2745 */ 2746 synchronizing = data < tsc_exp + tsc_hz && 2747 data + tsc_hz > tsc_exp; 2748 } 2749 } 2750 2751 /* 2752 * For a reliable TSC, we can match TSC offsets, and for an unstable 2753 * TSC, we add elapsed time in this computation. We could let the 2754 * compensation code attempt to catch up if we fall behind, but 2755 * it's better to try to match offsets from the beginning.
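 *
 * Illustrative case: vCPU0 writes TSC = 0 and, 100ms later, vCPU1
 * also writes 0. With a stable host TSC both vCPUs reuse the exact
 * offset captured at the first write; with an unstable TSC the
 * second write is instead advanced by nsec_to_cycles(vcpu, 100ms)
 * before its offset is computed.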
2756 */ 2757 if (synchronizing && 2758 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { 2759 if (!kvm_check_tsc_unstable()) { 2760 offset = kvm->arch.cur_tsc_offset; 2761 } else { 2762 u64 delta = nsec_to_cycles(vcpu, elapsed); 2763 data += delta; 2764 offset = kvm_compute_l1_tsc_offset(vcpu, data); 2765 } 2766 matched = true; 2767 } 2768 2769 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); 2770 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2771 } 2772 2773 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, 2774 s64 adjustment) 2775 { 2776 u64 tsc_offset = vcpu->arch.l1_tsc_offset; 2777 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); 2778 } 2779 2780 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) 2781 { 2782 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) 2783 WARN_ON(adjustment < 0); 2784 adjustment = kvm_scale_tsc((u64) adjustment, 2785 vcpu->arch.l1_tsc_scaling_ratio); 2786 adjust_tsc_offset_guest(vcpu, adjustment); 2787 } 2788 2789 #ifdef CONFIG_X86_64 2790 2791 static u64 read_tsc(void) 2792 { 2793 u64 ret = (u64)rdtsc_ordered(); 2794 u64 last = pvclock_gtod_data.clock.cycle_last; 2795 2796 if (likely(ret >= last)) 2797 return ret; 2798 2799 /* 2800 * GCC likes to generate cmov here, but this branch is extremely 2801 * predictable (it's just a function of time and the likely is 2802 * very likely) and there's a data dependence, so force GCC 2803 * to generate a branch instead. I don't barrier() because 2804 * we don't actually need a barrier, and if this function 2805 * ever gets inlined it will generate worse code. 2806 */ 2807 asm volatile (""); 2808 return last; 2809 } 2810 2811 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp, 2812 int *mode) 2813 { 2814 u64 tsc_pg_val; 2815 long v; 2816 2817 switch (clock->vclock_mode) { 2818 case VDSO_CLOCKMODE_HVCLOCK: 2819 if (hv_read_tsc_page_tsc(hv_get_tsc_page(), 2820 tsc_timestamp, &tsc_pg_val)) { 2821 /* TSC page valid */ 2822 *mode = VDSO_CLOCKMODE_HVCLOCK; 2823 v = (tsc_pg_val - clock->cycle_last) & 2824 clock->mask; 2825 } else { 2826 /* TSC page invalid */ 2827 *mode = VDSO_CLOCKMODE_NONE; 2828 } 2829 break; 2830 case VDSO_CLOCKMODE_TSC: 2831 *mode = VDSO_CLOCKMODE_TSC; 2832 *tsc_timestamp = read_tsc(); 2833 v = (*tsc_timestamp - clock->cycle_last) & 2834 clock->mask; 2835 break; 2836 default: 2837 *mode = VDSO_CLOCKMODE_NONE; 2838 } 2839 2840 if (*mode == VDSO_CLOCKMODE_NONE) 2841 *tsc_timestamp = v = 0; 2842 2843 return v * clock->mult; 2844 } 2845 2846 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp) 2847 { 2848 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2849 unsigned long seq; 2850 int mode; 2851 u64 ns; 2852 2853 do { 2854 seq = read_seqcount_begin(&gtod->seq); 2855 ns = gtod->raw_clock.base_cycles; 2856 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); 2857 ns >>= gtod->raw_clock.shift; 2858 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); 2859 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2860 *t = ns; 2861 2862 return mode; 2863 } 2864 2865 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) 2866 { 2867 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 2868 unsigned long seq; 2869 int mode; 2870 u64 ns; 2871 2872 do { 2873 seq = read_seqcount_begin(&gtod->seq); 2874 ts->tv_sec = gtod->wall_time_sec; 2875 ns = gtod->clock.base_cycles; 2876 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); 2877 ns >>= gtod->clock.shift; 2878 }
while (unlikely(read_seqcount_retry(&gtod->seq, seq))); 2879 2880 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 2881 ts->tv_nsec = ns; 2882 2883 return mode; 2884 } 2885 2886 /* returns true if host is using TSC based clocksource */ 2887 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) 2888 { 2889 /* checked again under seqlock below */ 2890 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2891 return false; 2892 2893 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns, 2894 tsc_timestamp)); 2895 } 2896 2897 /* returns true if host is using TSC based clocksource */ 2898 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, 2899 u64 *tsc_timestamp) 2900 { 2901 /* checked again under seqlock below */ 2902 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) 2903 return false; 2904 2905 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); 2906 } 2907 #endif 2908 2909 /* 2910 * 2911 * Assuming a stable TSC across physical CPUs, and a stable TSC 2912 * across virtual CPUs, the following condition is possible. 2913 * Each numbered line represents an event visible to both 2914 * CPUs at the next numbered event. 2915 * 2916 * "timespecX" represents host monotonic time. "tscX" represents 2917 * RDTSC value. 2918 * 2919 * VCPU0 on CPU0 | VCPU1 on CPU1 2920 * 2921 * 1. read timespec0,tsc0 2922 * 2. | timespec1 = timespec0 + N 2923 * | tsc1 = tsc0 + M 2924 * 3. transition to guest | transition to guest 2925 * 4. ret0 = timespec0 + (rdtsc - tsc0) | 2926 * 5. | ret1 = timespec1 + (rdtsc - tsc1) 2927 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) 2928 * 2929 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: 2930 * 2931 * - ret0 < ret1 2932 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) 2933 * ... 2934 * - 0 < N - M => M < N 2935 * 2936 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not 2937 * always the case (the difference between two distinct xtime instances 2938 * might be smaller than the difference between corresponding TSC reads, 2939 * when updating guest vcpus pvclock areas). 2940 * 2941 * To avoid that problem, do not allow visibility of distinct 2942 * system_timestamp/tsc_timestamp values simultaneously: use a master 2943 * copy of host monotonic time values. Update that master copy 2944 * in lockstep. 2945 * 2946 * Rely on synchronization of host TSCs and guest TSCs for monotonicity. 2947 * 2948 */ 2949 2950 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) 2951 { 2952 #ifdef CONFIG_X86_64 2953 struct kvm_arch *ka = &kvm->arch; 2954 int vclock_mode; 2955 bool host_tsc_clocksource, vcpus_matched; 2956 2957 lockdep_assert_held(&kvm->arch.tsc_write_lock); 2958 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == 2959 atomic_read(&kvm->online_vcpus)); 2960 2961 /* 2962 * If the host uses TSC clock, then passthrough TSC as stable 2963 * to the guest.
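 * Beyond the clocksource check below, enabling the masterclock also
 * requires that all vCPU TSCs matched, that no backwards TSC was
 * ever observed, and that the boot vCPU is not using the legacy
 * MSR_KVM_SYSTEM_TIME interface.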
2964 */ 2965 host_tsc_clocksource = kvm_get_time_and_clockread( 2966 &ka->master_kernel_ns, 2967 &ka->master_cycle_now); 2968 2969 ka->use_master_clock = host_tsc_clocksource && vcpus_matched 2970 && !ka->backwards_tsc_observed 2971 && !ka->boot_vcpu_runs_old_kvmclock; 2972 2973 if (ka->use_master_clock) 2974 atomic_set(&kvm_guest_has_master_clock, 1); 2975 2976 vclock_mode = pvclock_gtod_data.clock.vclock_mode; 2977 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, 2978 vcpus_matched); 2979 #endif 2980 } 2981 2982 static void kvm_make_mclock_inprogress_request(struct kvm *kvm) 2983 { 2984 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 2985 } 2986 2987 static void __kvm_start_pvclock_update(struct kvm *kvm) 2988 { 2989 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); 2990 write_seqcount_begin(&kvm->arch.pvclock_sc); 2991 } 2992 2993 static void kvm_start_pvclock_update(struct kvm *kvm) 2994 { 2995 kvm_make_mclock_inprogress_request(kvm); 2996 2997 /* no guest entries from this point */ 2998 __kvm_start_pvclock_update(kvm); 2999 } 3000 3001 static void kvm_end_pvclock_update(struct kvm *kvm) 3002 { 3003 struct kvm_arch *ka = &kvm->arch; 3004 struct kvm_vcpu *vcpu; 3005 unsigned long i; 3006 3007 write_seqcount_end(&ka->pvclock_sc); 3008 raw_spin_unlock_irq(&ka->tsc_write_lock); 3009 kvm_for_each_vcpu(i, vcpu, kvm) 3010 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3011 3012 /* guest entries allowed */ 3013 kvm_for_each_vcpu(i, vcpu, kvm) 3014 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 3015 } 3016 3017 static void kvm_update_masterclock(struct kvm *kvm) 3018 { 3019 kvm_hv_request_tsc_page_update(kvm); 3020 kvm_start_pvclock_update(kvm); 3021 pvclock_update_vm_gtod_copy(kvm); 3022 kvm_end_pvclock_update(kvm); 3023 } 3024 3025 /* 3026 * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's 3027 * per-CPU value (which may be zero if a CPU is going offline). Note, tsc_khz 3028 * can change during boot even if the TSC is constant, as it's possible for KVM 3029 * to be loaded before TSC calibration completes. Ideally, KVM would get a 3030 * notification when calibration completes, but practically speaking calibration 3031 * will complete before userspace is alive enough to create VMs. 3032 */ 3033 static unsigned long get_cpu_tsc_khz(void) 3034 { 3035 if (static_cpu_has(X86_FEATURE_CONSTANT_TSC)) 3036 return tsc_khz; 3037 else 3038 return __this_cpu_read(cpu_tsc_khz); 3039 } 3040 3041 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. 
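 * The read side may be retried, so the function below must stay cheap,
 * side-effect free and non-sleeping.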
*/ 3042 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 3043 { 3044 struct kvm_arch *ka = &kvm->arch; 3045 struct pvclock_vcpu_time_info hv_clock; 3046 3047 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 3048 get_cpu(); 3049 3050 data->flags = 0; 3051 if (ka->use_master_clock && 3052 (static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) { 3053 #ifdef CONFIG_X86_64 3054 struct timespec64 ts; 3055 3056 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) { 3057 data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec; 3058 data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC; 3059 } else 3060 #endif 3061 data->host_tsc = rdtsc(); 3062 3063 data->flags |= KVM_CLOCK_TSC_STABLE; 3064 hv_clock.tsc_timestamp = ka->master_cycle_now; 3065 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 3066 kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL, 3067 &hv_clock.tsc_shift, 3068 &hv_clock.tsc_to_system_mul); 3069 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc); 3070 } else { 3071 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset; 3072 } 3073 3074 put_cpu(); 3075 } 3076 3077 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data) 3078 { 3079 struct kvm_arch *ka = &kvm->arch; 3080 unsigned seq; 3081 3082 do { 3083 seq = read_seqcount_begin(&ka->pvclock_sc); 3084 __get_kvmclock(kvm, data); 3085 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3086 } 3087 3088 u64 get_kvmclock_ns(struct kvm *kvm) 3089 { 3090 struct kvm_clock_data data; 3091 3092 get_kvmclock(kvm, &data); 3093 return data.clock; 3094 } 3095 3096 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v, 3097 struct gfn_to_pfn_cache *gpc, 3098 unsigned int offset) 3099 { 3100 struct kvm_vcpu_arch *vcpu = &v->arch; 3101 struct pvclock_vcpu_time_info *guest_hv_clock; 3102 unsigned long flags; 3103 3104 read_lock_irqsave(&gpc->lock, flags); 3105 while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) { 3106 read_unlock_irqrestore(&gpc->lock, flags); 3107 3108 if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock))) 3109 return; 3110 3111 read_lock_irqsave(&gpc->lock, flags); 3112 } 3113 3114 guest_hv_clock = (void *)(gpc->khva + offset); 3115 3116 /* 3117 * This VCPU is paused, but it's legal for a guest to read another 3118 * VCPU's kvmclock, so we really have to follow the specification where 3119 * it says that version is odd if data is being modified, and even after 3120 * it is consistent. 
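	 *
	 * A conforming guest-side reader therefore looks roughly like this
	 * (sketch, field names abbreviated):
	 *
	 *	do {
	 *		version = READ_ONCE(hvc->version);
	 *		rmb();
	 *		... snapshot tsc_timestamp, system_time, mul, shift ...
	 *		rmb();
	 *	} while ((version & 1) || version != READ_ONCE(hvc->version));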
3121 */ 3122 3123 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; 3124 smp_wmb(); 3125 3126 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 3127 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 3128 3129 if (vcpu->pvclock_set_guest_stopped_request) { 3130 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; 3131 vcpu->pvclock_set_guest_stopped_request = false; 3132 } 3133 3134 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); 3135 smp_wmb(); 3136 3137 guest_hv_clock->version = ++vcpu->hv_clock.version; 3138 3139 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); 3140 read_unlock_irqrestore(&gpc->lock, flags); 3141 3142 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3143 } 3144 3145 static int kvm_guest_time_update(struct kvm_vcpu *v) 3146 { 3147 unsigned long flags, tgt_tsc_khz; 3148 unsigned seq; 3149 struct kvm_vcpu_arch *vcpu = &v->arch; 3150 struct kvm_arch *ka = &v->kvm->arch; 3151 s64 kernel_ns; 3152 u64 tsc_timestamp, host_tsc; 3153 u8 pvclock_flags; 3154 bool use_master_clock; 3155 3156 kernel_ns = 0; 3157 host_tsc = 0; 3158 3159 /* 3160 * If the host uses TSC clock, then passthrough TSC as stable 3161 * to the guest. 3162 */ 3163 do { 3164 seq = read_seqcount_begin(&ka->pvclock_sc); 3165 use_master_clock = ka->use_master_clock; 3166 if (use_master_clock) { 3167 host_tsc = ka->master_cycle_now; 3168 kernel_ns = ka->master_kernel_ns; 3169 } 3170 } while (read_seqcount_retry(&ka->pvclock_sc, seq)); 3171 3172 /* Keep irq disabled to prevent changes to the clock */ 3173 local_irq_save(flags); 3174 tgt_tsc_khz = get_cpu_tsc_khz(); 3175 if (unlikely(tgt_tsc_khz == 0)) { 3176 local_irq_restore(flags); 3177 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3178 return 1; 3179 } 3180 if (!use_master_clock) { 3181 host_tsc = rdtsc(); 3182 kernel_ns = get_kvmclock_base_ns(); 3183 } 3184 3185 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 3186 3187 /* 3188 * We may have to catch up the TSC to match elapsed wall clock 3189 * time for two reasons, even if kvmclock is used. 3190 * 1) CPU could have been running below the maximum TSC rate 3191 * 2) Broken TSC compensation resets the base at each VCPU 3192 * entry to avoid unknown leaps of TSC even when running 3193 * again on the same CPU. This may cause apparent elapsed 3194 * time to disappear, and the guest to stand still or run 3195 * very slowly. 
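	 *
	 * Note that the catchup below only ever moves the guest TSC
	 * forward: the offset is bumped when compute_guest_tsc() is ahead
	 * of the hardware-derived timestamp, never the other way around.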
3196 */ 3197 if (vcpu->tsc_catchup) { 3198 u64 tsc = compute_guest_tsc(v, kernel_ns); 3199 if (tsc > tsc_timestamp) { 3200 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); 3201 tsc_timestamp = tsc; 3202 } 3203 } 3204 3205 local_irq_restore(flags); 3206 3207 /* With all the info we got, fill in the values */ 3208 3209 if (kvm_caps.has_tsc_control) 3210 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz, 3211 v->arch.l1_tsc_scaling_ratio); 3212 3213 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { 3214 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, 3215 &vcpu->hv_clock.tsc_shift, 3216 &vcpu->hv_clock.tsc_to_system_mul); 3217 vcpu->hw_tsc_khz = tgt_tsc_khz; 3218 kvm_xen_update_tsc_info(v); 3219 } 3220 3221 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; 3222 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; 3223 vcpu->last_guest_tsc = tsc_timestamp; 3224 3225 /* If the host uses TSC clocksource, then it is stable */ 3226 pvclock_flags = 0; 3227 if (use_master_clock) 3228 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; 3229 3230 vcpu->hv_clock.flags = pvclock_flags; 3231 3232 if (vcpu->pv_time.active) 3233 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); 3234 if (vcpu->xen.vcpu_info_cache.active) 3235 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, 3236 offsetof(struct compat_vcpu_info, time)); 3237 if (vcpu->xen.vcpu_time_info_cache.active) 3238 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); 3239 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 3240 return 0; 3241 } 3242 3243 /* 3244 * kvmclock updates which are isolated to a given vcpu, such as 3245 * vcpu->cpu migration, should not allow system_timestamp from 3246 * the rest of the vcpus to remain static. Otherwise ntp frequency 3247 * correction applies to one vcpu's system_timestamp but not 3248 * the others. 3249 * 3250 * So in those cases, request a kvmclock update for all vcpus. 3251 * We need to rate-limit these requests though, as they can 3252 * considerably slow guests that have a large number of vcpus. 3253 * The time for a remote vcpu to update its kvmclock is bound 3254 * by the delay we use to rate-limit the updates. 3255 */ 3256 3257 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100) 3258 3259 static void kvmclock_update_fn(struct work_struct *work) 3260 { 3261 unsigned long i; 3262 struct delayed_work *dwork = to_delayed_work(work); 3263 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3264 kvmclock_update_work); 3265 struct kvm *kvm = container_of(ka, struct kvm, arch); 3266 struct kvm_vcpu *vcpu; 3267 3268 kvm_for_each_vcpu(i, vcpu, kvm) { 3269 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3270 kvm_vcpu_kick(vcpu); 3271 } 3272 } 3273 3274 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) 3275 { 3276 struct kvm *kvm = v->kvm; 3277 3278 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); 3279 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 3280 KVMCLOCK_UPDATE_DELAY); 3281 } 3282 3283 #define KVMCLOCK_SYNC_PERIOD (300 * HZ) 3284 3285 static void kvmclock_sync_fn(struct work_struct *work) 3286 { 3287 struct delayed_work *dwork = to_delayed_work(work); 3288 struct kvm_arch *ka = container_of(dwork, struct kvm_arch, 3289 kvmclock_sync_work); 3290 struct kvm *kvm = container_of(ka, struct kvm, arch); 3291 3292 if (!kvmclock_periodic_sync) 3293 return; 3294 3295 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); 3296 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, 3297 KVMCLOCK_SYNC_PERIOD); 3298 } 3299 3300 /* These helpers are safe iff @msr is known to be an MCx bank MSR. 
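 * Each MCE bank is a block of four consecutive MSRs in CTL, STATUS,
 * ADDR, MISC order, so (msr & 3) selects the register within its bank.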
*/ 3301 static bool is_mci_control_msr(u32 msr) 3302 { 3303 return (msr & 3) == 0; 3304 } 3305 static bool is_mci_status_msr(u32 msr) 3306 { 3307 return (msr & 3) == 1; 3308 } 3309 3310 /* 3311 * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP. 3312 */ 3313 static bool can_set_mci_status(struct kvm_vcpu *vcpu) 3314 { 3315 /* McStatusWrEn enabled? */ 3316 if (guest_cpuid_is_amd_or_hygon(vcpu)) 3317 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); 3318 3319 return false; 3320 } 3321 3322 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3323 { 3324 u64 mcg_cap = vcpu->arch.mcg_cap; 3325 unsigned bank_num = mcg_cap & 0xff; 3326 u32 msr = msr_info->index; 3327 u64 data = msr_info->data; 3328 u32 offset, last_msr; 3329 3330 switch (msr) { 3331 case MSR_IA32_MCG_STATUS: 3332 vcpu->arch.mcg_status = data; 3333 break; 3334 case MSR_IA32_MCG_CTL: 3335 if (!(mcg_cap & MCG_CTL_P) && 3336 (data || !msr_info->host_initiated)) 3337 return 1; 3338 if (data != 0 && data != ~(u64)0) 3339 return 1; 3340 vcpu->arch.mcg_ctl = data; 3341 break; 3342 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3343 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 3344 if (msr > last_msr) 3345 return 1; 3346 3347 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated)) 3348 return 1; 3349 /* An attempt to write a 1 to a reserved bit raises #GP */ 3350 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK)) 3351 return 1; 3352 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 3353 last_msr + 1 - MSR_IA32_MC0_CTL2); 3354 vcpu->arch.mci_ctl2_banks[offset] = data; 3355 break; 3356 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3357 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 3358 if (msr > last_msr) 3359 return 1; 3360 3361 /* 3362 * Only 0 or all 1s can be written to IA32_MCi_CTL, all other 3363 * values are architecturally undefined. But, some Linux 3364 * kernels clear bit 10 in bank 4 to work around a BIOS/GART TLB 3365 * issue on AMD K8s; allow bit 10 to be clear when setting all 3366 * other bits in order to avoid an uncaught #GP in the guest. 3367 * 3368 * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable, 3369 * single-bit ECC data errors. 3370 */ 3371 if (is_mci_control_msr(msr) && 3372 data != 0 && (data | (1 << 10) | 1) != ~(u64)0) 3373 return 1; 3374 3375 /* 3376 * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR. 3377 * AMD-based CPUs allow non-zero values, but if and only if 3378 * HWCR[McStatusWrEn] is set.
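 * (The bit exists so that error-injection tooling in the guest,
 * e.g. mce-inject, can plant synthetic MCi_STATUS values.)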
*/ 3380 if (!msr_info->host_initiated && is_mci_status_msr(msr) && 3381 data != 0 && !can_set_mci_status(vcpu)) 3382 return 1; 3383 3384 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 3385 last_msr + 1 - MSR_IA32_MC0_CTL); 3386 vcpu->arch.mce_banks[offset] = data; 3387 break; 3388 default: 3389 return 1; 3390 } 3391 return 0; 3392 } 3393 3394 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) 3395 { 3396 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; 3397 3398 return (vcpu->arch.apf.msr_en_val & mask) == mask; 3399 } 3400 3401 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) 3402 { 3403 gpa_t gpa = data & ~0x3f; 3404 3405 /* Bits 4:5 are reserved, should be zero */ 3406 if (data & 0x30) 3407 return 1; 3408 3409 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && 3410 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT)) 3411 return 1; 3412 3413 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && 3414 (data & KVM_ASYNC_PF_DELIVERY_AS_INT)) 3415 return 1; 3416 3417 if (!lapic_in_kernel(vcpu)) 3418 return data ? 1 : 0; 3419 3420 vcpu->arch.apf.msr_en_val = data; 3421 3422 if (!kvm_pv_async_pf_enabled(vcpu)) { 3423 kvm_clear_async_pf_completion_queue(vcpu); 3424 kvm_async_pf_hash_reset(vcpu); 3425 return 0; 3426 } 3427 3428 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, 3429 sizeof(u64))) 3430 return 1; 3431 3432 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 3433 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; 3434 3435 kvm_async_pf_wakeup_all(vcpu); 3436 3437 return 0; 3438 } 3439 3440 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) 3441 { 3442 /* Bits 8-63 are reserved */ 3443 if (data >> 8) 3444 return 1; 3445 3446 if (!lapic_in_kernel(vcpu)) 3447 return 1; 3448 3449 vcpu->arch.apf.msr_int_val = data; 3450 3451 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; 3452 3453 return 0; 3454 } 3455 3456 static void kvmclock_reset(struct kvm_vcpu *vcpu) 3457 { 3458 kvm_gpc_deactivate(&vcpu->arch.pv_time); 3459 vcpu->arch.time = 0; 3460 } 3461 3462 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 3463 { 3464 ++vcpu->stat.tlb_flush; 3465 static_call(kvm_x86_flush_tlb_all)(vcpu); 3466 3467 /* Flushing all ASIDs flushes the current ASID... */ 3468 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 3469 } 3470 3471 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 3472 { 3473 ++vcpu->stat.tlb_flush; 3474 3475 if (!tdp_enabled) { 3476 /* 3477 * A TLB flush on behalf of the guest is equivalent to 3478 * INVPCID(all), toggling CR4.PGE, etc., which requires 3479 * a forced sync of the shadow page tables. Ensure all the 3480 * roots are synced and the guest TLB in hardware is clean. 3481 */ 3482 kvm_mmu_sync_roots(vcpu); 3483 kvm_mmu_sync_prev_roots(vcpu); 3484 } 3485 3486 static_call(kvm_x86_flush_tlb_guest)(vcpu); 3487 3488 /* 3489 * Flushing all "guest" TLB is always a superset of Hyper-V's fine 3490 * grained flushing. 3491 */ 3492 kvm_hv_vcpu_purge_flush_tlb(vcpu); 3493 } 3494 3495 3496 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) 3497 { 3498 ++vcpu->stat.tlb_flush; 3499 static_call(kvm_x86_flush_tlb_current)(vcpu); 3500 } 3501 3502 /* 3503 * Service "local" TLB flush requests, which are specific to the current MMU 3504 * context.
In addition to the generic event handling in vcpu_enter_guest(), 3505 * TLB flushes that are targeted at an MMU context also need to be serviced 3506 * prior to nested VM-Enter/VM-Exit. 3507 */ 3508 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) 3509 { 3510 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) 3511 kvm_vcpu_flush_tlb_current(vcpu); 3512 3513 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3514 kvm_vcpu_flush_tlb_guest(vcpu); 3515 } 3516 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3517 3518 static void record_steal_time(struct kvm_vcpu *vcpu) 3519 { 3520 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 3521 struct kvm_steal_time __user *st; 3522 struct kvm_memslots *slots; 3523 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 3524 u64 steal; 3525 u32 version; 3526 3527 if (kvm_xen_msr_enabled(vcpu->kvm)) { 3528 kvm_xen_runstate_set_running(vcpu); 3529 return; 3530 } 3531 3532 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 3533 return; 3534 3535 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) 3536 return; 3537 3538 slots = kvm_memslots(vcpu->kvm); 3539 3540 if (unlikely(slots->generation != ghc->generation || 3541 gpa != ghc->gpa || 3542 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) { 3543 /* We rely on the fact that it fits in a single page. */ 3544 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS); 3545 3546 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || 3547 kvm_is_error_hva(ghc->hva) || !ghc->memslot) 3548 return; 3549 } 3550 3551 st = (struct kvm_steal_time __user *)ghc->hva; 3552 /* 3553 * Doing a TLB flush here, on the guest's behalf, can avoid 3554 * expensive IPIs. 3555 */ 3556 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { 3557 u8 st_preempted = 0; 3558 int err = -EFAULT; 3559 3560 if (!user_access_begin(st, sizeof(*st))) 3561 return; 3562 3563 asm volatile("1: xchgb %0, %2\n" 3564 "xor %1, %1\n" 3565 "2:\n" 3566 _ASM_EXTABLE_UA(1b, 2b) 3567 : "+q" (st_preempted), 3568 "+&r" (err), 3569 "+m" (st->preempted)); 3570 if (err) 3571 goto out; 3572 3573 user_access_end(); 3574 3575 vcpu->arch.st.preempted = 0; 3576 3577 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, 3578 st_preempted & KVM_VCPU_FLUSH_TLB); 3579 if (st_preempted & KVM_VCPU_FLUSH_TLB) 3580 kvm_vcpu_flush_tlb_guest(vcpu); 3581 3582 if (!user_access_begin(st, sizeof(*st))) 3583 goto dirty; 3584 } else { 3585 if (!user_access_begin(st, sizeof(*st))) 3586 return; 3587 3588 unsafe_put_user(0, &st->preempted, out); 3589 vcpu->arch.st.preempted = 0; 3590 } 3591 3592 unsafe_get_user(version, &st->version, out); 3593 if (version & 1) 3594 version += 1; /* first time write, random junk */ 3595 3596 version += 1; 3597 unsafe_put_user(version, &st->version, out); 3598 3599 smp_wmb(); 3600 3601 unsafe_get_user(steal, &st->steal, out); 3602 steal += current->sched_info.run_delay - 3603 vcpu->arch.st.last_steal; 3604 vcpu->arch.st.last_steal = current->sched_info.run_delay; 3605 unsafe_put_user(steal, &st->steal, out); 3606 3607 version += 1; 3608 unsafe_put_user(version, &st->version, out); 3609 3610 out: 3611 user_access_end(); 3612 dirty: 3613 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 3614 } 3615 3616 static bool kvm_is_msr_to_save(u32 msr_index) 3617 { 3618 unsigned int i; 3619 3620 for (i = 0; i < num_msrs_to_save; i++) { 3621 if (msrs_to_save[i] == msr_index) 3622 return true; 3623 } 3624 3625 return false; 3626 } 3627 3628 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 3629 { 3630
u32 msr = msr_info->index; 3631 u64 data = msr_info->data; 3632 3633 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) 3634 return kvm_xen_write_hypercall_page(vcpu, data); 3635 3636 switch (msr) { 3637 case MSR_AMD64_NB_CFG: 3638 case MSR_IA32_UCODE_WRITE: 3639 case MSR_VM_HSAVE_PA: 3640 case MSR_AMD64_PATCH_LOADER: 3641 case MSR_AMD64_BU_CFG2: 3642 case MSR_AMD64_DC_CFG: 3643 case MSR_F15H_EX_CFG: 3644 break; 3645 3646 case MSR_IA32_UCODE_REV: 3647 if (msr_info->host_initiated) 3648 vcpu->arch.microcode_version = data; 3649 break; 3650 case MSR_IA32_ARCH_CAPABILITIES: 3651 if (!msr_info->host_initiated) 3652 return 1; 3653 vcpu->arch.arch_capabilities = data; 3654 break; 3655 case MSR_IA32_PERF_CAPABILITIES: 3656 if (!msr_info->host_initiated) 3657 return 1; 3658 if (data & ~kvm_caps.supported_perf_cap) 3659 return 1; 3660 3661 /* 3662 * Note, this is not just a performance optimization! KVM 3663 * disallows changing feature MSRs after the vCPU has run; PMU 3664 * refresh will bug the VM if called after the vCPU has run. 3665 */ 3666 if (vcpu->arch.perf_capabilities == data) 3667 break; 3668 3669 vcpu->arch.perf_capabilities = data; 3670 kvm_pmu_refresh(vcpu); 3671 break; 3672 case MSR_IA32_PRED_CMD: 3673 if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu)) 3674 return 1; 3675 3676 if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB)) 3677 return 1; 3678 if (!data) 3679 break; 3680 3681 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); 3682 break; 3683 case MSR_IA32_FLUSH_CMD: 3684 if (!msr_info->host_initiated && 3685 !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)) 3686 return 1; 3687 3688 if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH)) 3689 return 1; 3690 if (!data) 3691 break; 3692 3693 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); 3694 break; 3695 case MSR_EFER: 3696 return set_efer(vcpu, msr_info); 3697 case MSR_K7_HWCR: 3698 data &= ~(u64)0x40; /* ignore flush filter disable */ 3699 data &= ~(u64)0x100; /* ignore ignne emulation enable */ 3700 data &= ~(u64)0x8; /* ignore TLB cache disable */ 3701 3702 /* Handle McStatusWrEn */ 3703 if (data == BIT_ULL(18)) { 3704 vcpu->arch.msr_hwcr = data; 3705 } else if (data != 0) { 3706 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 3707 return 1; 3708 } 3709 break; 3710 case MSR_FAM10H_MMIO_CONF_BASE: 3711 if (data != 0) { 3712 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 3713 return 1; 3714 } 3715 break; 3716 case MSR_IA32_CR_PAT: 3717 if (!kvm_pat_valid(data)) 3718 return 1; 3719 3720 vcpu->arch.pat = data; 3721 break; 3722 case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000: 3723 case MSR_MTRRdefType: 3724 return kvm_mtrr_set_msr(vcpu, msr, data); 3725 case MSR_IA32_APICBASE: 3726 return kvm_set_apic_base(vcpu, msr_info); 3727 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 3728 return kvm_x2apic_msr_write(vcpu, msr, data); 3729 case MSR_IA32_TSC_DEADLINE: 3730 kvm_set_lapic_tscdeadline_msr(vcpu, data); 3731 break; 3732 case MSR_IA32_TSC_ADJUST: 3733 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { 3734 if (!msr_info->host_initiated) { 3735 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 3736 adjust_tsc_offset_guest(vcpu, adj); 3737 /* Before back to guest, tsc_timestamp must be adjusted 3738 * as well, otherwise guest's percpu pvclock time could jump. 
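 * The guest-visible TSC is scale(host_tsc) + offset, so the WRMSR
 * itself is fully handled by the offset update above; the clock
 * update request below re-syncs the pvclock copy with the new offset.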
3739 */ 3740 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 3741 } 3742 vcpu->arch.ia32_tsc_adjust_msr = data; 3743 } 3744 break; 3745 case MSR_IA32_MISC_ENABLE: { 3746 u64 old_val = vcpu->arch.ia32_misc_enable_msr; 3747 3748 if (!msr_info->host_initiated) { 3749 /* RO bits */ 3750 if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) 3751 return 1; 3752 3753 /* R bits, i.e. writes are ignored, but don't fault. */ 3754 data = data & ~MSR_IA32_MISC_ENABLE_EMON; 3755 data |= old_val & MSR_IA32_MISC_ENABLE_EMON; 3756 } 3757 3758 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && 3759 ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { 3760 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) 3761 return 1; 3762 vcpu->arch.ia32_misc_enable_msr = data; 3763 kvm_update_cpuid_runtime(vcpu); 3764 } else { 3765 vcpu->arch.ia32_misc_enable_msr = data; 3766 } 3767 break; 3768 } 3769 case MSR_IA32_SMBASE: 3770 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 3771 return 1; 3772 vcpu->arch.smbase = data; 3773 break; 3774 case MSR_IA32_POWER_CTL: 3775 vcpu->arch.msr_ia32_power_ctl = data; 3776 break; 3777 case MSR_IA32_TSC: 3778 if (msr_info->host_initiated) { 3779 kvm_synchronize_tsc(vcpu, data); 3780 } else { 3781 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; 3782 adjust_tsc_offset_guest(vcpu, adj); 3783 vcpu->arch.ia32_tsc_adjust_msr += adj; 3784 } 3785 break; 3786 case MSR_IA32_XSS: 3787 if (!msr_info->host_initiated && 3788 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 3789 return 1; 3790 /* 3791 * KVM supports exposing PT to the guest, but does not support 3792 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than 3793 * XSAVES/XRSTORS to save/restore PT MSRs. 3794 */ 3795 if (data & ~kvm_caps.supported_xss) 3796 return 1; 3797 vcpu->arch.ia32_xss = data; 3798 kvm_update_cpuid_runtime(vcpu); 3799 break; 3800 case MSR_SMI_COUNT: 3801 if (!msr_info->host_initiated) 3802 return 1; 3803 vcpu->arch.smi_count = data; 3804 break; 3805 case MSR_KVM_WALL_CLOCK_NEW: 3806 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3807 return 1; 3808 3809 vcpu->kvm->arch.wall_clock = data; 3810 kvm_write_wall_clock(vcpu->kvm, data, 0); 3811 break; 3812 case MSR_KVM_WALL_CLOCK: 3813 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3814 return 1; 3815 3816 vcpu->kvm->arch.wall_clock = data; 3817 kvm_write_wall_clock(vcpu->kvm, data, 0); 3818 break; 3819 case MSR_KVM_SYSTEM_TIME_NEW: 3820 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 3821 return 1; 3822 3823 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); 3824 break; 3825 case MSR_KVM_SYSTEM_TIME: 3826 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 3827 return 1; 3828 3829 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); 3830 break; 3831 case MSR_KVM_ASYNC_PF_EN: 3832 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 3833 return 1; 3834 3835 if (kvm_pv_enable_async_pf(vcpu, data)) 3836 return 1; 3837 break; 3838 case MSR_KVM_ASYNC_PF_INT: 3839 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3840 return 1; 3841 3842 if (kvm_pv_enable_async_pf_int(vcpu, data)) 3843 return 1; 3844 break; 3845 case MSR_KVM_ASYNC_PF_ACK: 3846 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 3847 return 1; 3848 if (data & 0x1) { 3849 vcpu->arch.apf.pageready_pending = false; 3850 kvm_check_async_pf_completion(vcpu); 3851 } 3852 break; 3853 case MSR_KVM_STEAL_TIME: 3854 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 3855 return 1; 3856 3857 if (unlikely(!sched_info_on())) 3858 
return 1; 3859 3860 if (data & KVM_STEAL_RESERVED_MASK) 3861 return 1; 3862 3863 vcpu->arch.st.msr_val = data; 3864 3865 if (!(data & KVM_MSR_ENABLED)) 3866 break; 3867 3868 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 3869 3870 break; 3871 case MSR_KVM_PV_EOI_EN: 3872 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 3873 return 1; 3874 3875 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) 3876 return 1; 3877 break; 3878 3879 case MSR_KVM_POLL_CONTROL: 3880 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 3881 return 1; 3882 3883 /* only enable bit supported */ 3884 if (data & (-1ULL << 1)) 3885 return 1; 3886 3887 vcpu->arch.msr_kvm_poll_control = data; 3888 break; 3889 3890 case MSR_IA32_MCG_CTL: 3891 case MSR_IA32_MCG_STATUS: 3892 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 3893 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 3894 return set_msr_mce(vcpu, msr_info); 3895 3896 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 3897 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 3898 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 3899 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 3900 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3901 return kvm_pmu_set_msr(vcpu, msr_info); 3902 3903 if (data) 3904 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 3905 break; 3906 case MSR_K7_CLK_CTL: 3907 /* 3908 * Ignore all writes to this no longer documented MSR. 3909 * Writes are only relevant for old K7 processors, 3910 * all pre-dating SVM, but a recommended workaround from 3911 * AMD for these chips. It is possible to specify the 3912 * affected processor models on the command line, hence 3913 * the need to ignore the workaround. 3914 */ 3915 break; 3916 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 3917 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 3918 case HV_X64_MSR_SYNDBG_OPTIONS: 3919 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 3920 case HV_X64_MSR_CRASH_CTL: 3921 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 3922 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 3923 case HV_X64_MSR_TSC_EMULATION_CONTROL: 3924 case HV_X64_MSR_TSC_EMULATION_STATUS: 3925 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 3926 return kvm_hv_set_msr_common(vcpu, msr, data, 3927 msr_info->host_initiated); 3928 case MSR_IA32_BBL_CR_CTL3: 3929 /* Drop writes to this legacy MSR -- see rdmsr 3930 * counterpart for further detail. 
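 * The rdmsr counterpart supplies a best-effort constant so that guests
 * which consume this MSR keep working; writes are merely logged via
 * kvm_pr_unimpl_wrmsr() below and then discarded.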
3931 */ 3932 kvm_pr_unimpl_wrmsr(vcpu, msr, data); 3933 break; 3934 case MSR_AMD64_OSVW_ID_LENGTH: 3935 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3936 return 1; 3937 vcpu->arch.osvw.length = data; 3938 break; 3939 case MSR_AMD64_OSVW_STATUS: 3940 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 3941 return 1; 3942 vcpu->arch.osvw.status = data; 3943 break; 3944 case MSR_PLATFORM_INFO: 3945 if (!msr_info->host_initiated || 3946 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 3947 cpuid_fault_enabled(vcpu))) 3948 return 1; 3949 vcpu->arch.msr_platform_info = data; 3950 break; 3951 case MSR_MISC_FEATURES_ENABLES: 3952 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || 3953 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && 3954 !supports_cpuid_fault(vcpu))) 3955 return 1; 3956 vcpu->arch.msr_misc_features_enables = data; 3957 break; 3958 #ifdef CONFIG_X86_64 3959 case MSR_IA32_XFD: 3960 if (!msr_info->host_initiated && 3961 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3962 return 1; 3963 3964 if (data & ~kvm_guest_supported_xfd(vcpu)) 3965 return 1; 3966 3967 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); 3968 break; 3969 case MSR_IA32_XFD_ERR: 3970 if (!msr_info->host_initiated && 3971 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 3972 return 1; 3973 3974 if (data & ~kvm_guest_supported_xfd(vcpu)) 3975 return 1; 3976 3977 vcpu->arch.guest_fpu.xfd_err = data; 3978 break; 3979 #endif 3980 default: 3981 if (kvm_pmu_is_valid_msr(vcpu, msr)) 3982 return kvm_pmu_set_msr(vcpu, msr_info); 3983 3984 /* 3985 * Userspace is allowed to write '0' to MSRs that KVM reports 3986 * as to-be-saved, even if an MSR isn't fully supported. 3987 */ 3988 if (msr_info->host_initiated && !data && 3989 kvm_is_msr_to_save(msr)) 3990 break; 3991 3992 return KVM_MSR_RET_INVALID; 3993 } 3994 return 0; 3995 } 3996 EXPORT_SYMBOL_GPL(kvm_set_msr_common); 3997 3998 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 3999 { 4000 u64 data; 4001 u64 mcg_cap = vcpu->arch.mcg_cap; 4002 unsigned bank_num = mcg_cap & 0xff; 4003 u32 offset, last_msr; 4004 4005 switch (msr) { 4006 case MSR_IA32_P5_MC_ADDR: 4007 case MSR_IA32_P5_MC_TYPE: 4008 data = 0; 4009 break; 4010 case MSR_IA32_MCG_CAP: 4011 data = vcpu->arch.mcg_cap; 4012 break; 4013 case MSR_IA32_MCG_CTL: 4014 if (!(mcg_cap & MCG_CTL_P) && !host) 4015 return 1; 4016 data = vcpu->arch.mcg_ctl; 4017 break; 4018 case MSR_IA32_MCG_STATUS: 4019 data = vcpu->arch.mcg_status; 4020 break; 4021 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 4022 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; 4023 if (msr > last_msr) 4024 return 1; 4025 4026 if (!(mcg_cap & MCG_CMCI_P) && !host) 4027 return 1; 4028 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, 4029 last_msr + 1 - MSR_IA32_MC0_CTL2); 4030 data = vcpu->arch.mci_ctl2_banks[offset]; 4031 break; 4032 case MSR_IA32_MC0_CTL ...
MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4033 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; 4034 if (msr > last_msr) 4035 return 1; 4036 4037 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, 4038 last_msr + 1 - MSR_IA32_MC0_CTL); 4039 data = vcpu->arch.mce_banks[offset]; 4040 break; 4041 default: 4042 return 1; 4043 } 4044 *pdata = data; 4045 return 0; 4046 } 4047 4048 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 4049 { 4050 switch (msr_info->index) { 4051 case MSR_IA32_PLATFORM_ID: 4052 case MSR_IA32_EBL_CR_POWERON: 4053 case MSR_IA32_LASTBRANCHFROMIP: 4054 case MSR_IA32_LASTBRANCHTOIP: 4055 case MSR_IA32_LASTINTFROMIP: 4056 case MSR_IA32_LASTINTTOIP: 4057 case MSR_AMD64_SYSCFG: 4058 case MSR_K8_TSEG_ADDR: 4059 case MSR_K8_TSEG_MASK: 4060 case MSR_VM_HSAVE_PA: 4061 case MSR_K8_INT_PENDING_MSG: 4062 case MSR_AMD64_NB_CFG: 4063 case MSR_FAM10H_MMIO_CONF_BASE: 4064 case MSR_AMD64_BU_CFG2: 4065 case MSR_IA32_PERF_CTL: 4066 case MSR_AMD64_DC_CFG: 4067 case MSR_F15H_EX_CFG: 4068 /* 4069 * Intel Sandy Bridge CPUs must support the RAPL (running average power 4070 * limit) MSRs. Just return 0, as we do not want to expose the host 4071 * data here. Do not conditionalize this on CPUID, as KVM does not do 4072 * so for existing CPU-specific MSRs. 4073 */ 4074 case MSR_RAPL_POWER_UNIT: 4075 case MSR_PP0_ENERGY_STATUS: /* Power plane 0 (core) */ 4076 case MSR_PP1_ENERGY_STATUS: /* Power plane 1 (graphics uncore) */ 4077 case MSR_PKG_ENERGY_STATUS: /* Total package */ 4078 case MSR_DRAM_ENERGY_STATUS: /* DRAM controller */ 4079 msr_info->data = 0; 4080 break; 4081 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 4082 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 4083 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: 4084 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1: 4085 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4086 return kvm_pmu_get_msr(vcpu, msr_info); 4087 msr_info->data = 0; 4088 break; 4089 case MSR_IA32_UCODE_REV: 4090 msr_info->data = vcpu->arch.microcode_version; 4091 break; 4092 case MSR_IA32_ARCH_CAPABILITIES: 4093 if (!msr_info->host_initiated && 4094 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) 4095 return 1; 4096 msr_info->data = vcpu->arch.arch_capabilities; 4097 break; 4098 case MSR_IA32_PERF_CAPABILITIES: 4099 if (!msr_info->host_initiated && 4100 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 4101 return 1; 4102 msr_info->data = vcpu->arch.perf_capabilities; 4103 break; 4104 case MSR_IA32_POWER_CTL: 4105 msr_info->data = vcpu->arch.msr_ia32_power_ctl; 4106 break; 4107 case MSR_IA32_TSC: { 4108 /* 4109 * Intel SDM states that MSR_IA32_TSC read adds the TSC offset 4110 * even when not intercepted. AMD manual doesn't explicitly 4111 * state this but appears to behave the same. 4112 * 4113 * On userspace reads and writes, however, we unconditionally 4114 * return L1's TSC value to ensure backwards-compatible 4115 * behavior for migration. 4116 */ 4117 u64 offset, ratio; 4118 4119 if (msr_info->host_initiated) { 4120 offset = vcpu->arch.l1_tsc_offset; 4121 ratio = vcpu->arch.l1_tsc_scaling_ratio; 4122 } else { 4123 offset = vcpu->arch.tsc_offset; 4124 ratio = vcpu->arch.tsc_scaling_ratio; 4125 } 4126 4127 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; 4128 break; 4129 } 4130 case MSR_IA32_CR_PAT: 4131 msr_info->data = vcpu->arch.pat; 4132 break; 4133 case MSR_MTRRcap: 4134 case MTRRphysBase_MSR(0) ... 
MSR_MTRRfix4K_F8000: 4135 case MSR_MTRRdefType: 4136 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); 4137 case 0xcd: /* fsb frequency */ 4138 msr_info->data = 3; 4139 break; 4140 /* 4141 * MSR_EBC_FREQUENCY_ID 4142 * Conservative value valid for even the basic CPU models. 4143 * Models 0,1: 000 in bits 23:21 indicating a bus speed of 4144 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, 4145 * and 266MHz for model 3, or 4. Set Core Clock 4146 * Frequency to System Bus Frequency Ratio to 1 (bits 4147 * 31:24) even though these are only valid for CPU 4148 * models > 2, however guests may end up dividing or 4149 * multiplying by zero otherwise. 4150 */ 4151 case MSR_EBC_FREQUENCY_ID: 4152 msr_info->data = 1 << 24; 4153 break; 4154 case MSR_IA32_APICBASE: 4155 msr_info->data = kvm_get_apic_base(vcpu); 4156 break; 4157 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: 4158 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); 4159 case MSR_IA32_TSC_DEADLINE: 4160 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); 4161 break; 4162 case MSR_IA32_TSC_ADJUST: 4163 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; 4164 break; 4165 case MSR_IA32_MISC_ENABLE: 4166 msr_info->data = vcpu->arch.ia32_misc_enable_msr; 4167 break; 4168 case MSR_IA32_SMBASE: 4169 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) 4170 return 1; 4171 msr_info->data = vcpu->arch.smbase; 4172 break; 4173 case MSR_SMI_COUNT: 4174 msr_info->data = vcpu->arch.smi_count; 4175 break; 4176 case MSR_IA32_PERF_STATUS: 4177 /* TSC increment by tick */ 4178 msr_info->data = 1000ULL; 4179 /* CPU multiplier */ 4180 msr_info->data |= (((uint64_t)4ULL) << 40); 4181 break; 4182 case MSR_EFER: 4183 msr_info->data = vcpu->arch.efer; 4184 break; 4185 case MSR_KVM_WALL_CLOCK: 4186 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4187 return 1; 4188 4189 msr_info->data = vcpu->kvm->arch.wall_clock; 4190 break; 4191 case MSR_KVM_WALL_CLOCK_NEW: 4192 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4193 return 1; 4194 4195 msr_info->data = vcpu->kvm->arch.wall_clock; 4196 break; 4197 case MSR_KVM_SYSTEM_TIME: 4198 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) 4199 return 1; 4200 4201 msr_info->data = vcpu->arch.time; 4202 break; 4203 case MSR_KVM_SYSTEM_TIME_NEW: 4204 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) 4205 return 1; 4206 4207 msr_info->data = vcpu->arch.time; 4208 break; 4209 case MSR_KVM_ASYNC_PF_EN: 4210 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) 4211 return 1; 4212 4213 msr_info->data = vcpu->arch.apf.msr_en_val; 4214 break; 4215 case MSR_KVM_ASYNC_PF_INT: 4216 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4217 return 1; 4218 4219 msr_info->data = vcpu->arch.apf.msr_int_val; 4220 break; 4221 case MSR_KVM_ASYNC_PF_ACK: 4222 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) 4223 return 1; 4224 4225 msr_info->data = 0; 4226 break; 4227 case MSR_KVM_STEAL_TIME: 4228 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) 4229 return 1; 4230 4231 msr_info->data = vcpu->arch.st.msr_val; 4232 break; 4233 case MSR_KVM_PV_EOI_EN: 4234 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) 4235 return 1; 4236 4237 msr_info->data = vcpu->arch.pv_eoi.msr_val; 4238 break; 4239 case MSR_KVM_POLL_CONTROL: 4240 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) 4241 return 1; 4242 4243 msr_info->data = vcpu->arch.msr_kvm_poll_control; 4244 break; 4245 case MSR_IA32_P5_MC_ADDR: 4246 case MSR_IA32_P5_MC_TYPE: 4247 case MSR_IA32_MCG_CAP: 4248 case MSR_IA32_MCG_CTL: 4249 case MSR_IA32_MCG_STATUS: 
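/*
 * All MCE-related MSRs, including the per-bank CTL/STATUS/ADDR/MISC and
 * CTL2 ranges below, are serviced by get_msr_mce(); host-initiated reads
 * are allowed even when the corresponding MCG_CAP feature bit is hidden
 * from the guest.
 */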
4250 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: 4251 case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: 4252 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, 4253 msr_info->host_initiated); 4254 case MSR_IA32_XSS: 4255 if (!msr_info->host_initiated && 4256 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) 4257 return 1; 4258 msr_info->data = vcpu->arch.ia32_xss; 4259 break; 4260 case MSR_K7_CLK_CTL: 4261 /* 4262 * Provide expected ramp-up count for K7. All others 4263 * are set to zero, indicating minimum divisors for 4264 * every field. 4265 * 4266 * This prevents guest kernels on AMD host with CPU 4267 * type 6, model 8 and higher from exploding due to 4268 * the rdmsr failing. 4269 */ 4270 msr_info->data = 0x20000000; 4271 break; 4272 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: 4273 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: 4274 case HV_X64_MSR_SYNDBG_OPTIONS: 4275 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: 4276 case HV_X64_MSR_CRASH_CTL: 4277 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT: 4278 case HV_X64_MSR_REENLIGHTENMENT_CONTROL: 4279 case HV_X64_MSR_TSC_EMULATION_CONTROL: 4280 case HV_X64_MSR_TSC_EMULATION_STATUS: 4281 case HV_X64_MSR_TSC_INVARIANT_CONTROL: 4282 return kvm_hv_get_msr_common(vcpu, 4283 msr_info->index, &msr_info->data, 4284 msr_info->host_initiated); 4285 case MSR_IA32_BBL_CR_CTL3: 4286 /* This legacy MSR exists but isn't fully documented in current 4287 * silicon. It is however accessed by winxp in very narrow 4288 * scenarios where it sets bit #19, itself documented as 4289 * a "reserved" bit. Best effort attempt to source coherent 4290 * read data here should the balance of the register be 4291 * interpreted by the guest: 4292 * 4293 * L2 cache control register 3: 64GB range, 256KB size, 4294 * enabled, latency 0x1, configured 4295 */ 4296 msr_info->data = 0xbe702111; 4297 break; 4298 case MSR_AMD64_OSVW_ID_LENGTH: 4299 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4300 return 1; 4301 msr_info->data = vcpu->arch.osvw.length; 4302 break; 4303 case MSR_AMD64_OSVW_STATUS: 4304 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) 4305 return 1; 4306 msr_info->data = vcpu->arch.osvw.status; 4307 break; 4308 case MSR_PLATFORM_INFO: 4309 if (!msr_info->host_initiated && 4310 !vcpu->kvm->arch.guest_can_read_msr_platform_info) 4311 return 1; 4312 msr_info->data = vcpu->arch.msr_platform_info; 4313 break; 4314 case MSR_MISC_FEATURES_ENABLES: 4315 msr_info->data = vcpu->arch.msr_misc_features_enables; 4316 break; 4317 case MSR_K7_HWCR: 4318 msr_info->data = vcpu->arch.msr_hwcr; 4319 break; 4320 #ifdef CONFIG_X86_64 4321 case MSR_IA32_XFD: 4322 if (!msr_info->host_initiated && 4323 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4324 return 1; 4325 4326 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; 4327 break; 4328 case MSR_IA32_XFD_ERR: 4329 if (!msr_info->host_initiated && 4330 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) 4331 return 1; 4332 4333 msr_info->data = vcpu->arch.guest_fpu.xfd_err; 4334 break; 4335 #endif 4336 default: 4337 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) 4338 return kvm_pmu_get_msr(vcpu, msr_info); 4339 4340 /* 4341 * Userspace is allowed to read MSRs that KVM reports as 4342 * to-be-saved, even if an MSR isn't fully supported.
4343 */ 4344 if (msr_info->host_initiated && 4345 kvm_is_msr_to_save(msr_info->index)) { 4346 msr_info->data = 0; 4347 break; 4348 } 4349 4350 return KVM_MSR_RET_INVALID; 4351 } 4352 return 0; 4353 } 4354 EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4355 4356 /* 4357 * Read or write a bunch of msrs. All parameters are kernel addresses. 4358 * 4359 * @return number of msrs set successfully. 4360 */ 4361 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, 4362 struct kvm_msr_entry *entries, 4363 int (*do_msr)(struct kvm_vcpu *vcpu, 4364 unsigned index, u64 *data)) 4365 { 4366 int i; 4367 4368 for (i = 0; i < msrs->nmsrs; ++i) 4369 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 4370 break; 4371 4372 return i; 4373 } 4374 4375 /* 4376 * Read or write a bunch of msrs. Parameters are user addresses. 4377 * 4378 * @return number of msrs set successfully. 4379 */ 4380 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, 4381 int (*do_msr)(struct kvm_vcpu *vcpu, 4382 unsigned index, u64 *data), 4383 int writeback) 4384 { 4385 struct kvm_msrs msrs; 4386 struct kvm_msr_entry *entries; 4387 unsigned size; 4388 int r; 4389 4390 r = -EFAULT; 4391 if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 4392 goto out; 4393 4394 r = -E2BIG; 4395 if (msrs.nmsrs >= MAX_IO_MSRS) 4396 goto out; 4397 4398 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 4399 entries = memdup_user(user_msrs->entries, size); 4400 if (IS_ERR(entries)) { 4401 r = PTR_ERR(entries); 4402 goto out; 4403 } 4404 4405 r = __msr_io(vcpu, &msrs, entries, do_msr); 4406 4407 if (writeback && copy_to_user(user_msrs->entries, entries, size)) 4408 r = -EFAULT; 4409 4410 kfree(entries); 4411 out: 4412 return r; 4413 } 4414 4415 static inline bool kvm_can_mwait_in_guest(void) 4416 { 4417 return boot_cpu_has(X86_FEATURE_MWAIT) && 4418 !boot_cpu_has_bug(X86_BUG_MONITOR) && 4419 boot_cpu_has(X86_FEATURE_ARAT); 4420 } 4421 4422 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, 4423 struct kvm_cpuid2 __user *cpuid_arg) 4424 { 4425 struct kvm_cpuid2 cpuid; 4426 int r; 4427 4428 r = -EFAULT; 4429 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4430 return r; 4431 4432 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); 4433 if (r) 4434 return r; 4435 4436 r = -EFAULT; 4437 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4438 return r; 4439 4440 return 0; 4441 } 4442 4443 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 4444 { 4445 int r = 0; 4446 4447 switch (ext) { 4448 case KVM_CAP_IRQCHIP: 4449 case KVM_CAP_HLT: 4450 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: 4451 case KVM_CAP_SET_TSS_ADDR: 4452 case KVM_CAP_EXT_CPUID: 4453 case KVM_CAP_EXT_EMUL_CPUID: 4454 case KVM_CAP_CLOCKSOURCE: 4455 case KVM_CAP_PIT: 4456 case KVM_CAP_NOP_IO_DELAY: 4457 case KVM_CAP_MP_STATE: 4458 case KVM_CAP_SYNC_MMU: 4459 case KVM_CAP_USER_NMI: 4460 case KVM_CAP_REINJECT_CONTROL: 4461 case KVM_CAP_IRQ_INJECT_STATUS: 4462 case KVM_CAP_IOEVENTFD: 4463 case KVM_CAP_IOEVENTFD_NO_LENGTH: 4464 case KVM_CAP_PIT2: 4465 case KVM_CAP_PIT_STATE2: 4466 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 4467 case KVM_CAP_VCPU_EVENTS: 4468 case KVM_CAP_HYPERV: 4469 case KVM_CAP_HYPERV_VAPIC: 4470 case KVM_CAP_HYPERV_SPIN: 4471 case KVM_CAP_HYPERV_SYNIC: 4472 case KVM_CAP_HYPERV_SYNIC2: 4473 case KVM_CAP_HYPERV_VP_INDEX: 4474 case KVM_CAP_HYPERV_EVENTFD: 4475 case KVM_CAP_HYPERV_TLBFLUSH: 4476 case KVM_CAP_HYPERV_SEND_IPI: 4477 case KVM_CAP_HYPERV_CPUID: 4478 case KVM_CAP_HYPERV_ENFORCE_CPUID: 4479 case KVM_CAP_SYS_HYPERV_CPUID: 4480 
case KVM_CAP_PCI_SEGMENT: 4481 case KVM_CAP_DEBUGREGS: 4482 case KVM_CAP_X86_ROBUST_SINGLESTEP: 4483 case KVM_CAP_XSAVE: 4484 case KVM_CAP_ASYNC_PF: 4485 case KVM_CAP_ASYNC_PF_INT: 4486 case KVM_CAP_GET_TSC_KHZ: 4487 case KVM_CAP_KVMCLOCK_CTRL: 4488 case KVM_CAP_READONLY_MEM: 4489 case KVM_CAP_HYPERV_TIME: 4490 case KVM_CAP_IOAPIC_POLARITY_IGNORED: 4491 case KVM_CAP_TSC_DEADLINE_TIMER: 4492 case KVM_CAP_DISABLE_QUIRKS: 4493 case KVM_CAP_SET_BOOT_CPU_ID: 4494 case KVM_CAP_SPLIT_IRQCHIP: 4495 case KVM_CAP_IMMEDIATE_EXIT: 4496 case KVM_CAP_PMU_EVENT_FILTER: 4497 case KVM_CAP_PMU_EVENT_MASKED_EVENTS: 4498 case KVM_CAP_GET_MSR_FEATURES: 4499 case KVM_CAP_MSR_PLATFORM_INFO: 4500 case KVM_CAP_EXCEPTION_PAYLOAD: 4501 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 4502 case KVM_CAP_SET_GUEST_DEBUG: 4503 case KVM_CAP_LAST_CPU: 4504 case KVM_CAP_X86_USER_SPACE_MSR: 4505 case KVM_CAP_X86_MSR_FILTER: 4506 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4507 #ifdef CONFIG_X86_SGX_KVM 4508 case KVM_CAP_SGX_ATTRIBUTE: 4509 #endif 4510 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 4511 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 4512 case KVM_CAP_SREGS2: 4513 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 4514 case KVM_CAP_VCPU_ATTRIBUTES: 4515 case KVM_CAP_SYS_ATTRIBUTES: 4516 case KVM_CAP_VAPIC: 4517 case KVM_CAP_ENABLE_CAP: 4518 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 4519 case KVM_CAP_IRQFD_RESAMPLE: 4520 r = 1; 4521 break; 4522 case KVM_CAP_EXIT_HYPERCALL: 4523 r = KVM_EXIT_HYPERCALL_VALID_MASK; 4524 break; 4525 case KVM_CAP_SET_GUEST_DEBUG2: 4526 return KVM_GUESTDBG_VALID_MASK; 4527 #ifdef CONFIG_KVM_XEN 4528 case KVM_CAP_XEN_HVM: 4529 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | 4530 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | 4531 KVM_XEN_HVM_CONFIG_SHARED_INFO | 4532 KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL | 4533 KVM_XEN_HVM_CONFIG_EVTCHN_SEND; 4534 if (sched_info_on()) 4535 r |= KVM_XEN_HVM_CONFIG_RUNSTATE | 4536 KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG; 4537 break; 4538 #endif 4539 case KVM_CAP_SYNC_REGS: 4540 r = KVM_SYNC_X86_VALID_FIELDS; 4541 break; 4542 case KVM_CAP_ADJUST_CLOCK: 4543 r = KVM_CLOCK_VALID_FLAGS; 4544 break; 4545 case KVM_CAP_X86_DISABLE_EXITS: 4546 r = KVM_X86_DISABLE_EXITS_PAUSE; 4547 4548 if (!mitigate_smt_rsb) { 4549 r |= KVM_X86_DISABLE_EXITS_HLT | 4550 KVM_X86_DISABLE_EXITS_CSTATE; 4551 4552 if (kvm_can_mwait_in_guest()) 4553 r |= KVM_X86_DISABLE_EXITS_MWAIT; 4554 } 4555 break; 4556 case KVM_CAP_X86_SMM: 4557 if (!IS_ENABLED(CONFIG_KVM_SMM)) 4558 break; 4559 4560 /* SMBASE is usually relocated above 1M on modern chipsets, 4561 * and SMM handlers might indeed rely on 4G segment limits, 4562 * so do not report SMM to be available if real mode is 4563 * emulated via vm86 mode. Still, do not go to great lengths 4564 * to avoid userspace's usage of the feature, because it is a 4565 * fringe case that is not enabled except via specific settings 4566 * of the module parameters. 
4567 */ 4568 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 4569 break; 4570 case KVM_CAP_NR_VCPUS: 4571 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS); 4572 break; 4573 case KVM_CAP_MAX_VCPUS: 4574 r = KVM_MAX_VCPUS; 4575 break; 4576 case KVM_CAP_MAX_VCPU_ID: 4577 r = KVM_MAX_VCPU_IDS; 4578 break; 4579 case KVM_CAP_PV_MMU: /* obsolete */ 4580 r = 0; 4581 break; 4582 case KVM_CAP_MCE: 4583 r = KVM_MAX_MCE_BANKS; 4584 break; 4585 case KVM_CAP_XCRS: 4586 r = boot_cpu_has(X86_FEATURE_XSAVE); 4587 break; 4588 case KVM_CAP_TSC_CONTROL: 4589 case KVM_CAP_VM_TSC_CONTROL: 4590 r = kvm_caps.has_tsc_control; 4591 break; 4592 case KVM_CAP_X2APIC_API: 4593 r = KVM_X2APIC_API_VALID_FLAGS; 4594 break; 4595 case KVM_CAP_NESTED_STATE: 4596 r = kvm_x86_ops.nested_ops->get_state ? 4597 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; 4598 break; 4599 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 4600 r = kvm_x86_ops.enable_l2_tlb_flush != NULL; 4601 break; 4602 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 4603 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; 4604 break; 4605 case KVM_CAP_SMALLER_MAXPHYADDR: 4606 r = (int) allow_smaller_maxphyaddr; 4607 break; 4608 case KVM_CAP_STEAL_TIME: 4609 r = sched_info_on(); 4610 break; 4611 case KVM_CAP_X86_BUS_LOCK_EXIT: 4612 if (kvm_caps.has_bus_lock_exit) 4613 r = KVM_BUS_LOCK_DETECTION_OFF | 4614 KVM_BUS_LOCK_DETECTION_EXIT; 4615 else 4616 r = 0; 4617 break; 4618 case KVM_CAP_XSAVE2: { 4619 r = xstate_required_size(kvm_get_filtered_xcr0(), false); 4620 if (r < sizeof(struct kvm_xsave)) 4621 r = sizeof(struct kvm_xsave); 4622 break; 4623 } 4624 case KVM_CAP_PMU_CAPABILITY: 4625 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; 4626 break; 4627 case KVM_CAP_DISABLE_QUIRKS2: 4628 r = KVM_X86_VALID_QUIRKS; 4629 break; 4630 case KVM_CAP_X86_NOTIFY_VMEXIT: 4631 r = kvm_caps.has_notify_vmexit; 4632 break; 4633 default: 4634 break; 4635 } 4636 return r; 4637 } 4638 4639 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) 4640 { 4641 void __user *uaddr = (void __user*)(unsigned long)attr->addr; 4642 4643 if ((u64)(unsigned long)uaddr != attr->addr) 4644 return ERR_PTR_USR(-EFAULT); 4645 return uaddr; 4646 } 4647 4648 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) 4649 { 4650 u64 __user *uaddr = kvm_get_attr_addr(attr); 4651 4652 if (attr->group) 4653 return -ENXIO; 4654 4655 if (IS_ERR(uaddr)) 4656 return PTR_ERR(uaddr); 4657 4658 switch (attr->attr) { 4659 case KVM_X86_XCOMP_GUEST_SUPP: 4660 if (put_user(kvm_caps.supported_xcr0, uaddr)) 4661 return -EFAULT; 4662 return 0; 4663 default: 4664 return -ENXIO; 4665 } 4666 } 4667 4668 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) 4669 { 4670 if (attr->group) 4671 return -ENXIO; 4672 4673 switch (attr->attr) { 4674 case KVM_X86_XCOMP_GUEST_SUPP: 4675 return 0; 4676 default: 4677 return -ENXIO; 4678 } 4679 } 4680 4681 long kvm_arch_dev_ioctl(struct file *filp, 4682 unsigned int ioctl, unsigned long arg) 4683 { 4684 void __user *argp = (void __user *)arg; 4685 long r; 4686 4687 switch (ioctl) { 4688 case KVM_GET_MSR_INDEX_LIST: { 4689 struct kvm_msr_list __user *user_msr_list = argp; 4690 struct kvm_msr_list msr_list; 4691 unsigned n; 4692 4693 r = -EFAULT; 4694 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4695 goto out; 4696 n = msr_list.nmsrs; 4697 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 4698 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4699 goto out; 4700 r = -E2BIG; 4701 if (n < msr_list.nmsrs) 4702 goto out; 
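/*
 * Note the two-call convention implemented above: userspace may pass
 * nmsrs == 0 to learn the required count, which is copied back together
 * with -E2BIG, and then retry with a large enough array. A minimal
 * userspace sketch (hypothetical kvm_fd, error handling elided):
 *
 *	struct kvm_msr_list hdr = { .nmsrs = 0 };
 *	struct kvm_msr_list *list;
 *
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &hdr);
 *	list = malloc(sizeof(*list) + hdr.nmsrs * sizeof(__u32));
 *	list->nmsrs = hdr.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 */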
4703 r = -EFAULT; 4704 if (copy_to_user(user_msr_list->indices, &msrs_to_save, 4705 num_msrs_to_save * sizeof(u32))) 4706 goto out; 4707 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, 4708 &emulated_msrs, 4709 num_emulated_msrs * sizeof(u32))) 4710 goto out; 4711 r = 0; 4712 break; 4713 } 4714 case KVM_GET_SUPPORTED_CPUID: 4715 case KVM_GET_EMULATED_CPUID: { 4716 struct kvm_cpuid2 __user *cpuid_arg = argp; 4717 struct kvm_cpuid2 cpuid; 4718 4719 r = -EFAULT; 4720 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 4721 goto out; 4722 4723 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, 4724 ioctl); 4725 if (r) 4726 goto out; 4727 4728 r = -EFAULT; 4729 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 4730 goto out; 4731 r = 0; 4732 break; 4733 } 4734 case KVM_X86_GET_MCE_CAP_SUPPORTED: 4735 r = -EFAULT; 4736 if (copy_to_user(argp, &kvm_caps.supported_mce_cap, 4737 sizeof(kvm_caps.supported_mce_cap))) 4738 goto out; 4739 r = 0; 4740 break; 4741 case KVM_GET_MSR_FEATURE_INDEX_LIST: { 4742 struct kvm_msr_list __user *user_msr_list = argp; 4743 struct kvm_msr_list msr_list; 4744 unsigned int n; 4745 4746 r = -EFAULT; 4747 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 4748 goto out; 4749 n = msr_list.nmsrs; 4750 msr_list.nmsrs = num_msr_based_features; 4751 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 4752 goto out; 4753 r = -E2BIG; 4754 if (n < msr_list.nmsrs) 4755 goto out; 4756 r = -EFAULT; 4757 if (copy_to_user(user_msr_list->indices, &msr_based_features, 4758 num_msr_based_features * sizeof(u32))) 4759 goto out; 4760 r = 0; 4761 break; 4762 } 4763 case KVM_GET_MSRS: 4764 r = msr_io(NULL, argp, do_get_msr_feature, 1); 4765 break; 4766 case KVM_GET_SUPPORTED_HV_CPUID: 4767 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); 4768 break; 4769 case KVM_GET_DEVICE_ATTR: { 4770 struct kvm_device_attr attr; 4771 r = -EFAULT; 4772 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4773 break; 4774 r = kvm_x86_dev_get_attr(&attr); 4775 break; 4776 } 4777 case KVM_HAS_DEVICE_ATTR: { 4778 struct kvm_device_attr attr; 4779 r = -EFAULT; 4780 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 4781 break; 4782 r = kvm_x86_dev_has_attr(&attr); 4783 break; 4784 } 4785 default: 4786 r = -EINVAL; 4787 break; 4788 } 4789 out: 4790 return r; 4791 } 4792 4793 static void wbinvd_ipi(void *garbage) 4794 { 4795 wbinvd(); 4796 } 4797 4798 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) 4799 { 4800 return kvm_arch_has_noncoherent_dma(vcpu->kvm); 4801 } 4802 4803 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 4804 { 4805 /* Address WBINVD may be executed by guest */ 4806 if (need_emulate_wbinvd(vcpu)) { 4807 if (static_call(kvm_x86_has_wbinvd_exit)()) 4808 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 4809 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 4810 smp_call_function_single(vcpu->cpu, 4811 wbinvd_ipi, NULL, 1); 4812 } 4813 4814 static_call(kvm_x86_vcpu_load)(vcpu, cpu); 4815 4816 /* Save host pkru register if supported */ 4817 vcpu->arch.host_pkru = read_pkru(); 4818 4819 /* Apply any externally detected TSC adjustments (due to suspend) */ 4820 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { 4821 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); 4822 vcpu->arch.tsc_offset_adjustment = 0; 4823 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 4824 } 4825 4826 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { 4827 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : 4828 rdtsc() - vcpu->arch.last_host_tsc; 4829 if (tsc_delta < 0) 4830 mark_tsc_unstable("KVM discovered backwards TSC"); 4831 4832 if (kvm_check_tsc_unstable()) { 4833 u64 offset = kvm_compute_l1_tsc_offset(vcpu, 4834 vcpu->arch.last_guest_tsc); 4835 kvm_vcpu_write_tsc_offset(vcpu, offset); 4836 vcpu->arch.tsc_catchup = 1; 4837 } 4838 4839 if (kvm_lapic_hv_timer_in_use(vcpu)) 4840 kvm_lapic_restart_hv_timer(vcpu); 4841 4842 /* 4843 * On a host with synchronized TSC, there is no need to update 4844 * kvmclock on vcpu->cpu migration 4845 */ 4846 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) 4847 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); 4848 if (vcpu->cpu != cpu) 4849 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); 4850 vcpu->cpu = cpu; 4851 } 4852 4853 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 4854 } 4855 4856 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) 4857 { 4858 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; 4859 struct kvm_steal_time __user *st; 4860 struct kvm_memslots *slots; 4861 static const u8 preempted = KVM_VCPU_PREEMPTED; 4862 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; 4863 4864 /* 4865 * The vCPU can be marked preempted if and only if the VM-Exit was on 4866 * an instruction boundary and will not trigger guest emulation of any 4867 * kind (see vcpu_run). Vendor specific code controls (conservatively) 4868 * when this is true, for example allowing the vCPU to be marked 4869 * preempted if and only if the VM-Exit was due to a host interrupt. 4870 */ 4871 if (!vcpu->arch.at_instruction_boundary) { 4872 vcpu->stat.preemption_other++; 4873 return; 4874 } 4875 4876 vcpu->stat.preemption_reported++; 4877 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) 4878 return; 4879 4880 if (vcpu->arch.st.preempted) 4881 return; 4882 4883 /* This happens on process exit */ 4884 if (unlikely(current->mm != vcpu->kvm->mm)) 4885 return; 4886 4887 slots = kvm_memslots(vcpu->kvm); 4888 4889 if (unlikely(slots->generation != ghc->generation || 4890 gpa != ghc->gpa || 4891 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) 4892 return; 4893 4894 st = (struct kvm_steal_time __user *)ghc->hva; 4895 BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted)); 4896 4897 if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted))) 4898 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 4899 4900 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 4901 } 4902 4903 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4904 { 4905 int idx; 4906 4907 if (vcpu->preempted) { 4908 if (!vcpu->arch.guest_state_protected) 4909 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4910 4911 /* 4912 * Take the srcu lock as memslots will be accessed to check the gfn 4913 * cache generation against the memslots generation. 
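 * Both kvm_xen_runstate_set_preempted() and kvm_steal_time_set_preempted()
 * consult a gfn_to_hva cache, so they must run inside the SRCU read-side
 * critical section taken below.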
4914 */ 4915 idx = srcu_read_lock(&vcpu->kvm->srcu); 4916 if (kvm_xen_msr_enabled(vcpu->kvm)) 4917 kvm_xen_runstate_set_preempted(vcpu); 4918 else 4919 kvm_steal_time_set_preempted(vcpu); 4920 srcu_read_unlock(&vcpu->kvm->srcu, idx); 4921 } 4922 4923 static_call(kvm_x86_vcpu_put)(vcpu); 4924 vcpu->arch.last_host_tsc = rdtsc(); 4925 } 4926 4927 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, 4928 struct kvm_lapic_state *s) 4929 { 4930 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 4931 4932 return kvm_apic_get_state(vcpu, s); 4933 } 4934 4935 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, 4936 struct kvm_lapic_state *s) 4937 { 4938 int r; 4939 4940 r = kvm_apic_set_state(vcpu, s); 4941 if (r) 4942 return r; 4943 update_cr8_intercept(vcpu); 4944 4945 return 0; 4946 } 4947 4948 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) 4949 { 4950 /* 4951 * We can accept userspace's request for interrupt injection 4952 * as long as we have a place to store the interrupt number. 4953 * The actual injection will happen when the CPU is able to 4954 * deliver the interrupt. 4955 */ 4956 if (kvm_cpu_has_extint(vcpu)) 4957 return false; 4958 4959 /* Acknowledging ExtINT does not happen if LINT0 is masked. */ 4960 return (!lapic_in_kernel(vcpu) || 4961 kvm_apic_accept_pic_intr(vcpu)); 4962 } 4963 4964 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) 4965 { 4966 /* 4967 * Do not cause an interrupt window exit if an exception 4968 * is pending or an event needs reinjection; userspace 4969 * might want to inject the interrupt manually using KVM_SET_REGS 4970 * or KVM_SET_SREGS. For that to work, we must be at an 4971 * instruction boundary and with no events half-injected. 4972 */ 4973 return (kvm_arch_interrupt_allowed(vcpu) && 4974 kvm_cpu_accept_dm_intr(vcpu) && 4975 !kvm_event_needs_reinjection(vcpu) && 4976 !kvm_is_exception_pending(vcpu)); 4977 } 4978 4979 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, 4980 struct kvm_interrupt *irq) 4981 { 4982 if (irq->irq >= KVM_NR_INTERRUPTS) 4983 return -EINVAL; 4984 4985 if (!irqchip_in_kernel(vcpu->kvm)) { 4986 kvm_queue_interrupt(vcpu, irq->irq, false); 4987 kvm_make_request(KVM_REQ_EVENT, vcpu); 4988 return 0; 4989 } 4990 4991 /* 4992 * With in-kernel LAPIC, we only use this to inject EXTINT, so 4993 * fail for in-kernel 8259. 
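 * The vector is parked in pending_external_vector below and delivered as
 * ExtINT; only one such interrupt can be outstanding at a time, hence the
 * -EEXIST check.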
4994 */ 4995 if (pic_in_kernel(vcpu->kvm)) 4996 return -ENXIO; 4997 4998 if (vcpu->arch.pending_external_vector != -1) 4999 return -EEXIST; 5000 5001 vcpu->arch.pending_external_vector = irq->irq; 5002 kvm_make_request(KVM_REQ_EVENT, vcpu); 5003 return 0; 5004 } 5005 5006 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) 5007 { 5008 kvm_inject_nmi(vcpu); 5009 5010 return 0; 5011 } 5012 5013 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, 5014 struct kvm_tpr_access_ctl *tac) 5015 { 5016 if (tac->flags) 5017 return -EINVAL; 5018 vcpu->arch.tpr_access_reporting = !!tac->enabled; 5019 return 0; 5020 } 5021 5022 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, 5023 u64 mcg_cap) 5024 { 5025 int r; 5026 unsigned bank_num = mcg_cap & 0xff, bank; 5027 5028 r = -EINVAL; 5029 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS) 5030 goto out; 5031 if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000)) 5032 goto out; 5033 r = 0; 5034 vcpu->arch.mcg_cap = mcg_cap; 5035 /* Init IA32_MCG_CTL to all 1s */ 5036 if (mcg_cap & MCG_CTL_P) 5037 vcpu->arch.mcg_ctl = ~(u64)0; 5038 /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */ 5039 for (bank = 0; bank < bank_num; bank++) { 5040 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 5041 if (mcg_cap & MCG_CMCI_P) 5042 vcpu->arch.mci_ctl2_banks[bank] = 0; 5043 } 5044 5045 kvm_apic_after_set_mcg_cap(vcpu); 5046 5047 static_call(kvm_x86_setup_mce)(vcpu); 5048 out: 5049 return r; 5050 } 5051 5052 /* 5053 * Validate this is an UCNA (uncorrectable no action) error by checking the 5054 * MCG_STATUS and MCi_STATUS registers: 5055 * - none of the bits for Machine Check Exceptions are set 5056 * - both the VAL (valid) and UC (uncorrectable) bits are set 5057 * MCI_STATUS_PCC - Processor Context Corrupted 5058 * MCI_STATUS_S - Signaled as a Machine Check Exception 5059 * MCI_STATUS_AR - Software recoverable Action Required 5060 */ 5061 static bool is_ucna(struct kvm_x86_mce *mce) 5062 { 5063 return !mce->mcg_status && 5064 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) && 5065 (mce->status & MCI_STATUS_VAL) && 5066 (mce->status & MCI_STATUS_UC); 5067 } 5068 5069 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) 5070 { 5071 u64 mcg_cap = vcpu->arch.mcg_cap; 5072 5073 banks[1] = mce->status; 5074 banks[2] = mce->addr; 5075 banks[3] = mce->misc; 5076 vcpu->arch.mcg_status = mce->mcg_status; 5077 5078 if (!(mcg_cap & MCG_CMCI_P) || 5079 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) 5080 return 0; 5081 5082 if (lapic_in_kernel(vcpu)) 5083 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); 5084 5085 return 0; 5086 } 5087 5088 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, 5089 struct kvm_x86_mce *mce) 5090 { 5091 u64 mcg_cap = vcpu->arch.mcg_cap; 5092 unsigned bank_num = mcg_cap & 0xff; 5093 u64 *banks = vcpu->arch.mce_banks; 5094 5095 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) 5096 return -EINVAL; 5097 5098 banks += array_index_nospec(4 * mce->bank, 4 * bank_num); 5099 5100 if (is_ucna(mce)) 5101 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); 5102 5103 /* 5104 * if IA32_MCG_CTL is not all 1s, the uncorrected error 5105 * reporting is disabled 5106 */ 5107 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && 5108 vcpu->arch.mcg_ctl != ~(u64)0) 5109 return 0; 5110 /* 5111 * if IA32_MCi_CTL is not all 1s, the uncorrected error 5112 * reporting is disabled for the bank 5113 */ 5114 if ((mce->status & MCI_STATUS_UC) && banks[0] != 
~(u64)0) 5115 return 0; 5116 if (mce->status & MCI_STATUS_UC) { 5117 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || 5118 !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) { 5119 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5120 return 0; 5121 } 5122 if (banks[1] & MCI_STATUS_VAL) 5123 mce->status |= MCI_STATUS_OVER; 5124 banks[2] = mce->addr; 5125 banks[3] = mce->misc; 5126 vcpu->arch.mcg_status = mce->mcg_status; 5127 banks[1] = mce->status; 5128 kvm_queue_exception(vcpu, MC_VECTOR); 5129 } else if (!(banks[1] & MCI_STATUS_VAL) 5130 || !(banks[1] & MCI_STATUS_UC)) { 5131 if (banks[1] & MCI_STATUS_VAL) 5132 mce->status |= MCI_STATUS_OVER; 5133 banks[2] = mce->addr; 5134 banks[3] = mce->misc; 5135 banks[1] = mce->status; 5136 } else 5137 banks[1] |= MCI_STATUS_OVER; 5138 return 0; 5139 } 5140 5141 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, 5142 struct kvm_vcpu_events *events) 5143 { 5144 struct kvm_queued_exception *ex; 5145 5146 process_nmi(vcpu); 5147 5148 #ifdef CONFIG_KVM_SMM 5149 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 5150 process_smi(vcpu); 5151 #endif 5152 5153 /* 5154 * KVM's ABI only allows for one exception to be migrated. Luckily, 5155 * the only time there can be two queued exceptions is if there's a 5156 * non-exiting _injected_ exception, and a pending exiting exception. 5157 * In that case, ignore the VM-Exiting exception as it's an extension 5158 * of the injected exception. 5159 */ 5160 if (vcpu->arch.exception_vmexit.pending && 5161 !vcpu->arch.exception.pending && 5162 !vcpu->arch.exception.injected) 5163 ex = &vcpu->arch.exception_vmexit; 5164 else 5165 ex = &vcpu->arch.exception; 5166 5167 /* 5168 * In guest mode, payload delivery should be deferred if the exception 5169 * will be intercepted by L1, e.g. KVM should not modify CR2 if L1 5170 * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability, 5171 * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not 5172 * propagate the payload and so it cannot be safely deferred. Deliver 5173 * the payload if the capability hasn't been requested. 5174 */ 5175 if (!vcpu->kvm->arch.exception_payload_enabled && 5176 ex->pending && ex->has_payload) 5177 kvm_deliver_exception_payload(vcpu, ex); 5178 5179 memset(events, 0, sizeof(*events)); 5180 5181 /* 5182 * The API doesn't provide the instruction length for software 5183 * exceptions, so don't report them. As long as the guest RIP 5184 * isn't advanced, we should expect to encounter the exception 5185 * again. 5186 */ 5187 if (!kvm_exception_is_soft(ex->vector)) { 5188 events->exception.injected = ex->injected; 5189 events->exception.pending = ex->pending; 5190 /* 5191 * For ABI compatibility, deliberately conflate 5192 * pending and injected exceptions when 5193 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
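 * i.e. a pending exception is also reported as injected, so that older
 * userspace which restores events via KVM_SET_VCPU_EVENTS without knowing
 * about the pending/injected distinction still re-delivers it.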
5194 */ 5195 if (!vcpu->kvm->arch.exception_payload_enabled) 5196 events->exception.injected |= ex->pending; 5197 } 5198 events->exception.nr = ex->vector; 5199 events->exception.has_error_code = ex->has_error_code; 5200 events->exception.error_code = ex->error_code; 5201 events->exception_has_payload = ex->has_payload; 5202 events->exception_payload = ex->payload; 5203 5204 events->interrupt.injected = 5205 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 5206 events->interrupt.nr = vcpu->arch.interrupt.nr; 5207 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 5208 5209 events->nmi.injected = vcpu->arch.nmi_injected; 5210 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu); 5211 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 5212 5213 /* events->sipi_vector is never valid when reporting to user space */ 5214 5215 #ifdef CONFIG_KVM_SMM 5216 events->smi.smm = is_smm(vcpu); 5217 events->smi.pending = vcpu->arch.smi_pending; 5218 events->smi.smm_inside_nmi = 5219 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); 5220 #endif 5221 events->smi.latched_init = kvm_lapic_latched_init(vcpu); 5222 5223 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 5224 | KVM_VCPUEVENT_VALID_SHADOW 5225 | KVM_VCPUEVENT_VALID_SMM); 5226 if (vcpu->kvm->arch.exception_payload_enabled) 5227 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; 5228 if (vcpu->kvm->arch.triple_fault_event) { 5229 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5230 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; 5231 } 5232 } 5233 5234 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 5235 struct kvm_vcpu_events *events) 5236 { 5237 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING 5238 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 5239 | KVM_VCPUEVENT_VALID_SHADOW 5240 | KVM_VCPUEVENT_VALID_SMM 5241 | KVM_VCPUEVENT_VALID_PAYLOAD 5242 | KVM_VCPUEVENT_VALID_TRIPLE_FAULT)) 5243 return -EINVAL; 5244 5245 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { 5246 if (!vcpu->kvm->arch.exception_payload_enabled) 5247 return -EINVAL; 5248 if (events->exception.pending) 5249 events->exception.injected = 0; 5250 else 5251 events->exception_has_payload = 0; 5252 } else { 5253 events->exception.pending = 0; 5254 events->exception_has_payload = 0; 5255 } 5256 5257 if ((events->exception.injected || events->exception.pending) && 5258 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) 5259 return -EINVAL; 5260 5261 /* INITs are latched while in SMM */ 5262 if (events->flags & KVM_VCPUEVENT_VALID_SMM && 5263 (events->smi.smm || events->smi.pending) && 5264 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) 5265 return -EINVAL; 5266 5267 process_nmi(vcpu); 5268 5269 /* 5270 * Flag that userspace is stuffing an exception; the next KVM_RUN will 5271 * morph the exception to a VM-Exit if appropriate. Do this only for 5272 * pending exceptions; already-injected exceptions are not subject to 5273 * interception. Note, userspace that conflates pending and injected 5274 * is hosed, and will incorrectly convert an injected exception into a 5275 * pending exception, which in turn may cause a spurious VM-Exit.
5276 */ 5277 vcpu->arch.exception_from_userspace = events->exception.pending; 5278 5279 vcpu->arch.exception_vmexit.pending = false; 5280 5281 vcpu->arch.exception.injected = events->exception.injected; 5282 vcpu->arch.exception.pending = events->exception.pending; 5283 vcpu->arch.exception.vector = events->exception.nr; 5284 vcpu->arch.exception.has_error_code = events->exception.has_error_code; 5285 vcpu->arch.exception.error_code = events->exception.error_code; 5286 vcpu->arch.exception.has_payload = events->exception_has_payload; 5287 vcpu->arch.exception.payload = events->exception_payload; 5288 5289 vcpu->arch.interrupt.injected = events->interrupt.injected; 5290 vcpu->arch.interrupt.nr = events->interrupt.nr; 5291 vcpu->arch.interrupt.soft = events->interrupt.soft; 5292 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 5293 static_call(kvm_x86_set_interrupt_shadow)(vcpu, 5294 events->interrupt.shadow); 5295 5296 vcpu->arch.nmi_injected = events->nmi.injected; 5297 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) { 5298 vcpu->arch.nmi_pending = 0; 5299 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); 5300 kvm_make_request(KVM_REQ_NMI, vcpu); 5301 } 5302 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 5303 5304 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 5305 lapic_in_kernel(vcpu)) 5306 vcpu->arch.apic->sipi_vector = events->sipi_vector; 5307 5308 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 5309 #ifdef CONFIG_KVM_SMM 5310 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { 5311 kvm_leave_nested(vcpu); 5312 kvm_smm_changed(vcpu, events->smi.smm); 5313 } 5314 5315 vcpu->arch.smi_pending = events->smi.pending; 5316 5317 if (events->smi.smm) { 5318 if (events->smi.smm_inside_nmi) 5319 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 5320 else 5321 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; 5322 } 5323 5324 #else 5325 if (events->smi.smm || events->smi.pending || 5326 events->smi.smm_inside_nmi) 5327 return -EINVAL; 5328 #endif 5329 5330 if (lapic_in_kernel(vcpu)) { 5331 if (events->smi.latched_init) 5332 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5333 else 5334 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); 5335 } 5336 } 5337 5338 if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { 5339 if (!vcpu->kvm->arch.triple_fault_event) 5340 return -EINVAL; 5341 if (events->triple_fault.pending) 5342 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5343 else 5344 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); 5345 } 5346 5347 kvm_make_request(KVM_REQ_EVENT, vcpu); 5348 5349 return 0; 5350 } 5351 5352 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, 5353 struct kvm_debugregs *dbgregs) 5354 { 5355 unsigned long val; 5356 5357 memset(dbgregs, 0, sizeof(*dbgregs)); 5358 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); 5359 kvm_get_dr(vcpu, 6, &val); 5360 dbgregs->dr6 = val; 5361 dbgregs->dr7 = vcpu->arch.dr7; 5362 } 5363 5364 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 5365 struct kvm_debugregs *dbgregs) 5366 { 5367 if (dbgregs->flags) 5368 return -EINVAL; 5369 5370 if (!kvm_dr6_valid(dbgregs->dr6)) 5371 return -EINVAL; 5372 if (!kvm_dr7_valid(dbgregs->dr7)) 5373 return -EINVAL; 5374 5375 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); 5376 kvm_update_dr0123(vcpu); 5377 vcpu->arch.dr6 = dbgregs->dr6; 5378 vcpu->arch.dr7 = dbgregs->dr7; 5379 kvm_update_dr7(vcpu); 5380 5381 return 0; 5382 } 5383 5384 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5385 
struct kvm_xsave *guest_xsave) 5386 { 5387 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5388 return; 5389 5390 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5391 guest_xsave->region, 5392 sizeof(guest_xsave->region), 5393 vcpu->arch.pkru); 5394 } 5395 5396 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, 5397 u8 *state, unsigned int size) 5398 { 5399 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5400 return; 5401 5402 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, 5403 state, size, vcpu->arch.pkru); 5404 } 5405 5406 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, 5407 struct kvm_xsave *guest_xsave) 5408 { 5409 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) 5410 return 0; 5411 5412 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, 5413 guest_xsave->region, 5414 kvm_caps.supported_xcr0, 5415 &vcpu->arch.pkru); 5416 } 5417 5418 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, 5419 struct kvm_xcrs *guest_xcrs) 5420 { 5421 if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 5422 guest_xcrs->nr_xcrs = 0; 5423 return; 5424 } 5425 5426 guest_xcrs->nr_xcrs = 1; 5427 guest_xcrs->flags = 0; 5428 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; 5429 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; 5430 } 5431 5432 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, 5433 struct kvm_xcrs *guest_xcrs) 5434 { 5435 int i, r = 0; 5436 5437 if (!boot_cpu_has(X86_FEATURE_XSAVE)) 5438 return -EINVAL; 5439 5440 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) 5441 return -EINVAL; 5442 5443 for (i = 0; i < guest_xcrs->nr_xcrs; i++) 5444 /* Only support XCR0 currently */ 5445 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { 5446 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, 5447 guest_xcrs->xcrs[i].value); 5448 break; 5449 } 5450 if (r) 5451 r = -EINVAL; 5452 return r; 5453 } 5454 5455 /* 5456 * kvm_set_guest_paused() indicates to the guest kernel that it has been 5457 * stopped by the hypervisor. This function will be called from the host only. 5458 * EINVAL is returned when the host attempts to set the flag for a guest that 5459 * does not support pv clocks. 
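 * Userspace invokes this via the KVM_KVMCLOCK_CTRL vcpu ioctl (see the
 * dispatcher below); the queued KVM_REQ_CLOCK_UPDATE then propagates
 * PVCLOCK_GUEST_STOPPED into the guest's copy of the pvclock data.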
5460 */ 5461 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 5462 { 5463 if (!vcpu->arch.pv_time.active) 5464 return -EINVAL; 5465 vcpu->arch.pvclock_set_guest_stopped_request = true; 5466 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 5467 return 0; 5468 } 5469 5470 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, 5471 struct kvm_device_attr *attr) 5472 { 5473 int r; 5474 5475 switch (attr->attr) { 5476 case KVM_VCPU_TSC_OFFSET: 5477 r = 0; 5478 break; 5479 default: 5480 r = -ENXIO; 5481 } 5482 5483 return r; 5484 } 5485 5486 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, 5487 struct kvm_device_attr *attr) 5488 { 5489 u64 __user *uaddr = kvm_get_attr_addr(attr); 5490 int r; 5491 5492 if (IS_ERR(uaddr)) 5493 return PTR_ERR(uaddr); 5494 5495 switch (attr->attr) { 5496 case KVM_VCPU_TSC_OFFSET: 5497 r = -EFAULT; 5498 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) 5499 break; 5500 r = 0; 5501 break; 5502 default: 5503 r = -ENXIO; 5504 } 5505 5506 return r; 5507 } 5508 5509 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, 5510 struct kvm_device_attr *attr) 5511 { 5512 u64 __user *uaddr = kvm_get_attr_addr(attr); 5513 struct kvm *kvm = vcpu->kvm; 5514 int r; 5515 5516 if (IS_ERR(uaddr)) 5517 return PTR_ERR(uaddr); 5518 5519 switch (attr->attr) { 5520 case KVM_VCPU_TSC_OFFSET: { 5521 u64 offset, tsc, ns; 5522 unsigned long flags; 5523 bool matched; 5524 5525 r = -EFAULT; 5526 if (get_user(offset, uaddr)) 5527 break; 5528 5529 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 5530 5531 matched = (vcpu->arch.virtual_tsc_khz && 5532 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && 5533 kvm->arch.last_tsc_offset == offset); 5534 5535 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; 5536 ns = get_kvmclock_base_ns(); 5537 5538 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); 5539 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 5540 5541 r = 0; 5542 break; 5543 } 5544 default: 5545 r = -ENXIO; 5546 } 5547 5548 return r; 5549 } 5550 5551 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, 5552 unsigned int ioctl, 5553 void __user *argp) 5554 { 5555 struct kvm_device_attr attr; 5556 int r; 5557 5558 if (copy_from_user(&attr, argp, sizeof(attr))) 5559 return -EFAULT; 5560 5561 if (attr.group != KVM_VCPU_TSC_CTRL) 5562 return -ENXIO; 5563 5564 switch (ioctl) { 5565 case KVM_HAS_DEVICE_ATTR: 5566 r = kvm_arch_tsc_has_attr(vcpu, &attr); 5567 break; 5568 case KVM_GET_DEVICE_ATTR: 5569 r = kvm_arch_tsc_get_attr(vcpu, &attr); 5570 break; 5571 case KVM_SET_DEVICE_ATTR: 5572 r = kvm_arch_tsc_set_attr(vcpu, &attr); 5573 break; 5574 } 5575 5576 return r; 5577 } 5578 5579 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 5580 struct kvm_enable_cap *cap) 5581 { 5582 int r; 5583 uint16_t vmcs_version; 5584 void __user *user_ptr; 5585 5586 if (cap->flags) 5587 return -EINVAL; 5588 5589 switch (cap->cap) { 5590 case KVM_CAP_HYPERV_SYNIC2: 5591 if (cap->args[0]) 5592 return -EINVAL; 5593 fallthrough; 5594 5595 case KVM_CAP_HYPERV_SYNIC: 5596 if (!irqchip_in_kernel(vcpu->kvm)) 5597 return -EINVAL; 5598 return kvm_hv_activate_synic(vcpu, cap->cap == 5599 KVM_CAP_HYPERV_SYNIC2); 5600 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: 5601 if (!kvm_x86_ops.nested_ops->enable_evmcs) 5602 return -ENOTTY; 5603 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); 5604 if (!r) { 5605 user_ptr = (void __user *)(uintptr_t)cap->args[0]; 5606 if (copy_to_user(user_ptr, &vmcs_version, 5607 sizeof(vmcs_version))) 5608 r = -EFAULT; 5609 
} 5610 return r; 5611 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: 5612 if (!kvm_x86_ops.enable_l2_tlb_flush) 5613 return -ENOTTY; 5614 5615 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); 5616 5617 case KVM_CAP_HYPERV_ENFORCE_CPUID: 5618 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); 5619 5620 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 5621 vcpu->arch.pv_cpuid.enforce = cap->args[0]; 5622 if (vcpu->arch.pv_cpuid.enforce) 5623 kvm_update_pv_runtime(vcpu); 5624 5625 return 0; 5626 default: 5627 return -EINVAL; 5628 } 5629 } 5630 5631 long kvm_arch_vcpu_ioctl(struct file *filp, 5632 unsigned int ioctl, unsigned long arg) 5633 { 5634 struct kvm_vcpu *vcpu = filp->private_data; 5635 void __user *argp = (void __user *)arg; 5636 int r; 5637 union { 5638 struct kvm_sregs2 *sregs2; 5639 struct kvm_lapic_state *lapic; 5640 struct kvm_xsave *xsave; 5641 struct kvm_xcrs *xcrs; 5642 void *buffer; 5643 } u; 5644 5645 vcpu_load(vcpu); 5646 5647 u.buffer = NULL; 5648 switch (ioctl) { 5649 case KVM_GET_LAPIC: { 5650 r = -EINVAL; 5651 if (!lapic_in_kernel(vcpu)) 5652 goto out; 5653 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), 5654 GFP_KERNEL_ACCOUNT); 5655 5656 r = -ENOMEM; 5657 if (!u.lapic) 5658 goto out; 5659 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); 5660 if (r) 5661 goto out; 5662 r = -EFAULT; 5663 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) 5664 goto out; 5665 r = 0; 5666 break; 5667 } 5668 case KVM_SET_LAPIC: { 5669 r = -EINVAL; 5670 if (!lapic_in_kernel(vcpu)) 5671 goto out; 5672 u.lapic = memdup_user(argp, sizeof(*u.lapic)); 5673 if (IS_ERR(u.lapic)) { 5674 r = PTR_ERR(u.lapic); 5675 goto out_nofree; 5676 } 5677 5678 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); 5679 break; 5680 } 5681 case KVM_INTERRUPT: { 5682 struct kvm_interrupt irq; 5683 5684 r = -EFAULT; 5685 if (copy_from_user(&irq, argp, sizeof(irq))) 5686 goto out; 5687 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 5688 break; 5689 } 5690 case KVM_NMI: { 5691 r = kvm_vcpu_ioctl_nmi(vcpu); 5692 break; 5693 } 5694 case KVM_SMI: { 5695 r = kvm_inject_smi(vcpu); 5696 break; 5697 } 5698 case KVM_SET_CPUID: { 5699 struct kvm_cpuid __user *cpuid_arg = argp; 5700 struct kvm_cpuid cpuid; 5701 5702 r = -EFAULT; 5703 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5704 goto out; 5705 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 5706 break; 5707 } 5708 case KVM_SET_CPUID2: { 5709 struct kvm_cpuid2 __user *cpuid_arg = argp; 5710 struct kvm_cpuid2 cpuid; 5711 5712 r = -EFAULT; 5713 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5714 goto out; 5715 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 5716 cpuid_arg->entries); 5717 break; 5718 } 5719 case KVM_GET_CPUID2: { 5720 struct kvm_cpuid2 __user *cpuid_arg = argp; 5721 struct kvm_cpuid2 cpuid; 5722 5723 r = -EFAULT; 5724 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 5725 goto out; 5726 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 5727 cpuid_arg->entries); 5728 if (r) 5729 goto out; 5730 r = -EFAULT; 5731 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 5732 goto out; 5733 r = 0; 5734 break; 5735 } 5736 case KVM_GET_MSRS: { 5737 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5738 r = msr_io(vcpu, argp, do_get_msr, 1); 5739 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5740 break; 5741 } 5742 case KVM_SET_MSRS: { 5743 int idx = srcu_read_lock(&vcpu->kvm->srcu); 5744 r = msr_io(vcpu, argp, do_set_msr, 0); 5745 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5746 break; 5747 } 5748 case KVM_TPR_ACCESS_REPORTING: { 5749 struct kvm_tpr_access_ctl tac; 5750 5751 
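/*
 * Same copy-in/act/copy-out pattern as the other ioctls in this
 * dispatcher: the control structure is read from userspace, applied via
 * vcpu_ioctl_tpr_access_reporting() above, and copied back out.
 */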
r = -EFAULT; 5752 if (copy_from_user(&tac, argp, sizeof(tac))) 5753 goto out; 5754 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 5755 if (r) 5756 goto out; 5757 r = -EFAULT; 5758 if (copy_to_user(argp, &tac, sizeof(tac))) 5759 goto out; 5760 r = 0; 5761 break; 5762 }; 5763 case KVM_SET_VAPIC_ADDR: { 5764 struct kvm_vapic_addr va; 5765 int idx; 5766 5767 r = -EINVAL; 5768 if (!lapic_in_kernel(vcpu)) 5769 goto out; 5770 r = -EFAULT; 5771 if (copy_from_user(&va, argp, sizeof(va))) 5772 goto out; 5773 idx = srcu_read_lock(&vcpu->kvm->srcu); 5774 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 5775 srcu_read_unlock(&vcpu->kvm->srcu, idx); 5776 break; 5777 } 5778 case KVM_X86_SETUP_MCE: { 5779 u64 mcg_cap; 5780 5781 r = -EFAULT; 5782 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 5783 goto out; 5784 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 5785 break; 5786 } 5787 case KVM_X86_SET_MCE: { 5788 struct kvm_x86_mce mce; 5789 5790 r = -EFAULT; 5791 if (copy_from_user(&mce, argp, sizeof(mce))) 5792 goto out; 5793 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 5794 break; 5795 } 5796 case KVM_GET_VCPU_EVENTS: { 5797 struct kvm_vcpu_events events; 5798 5799 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); 5800 5801 r = -EFAULT; 5802 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events))) 5803 break; 5804 r = 0; 5805 break; 5806 } 5807 case KVM_SET_VCPU_EVENTS: { 5808 struct kvm_vcpu_events events; 5809 5810 r = -EFAULT; 5811 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events))) 5812 break; 5813 5814 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); 5815 break; 5816 } 5817 case KVM_GET_DEBUGREGS: { 5818 struct kvm_debugregs dbgregs; 5819 5820 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); 5821 5822 r = -EFAULT; 5823 if (copy_to_user(argp, &dbgregs, 5824 sizeof(struct kvm_debugregs))) 5825 break; 5826 r = 0; 5827 break; 5828 } 5829 case KVM_SET_DEBUGREGS: { 5830 struct kvm_debugregs dbgregs; 5831 5832 r = -EFAULT; 5833 if (copy_from_user(&dbgregs, argp, 5834 sizeof(struct kvm_debugregs))) 5835 break; 5836 5837 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); 5838 break; 5839 } 5840 case KVM_GET_XSAVE: { 5841 r = -EINVAL; 5842 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) 5843 break; 5844 5845 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT); 5846 r = -ENOMEM; 5847 if (!u.xsave) 5848 break; 5849 5850 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); 5851 5852 r = -EFAULT; 5853 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) 5854 break; 5855 r = 0; 5856 break; 5857 } 5858 case KVM_SET_XSAVE: { 5859 int size = vcpu->arch.guest_fpu.uabi_size; 5860 5861 u.xsave = memdup_user(argp, size); 5862 if (IS_ERR(u.xsave)) { 5863 r = PTR_ERR(u.xsave); 5864 goto out_nofree; 5865 } 5866 5867 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); 5868 break; 5869 } 5870 5871 case KVM_GET_XSAVE2: { 5872 int size = vcpu->arch.guest_fpu.uabi_size; 5873 5874 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT); 5875 r = -ENOMEM; 5876 if (!u.xsave) 5877 break; 5878 5879 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); 5880 5881 r = -EFAULT; 5882 if (copy_to_user(argp, u.xsave, size)) 5883 break; 5884 5885 r = 0; 5886 break; 5887 } 5888 5889 case KVM_GET_XCRS: { 5890 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT); 5891 r = -ENOMEM; 5892 if (!u.xcrs) 5893 break; 5894 5895 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); 5896 5897 r = -EFAULT; 5898 if (copy_to_user(argp, u.xcrs, 5899 sizeof(struct kvm_xcrs))) 5900 break; 5901 r = 0; 5902 
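	/*
	 * Illustrative sketch of the matching userspace read, assuming a
	 * hypothetical open "vcpu_fd" (KVM fills xcrs[0] with XCR0):
	 *
	 *	struct kvm_xcrs xcrs;
	 *
	 *	if (ioctl(vcpu_fd, KVM_GET_XCRS, &xcrs) == 0 && xcrs.nr_xcrs)
	 *		printf("XCR0 = 0x%llx\n",
	 *		       (unsigned long long)xcrs.xcrs[0].value);
	 */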
break; 5903 } 5904 case KVM_SET_XCRS: { 5905 u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); 5906 if (IS_ERR(u.xcrs)) { 5907 r = PTR_ERR(u.xcrs); 5908 goto out_nofree; 5909 } 5910 5911 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); 5912 break; 5913 } 5914 case KVM_SET_TSC_KHZ: { 5915 u32 user_tsc_khz; 5916 5917 r = -EINVAL; 5918 user_tsc_khz = (u32)arg; 5919 5920 if (kvm_caps.has_tsc_control && 5921 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 5922 goto out; 5923 5924 if (user_tsc_khz == 0) 5925 user_tsc_khz = tsc_khz; 5926 5927 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) 5928 r = 0; 5929 5930 goto out; 5931 } 5932 case KVM_GET_TSC_KHZ: { 5933 r = vcpu->arch.virtual_tsc_khz; 5934 goto out; 5935 } 5936 case KVM_KVMCLOCK_CTRL: { 5937 r = kvm_set_guest_paused(vcpu); 5938 goto out; 5939 } 5940 case KVM_ENABLE_CAP: { 5941 struct kvm_enable_cap cap; 5942 5943 r = -EFAULT; 5944 if (copy_from_user(&cap, argp, sizeof(cap))) 5945 goto out; 5946 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 5947 break; 5948 } 5949 case KVM_GET_NESTED_STATE: { 5950 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5951 u32 user_data_size; 5952 5953 r = -EINVAL; 5954 if (!kvm_x86_ops.nested_ops->get_state) 5955 break; 5956 5957 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 5958 r = -EFAULT; 5959 if (get_user(user_data_size, &user_kvm_nested_state->size)) 5960 break; 5961 5962 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, 5963 user_data_size); 5964 if (r < 0) 5965 break; 5966 5967 if (r > user_data_size) { 5968 if (put_user(r, &user_kvm_nested_state->size)) 5969 r = -EFAULT; 5970 else 5971 r = -E2BIG; 5972 break; 5973 } 5974 5975 r = 0; 5976 break; 5977 } 5978 case KVM_SET_NESTED_STATE: { 5979 struct kvm_nested_state __user *user_kvm_nested_state = argp; 5980 struct kvm_nested_state kvm_state; 5981 int idx; 5982 5983 r = -EINVAL; 5984 if (!kvm_x86_ops.nested_ops->set_state) 5985 break; 5986 5987 r = -EFAULT; 5988 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 5989 break; 5990 5991 r = -EINVAL; 5992 if (kvm_state.size < sizeof(kvm_state)) 5993 break; 5994 5995 if (kvm_state.flags & 5996 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE 5997 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING 5998 | KVM_STATE_NESTED_GIF_SET)) 5999 break; 6000 6001 /* nested_run_pending implies guest_mode. 
*/ 6002 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) 6003 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) 6004 break; 6005 6006 idx = srcu_read_lock(&vcpu->kvm->srcu); 6007 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); 6008 srcu_read_unlock(&vcpu->kvm->srcu, idx); 6009 break; 6010 } 6011 case KVM_GET_SUPPORTED_HV_CPUID: 6012 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); 6013 break; 6014 #ifdef CONFIG_KVM_XEN 6015 case KVM_XEN_VCPU_GET_ATTR: { 6016 struct kvm_xen_vcpu_attr xva; 6017 6018 r = -EFAULT; 6019 if (copy_from_user(&xva, argp, sizeof(xva))) 6020 goto out; 6021 r = kvm_xen_vcpu_get_attr(vcpu, &xva); 6022 if (!r && copy_to_user(argp, &xva, sizeof(xva))) 6023 r = -EFAULT; 6024 break; 6025 } 6026 case KVM_XEN_VCPU_SET_ATTR: { 6027 struct kvm_xen_vcpu_attr xva; 6028 6029 r = -EFAULT; 6030 if (copy_from_user(&xva, argp, sizeof(xva))) 6031 goto out; 6032 r = kvm_xen_vcpu_set_attr(vcpu, &xva); 6033 break; 6034 } 6035 #endif 6036 case KVM_GET_SREGS2: { 6037 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL); 6038 r = -ENOMEM; 6039 if (!u.sregs2) 6040 goto out; 6041 __get_sregs2(vcpu, u.sregs2); 6042 r = -EFAULT; 6043 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2))) 6044 goto out; 6045 r = 0; 6046 break; 6047 } 6048 case KVM_SET_SREGS2: { 6049 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2)); 6050 if (IS_ERR(u.sregs2)) { 6051 r = PTR_ERR(u.sregs2); 6052 u.sregs2 = NULL; 6053 goto out; 6054 } 6055 r = __set_sregs2(vcpu, u.sregs2); 6056 break; 6057 } 6058 case KVM_HAS_DEVICE_ATTR: 6059 case KVM_GET_DEVICE_ATTR: 6060 case KVM_SET_DEVICE_ATTR: 6061 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); 6062 break; 6063 default: 6064 r = -EINVAL; 6065 } 6066 out: 6067 kfree(u.buffer); 6068 out_nofree: 6069 vcpu_put(vcpu); 6070 return r; 6071 } 6072 6073 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 6074 { 6075 return VM_FAULT_SIGBUS; 6076 } 6077 6078 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) 6079 { 6080 int ret; 6081 6082 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 6083 return -EINVAL; 6084 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 6085 return ret; 6086 } 6087 6088 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 6089 u64 ident_addr) 6090 { 6091 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 6092 } 6093 6094 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 6095 unsigned long kvm_nr_mmu_pages) 6096 { 6097 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) 6098 return -EINVAL; 6099 6100 mutex_lock(&kvm->slots_lock); 6101 6102 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); 6103 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; 6104 6105 mutex_unlock(&kvm->slots_lock); 6106 return 0; 6107 } 6108 6109 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6110 { 6111 struct kvm_pic *pic = kvm->arch.vpic; 6112 int r; 6113 6114 r = 0; 6115 switch (chip->chip_id) { 6116 case KVM_IRQCHIP_PIC_MASTER: 6117 memcpy(&chip->chip.pic, &pic->pics[0], 6118 sizeof(struct kvm_pic_state)); 6119 break; 6120 case KVM_IRQCHIP_PIC_SLAVE: 6121 memcpy(&chip->chip.pic, &pic->pics[1], 6122 sizeof(struct kvm_pic_state)); 6123 break; 6124 case KVM_IRQCHIP_IOAPIC: 6125 kvm_get_ioapic(kvm, &chip->chip.ioapic); 6126 break; 6127 default: 6128 r = -EINVAL; 6129 break; 6130 } 6131 return r; 6132 } 6133 6134 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) 6135 { 6136 struct kvm_pic *pic = 
kvm->arch.vpic; 6137 int r; 6138 6139 r = 0; 6140 switch (chip->chip_id) { 6141 case KVM_IRQCHIP_PIC_MASTER: 6142 spin_lock(&pic->lock); 6143 memcpy(&pic->pics[0], &chip->chip.pic, 6144 sizeof(struct kvm_pic_state)); 6145 spin_unlock(&pic->lock); 6146 break; 6147 case KVM_IRQCHIP_PIC_SLAVE: 6148 spin_lock(&pic->lock); 6149 memcpy(&pic->pics[1], &chip->chip.pic, 6150 sizeof(struct kvm_pic_state)); 6151 spin_unlock(&pic->lock); 6152 break; 6153 case KVM_IRQCHIP_IOAPIC: 6154 kvm_set_ioapic(kvm, &chip->chip.ioapic); 6155 break; 6156 default: 6157 r = -EINVAL; 6158 break; 6159 } 6160 kvm_pic_update_irq(pic); 6161 return r; 6162 } 6163 6164 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6165 { 6166 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; 6167 6168 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); 6169 6170 mutex_lock(&kps->lock); 6171 memcpy(ps, &kps->channels, sizeof(*ps)); 6172 mutex_unlock(&kps->lock); 6173 return 0; 6174 } 6175 6176 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) 6177 { 6178 int i; 6179 struct kvm_pit *pit = kvm->arch.vpit; 6180 6181 mutex_lock(&pit->pit_state.lock); 6182 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); 6183 for (i = 0; i < 3; i++) 6184 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); 6185 mutex_unlock(&pit->pit_state.lock); 6186 return 0; 6187 } 6188 6189 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6190 { 6191 mutex_lock(&kvm->arch.vpit->pit_state.lock); 6192 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, 6193 sizeof(ps->channels)); 6194 ps->flags = kvm->arch.vpit->pit_state.flags; 6195 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 6196 memset(&ps->reserved, 0, sizeof(ps->reserved)); 6197 return 0; 6198 } 6199 6200 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) 6201 { 6202 int start = 0; 6203 int i; 6204 u32 prev_legacy, cur_legacy; 6205 struct kvm_pit *pit = kvm->arch.vpit; 6206 6207 mutex_lock(&pit->pit_state.lock); 6208 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; 6209 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; 6210 if (!prev_legacy && cur_legacy) 6211 start = 1; 6212 memcpy(&pit->pit_state.channels, &ps->channels, 6213 sizeof(pit->pit_state.channels)); 6214 pit->pit_state.flags = ps->flags; 6215 for (i = 0; i < 3; i++) 6216 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, 6217 start && i == 0); 6218 mutex_unlock(&pit->pit_state.lock); 6219 return 0; 6220 } 6221 6222 static int kvm_vm_ioctl_reinject(struct kvm *kvm, 6223 struct kvm_reinject_control *control) 6224 { 6225 struct kvm_pit *pit = kvm->arch.vpit; 6226 6227 /* pit->pit_state.lock was overloaded to prevent userspace from getting 6228 * an inconsistent state after running multiple KVM_REINJECT_CONTROL 6229 * ioctls in parallel. Use a separate lock if that ioctl isn't rare. 6230 */ 6231 mutex_lock(&pit->pit_state.lock); 6232 kvm_pit_set_reinject(pit, control->pit_reinject); 6233 mutex_unlock(&pit->pit_state.lock); 6234 6235 return 0; 6236 } 6237 6238 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) 6239 { 6240 6241 /* 6242 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called 6243 * before reporting dirty_bitmap to userspace. KVM flushes the buffers 6244 * on all VM-Exits, thus we only need to kick running vCPUs to force a 6245 * VM-Exit. 
6246 */ 6247 struct kvm_vcpu *vcpu; 6248 unsigned long i; 6249 6250 kvm_for_each_vcpu(i, vcpu, kvm) 6251 kvm_vcpu_kick(vcpu); 6252 } 6253 6254 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, 6255 bool line_status) 6256 { 6257 if (!irqchip_in_kernel(kvm)) 6258 return -ENXIO; 6259 6260 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 6261 irq_event->irq, irq_event->level, 6262 line_status); 6263 return 0; 6264 } 6265 6266 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 6267 struct kvm_enable_cap *cap) 6268 { 6269 int r; 6270 6271 if (cap->flags) 6272 return -EINVAL; 6273 6274 switch (cap->cap) { 6275 case KVM_CAP_DISABLE_QUIRKS2: 6276 r = -EINVAL; 6277 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) 6278 break; 6279 fallthrough; 6280 case KVM_CAP_DISABLE_QUIRKS: 6281 kvm->arch.disabled_quirks = cap->args[0]; 6282 r = 0; 6283 break; 6284 case KVM_CAP_SPLIT_IRQCHIP: { 6285 mutex_lock(&kvm->lock); 6286 r = -EINVAL; 6287 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) 6288 goto split_irqchip_unlock; 6289 r = -EEXIST; 6290 if (irqchip_in_kernel(kvm)) 6291 goto split_irqchip_unlock; 6292 if (kvm->created_vcpus) 6293 goto split_irqchip_unlock; 6294 r = kvm_setup_empty_irq_routing(kvm); 6295 if (r) 6296 goto split_irqchip_unlock; 6297 /* Pairs with irqchip_in_kernel. */ 6298 smp_wmb(); 6299 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; 6300 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; 6301 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6302 r = 0; 6303 split_irqchip_unlock: 6304 mutex_unlock(&kvm->lock); 6305 break; 6306 } 6307 case KVM_CAP_X2APIC_API: 6308 r = -EINVAL; 6309 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) 6310 break; 6311 6312 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) 6313 kvm->arch.x2apic_format = true; 6314 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) 6315 kvm->arch.x2apic_broadcast_quirk_disabled = true; 6316 6317 r = 0; 6318 break; 6319 case KVM_CAP_X86_DISABLE_EXITS: 6320 r = -EINVAL; 6321 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) 6322 break; 6323 6324 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6325 kvm->arch.pause_in_guest = true; 6326 6327 #define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \ 6328 "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests." 
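	/*
	 * Illustrative sketch only: userspace opts in per VM via
	 * KVM_ENABLE_CAP on the VM fd, typically before creating any vCPUs.
	 * Assuming a hypothetical open "vm_fd":
	 *
	 *	struct kvm_enable_cap cap = {
	 *		.cap = KVM_CAP_X86_DISABLE_EXITS,
	 *		.args[0] = KVM_X86_DISABLE_EXITS_HLT |
	 *			   KVM_X86_DISABLE_EXITS_PAUSE,
	 *	};
	 *
	 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
	 *		err(1, "KVM_ENABLE_CAP");
	 */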
6329 6330 if (!mitigate_smt_rsb) { 6331 if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() && 6332 (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE)) 6333 pr_warn_once(SMT_RSB_MSG); 6334 6335 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && 6336 kvm_can_mwait_in_guest()) 6337 kvm->arch.mwait_in_guest = true; 6338 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6339 kvm->arch.hlt_in_guest = true; 6340 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6341 kvm->arch.cstate_in_guest = true; 6342 } 6343 6344 r = 0; 6345 break; 6346 case KVM_CAP_MSR_PLATFORM_INFO: 6347 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; 6348 r = 0; 6349 break; 6350 case KVM_CAP_EXCEPTION_PAYLOAD: 6351 kvm->arch.exception_payload_enabled = cap->args[0]; 6352 r = 0; 6353 break; 6354 case KVM_CAP_X86_TRIPLE_FAULT_EVENT: 6355 kvm->arch.triple_fault_event = cap->args[0]; 6356 r = 0; 6357 break; 6358 case KVM_CAP_X86_USER_SPACE_MSR: 6359 r = -EINVAL; 6360 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK) 6361 break; 6362 kvm->arch.user_space_msr_mask = cap->args[0]; 6363 r = 0; 6364 break; 6365 case KVM_CAP_X86_BUS_LOCK_EXIT: 6366 r = -EINVAL; 6367 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) 6368 break; 6369 6370 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && 6371 (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) 6372 break; 6373 6374 if (kvm_caps.has_bus_lock_exit && 6375 cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) 6376 kvm->arch.bus_lock_detection_enabled = true; 6377 r = 0; 6378 break; 6379 #ifdef CONFIG_X86_SGX_KVM 6380 case KVM_CAP_SGX_ATTRIBUTE: { 6381 unsigned long allowed_attributes = 0; 6382 6383 r = sgx_set_attribute(&allowed_attributes, cap->args[0]); 6384 if (r) 6385 break; 6386 6387 /* KVM only supports the PROVISIONKEY privileged attribute. 
*/ 6388 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && 6389 !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) 6390 kvm->arch.sgx_provisioning_allowed = true; 6391 else 6392 r = -EINVAL; 6393 break; 6394 } 6395 #endif 6396 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM: 6397 r = -EINVAL; 6398 if (!kvm_x86_ops.vm_copy_enc_context_from) 6399 break; 6400 6401 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]); 6402 break; 6403 case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM: 6404 r = -EINVAL; 6405 if (!kvm_x86_ops.vm_move_enc_context_from) 6406 break; 6407 6408 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]); 6409 break; 6410 case KVM_CAP_EXIT_HYPERCALL: 6411 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { 6412 r = -EINVAL; 6413 break; 6414 } 6415 kvm->arch.hypercall_exit_enabled = cap->args[0]; 6416 r = 0; 6417 break; 6418 case KVM_CAP_EXIT_ON_EMULATION_FAILURE: 6419 r = -EINVAL; 6420 if (cap->args[0] & ~1) 6421 break; 6422 kvm->arch.exit_on_emulation_error = cap->args[0]; 6423 r = 0; 6424 break; 6425 case KVM_CAP_PMU_CAPABILITY: 6426 r = -EINVAL; 6427 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) 6428 break; 6429 6430 mutex_lock(&kvm->lock); 6431 if (!kvm->created_vcpus) { 6432 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); 6433 r = 0; 6434 } 6435 mutex_unlock(&kvm->lock); 6436 break; 6437 case KVM_CAP_MAX_VCPU_ID: 6438 r = -EINVAL; 6439 if (cap->args[0] > KVM_MAX_VCPU_IDS) 6440 break; 6441 6442 mutex_lock(&kvm->lock); 6443 if (kvm->arch.max_vcpu_ids == cap->args[0]) { 6444 r = 0; 6445 } else if (!kvm->arch.max_vcpu_ids) { 6446 kvm->arch.max_vcpu_ids = cap->args[0]; 6447 r = 0; 6448 } 6449 mutex_unlock(&kvm->lock); 6450 break; 6451 case KVM_CAP_X86_NOTIFY_VMEXIT: 6452 r = -EINVAL; 6453 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS) 6454 break; 6455 if (!kvm_caps.has_notify_vmexit) 6456 break; 6457 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED)) 6458 break; 6459 mutex_lock(&kvm->lock); 6460 if (!kvm->created_vcpus) { 6461 kvm->arch.notify_window = cap->args[0] >> 32; 6462 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; 6463 r = 0; 6464 } 6465 mutex_unlock(&kvm->lock); 6466 break; 6467 case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES: 6468 r = -EINVAL; 6469 6470 /* 6471 * Since the risk of disabling NX hugepages is a guest crashing 6472 * the system, ensure the userspace process has permission to 6473 * reboot the system. 6474 * 6475 * Note that unlike the reboot() syscall, the process must have 6476 * this capability in the root namespace because exposing 6477 * /dev/kvm into a container does not limit the scope of the 6478 * iTLB multihit bug to that container. In other words, 6479 * this must use capable(), not ns_capable(). 
6480 */ 6481 if (!capable(CAP_SYS_BOOT)) { 6482 r = -EPERM; 6483 break; 6484 } 6485 6486 if (cap->args[0]) 6487 break; 6488 6489 mutex_lock(&kvm->lock); 6490 if (!kvm->created_vcpus) { 6491 kvm->arch.disable_nx_huge_pages = true; 6492 r = 0; 6493 } 6494 mutex_unlock(&kvm->lock); 6495 break; 6496 default: 6497 r = -EINVAL; 6498 break; 6499 } 6500 return r; 6501 } 6502 6503 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow) 6504 { 6505 struct kvm_x86_msr_filter *msr_filter; 6506 6507 msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT); 6508 if (!msr_filter) 6509 return NULL; 6510 6511 msr_filter->default_allow = default_allow; 6512 return msr_filter; 6513 } 6514 6515 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter) 6516 { 6517 u32 i; 6518 6519 if (!msr_filter) 6520 return; 6521 6522 for (i = 0; i < msr_filter->count; i++) 6523 kfree(msr_filter->ranges[i].bitmap); 6524 6525 kfree(msr_filter); 6526 } 6527 6528 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, 6529 struct kvm_msr_filter_range *user_range) 6530 { 6531 unsigned long *bitmap; 6532 size_t bitmap_size; 6533 6534 if (!user_range->nmsrs) 6535 return 0; 6536 6537 if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK) 6538 return -EINVAL; 6539 6540 if (!user_range->flags) 6541 return -EINVAL; 6542 6543 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); 6544 if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE) 6545 return -EINVAL; 6546 6547 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); 6548 if (IS_ERR(bitmap)) 6549 return PTR_ERR(bitmap); 6550 6551 msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) { 6552 .flags = user_range->flags, 6553 .base = user_range->base, 6554 .nmsrs = user_range->nmsrs, 6555 .bitmap = bitmap, 6556 }; 6557 6558 msr_filter->count++; 6559 return 0; 6560 } 6561 6562 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, 6563 struct kvm_msr_filter *filter) 6564 { 6565 struct kvm_x86_msr_filter *new_filter, *old_filter; 6566 bool default_allow; 6567 bool empty = true; 6568 int r; 6569 u32 i; 6570 6571 if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK) 6572 return -EINVAL; 6573 6574 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) 6575 empty &= !filter->ranges[i].nmsrs; 6576 6577 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); 6578 if (empty && !default_allow) 6579 return -EINVAL; 6580 6581 new_filter = kvm_alloc_msr_filter(default_allow); 6582 if (!new_filter) 6583 return -ENOMEM; 6584 6585 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { 6586 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); 6587 if (r) { 6588 kvm_free_msr_filter(new_filter); 6589 return r; 6590 } 6591 } 6592 6593 mutex_lock(&kvm->lock); 6594 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter, 6595 mutex_is_locked(&kvm->lock)); 6596 mutex_unlock(&kvm->lock); 6597 synchronize_srcu(&kvm->srcu); 6598 6599 kvm_free_msr_filter(old_filter); 6600 6601 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED); 6602 6603 return 0; 6604 } 6605 6606 #ifdef CONFIG_KVM_COMPAT 6607 /* for KVM_X86_SET_MSR_FILTER */ 6608 struct kvm_msr_filter_range_compat { 6609 __u32 flags; 6610 __u32 nmsrs; 6611 __u32 base; 6612 __u32 bitmap; 6613 }; 6614 6615 struct kvm_msr_filter_compat { 6616 __u32 flags; 6617 struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; 6618 }; 6619 6620 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) 6621 6622 long 
kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, 6623 unsigned long arg) 6624 { 6625 void __user *argp = (void __user *)arg; 6626 struct kvm *kvm = filp->private_data; 6627 long r = -ENOTTY; 6628 6629 switch (ioctl) { 6630 case KVM_X86_SET_MSR_FILTER_COMPAT: { 6631 struct kvm_msr_filter __user *user_msr_filter = argp; 6632 struct kvm_msr_filter_compat filter_compat; 6633 struct kvm_msr_filter filter; 6634 int i; 6635 6636 if (copy_from_user(&filter_compat, user_msr_filter, 6637 sizeof(filter_compat))) 6638 return -EFAULT; 6639 6640 filter.flags = filter_compat.flags; 6641 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { 6642 struct kvm_msr_filter_range_compat *cr; 6643 6644 cr = &filter_compat.ranges[i]; 6645 filter.ranges[i] = (struct kvm_msr_filter_range) { 6646 .flags = cr->flags, 6647 .nmsrs = cr->nmsrs, 6648 .base = cr->base, 6649 .bitmap = (__u8 *)(ulong)cr->bitmap, 6650 }; 6651 } 6652 6653 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 6654 break; 6655 } 6656 } 6657 6658 return r; 6659 } 6660 #endif 6661 6662 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER 6663 static int kvm_arch_suspend_notifier(struct kvm *kvm) 6664 { 6665 struct kvm_vcpu *vcpu; 6666 unsigned long i; 6667 int ret = 0; 6668 6669 mutex_lock(&kvm->lock); 6670 kvm_for_each_vcpu(i, vcpu, kvm) { 6671 if (!vcpu->arch.pv_time.active) 6672 continue; 6673 6674 ret = kvm_set_guest_paused(vcpu); 6675 if (ret) { 6676 kvm_err("Failed to pause guest VCPU%d: %d\n", 6677 vcpu->vcpu_id, ret); 6678 break; 6679 } 6680 } 6681 mutex_unlock(&kvm->lock); 6682 6683 return ret ? NOTIFY_BAD : NOTIFY_DONE; 6684 } 6685 6686 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state) 6687 { 6688 switch (state) { 6689 case PM_HIBERNATION_PREPARE: 6690 case PM_SUSPEND_PREPARE: 6691 return kvm_arch_suspend_notifier(kvm); 6692 } 6693 6694 return NOTIFY_DONE; 6695 } 6696 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */ 6697 6698 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp) 6699 { 6700 struct kvm_clock_data data = { 0 }; 6701 6702 get_kvmclock(kvm, &data); 6703 if (copy_to_user(argp, &data, sizeof(data))) 6704 return -EFAULT; 6705 6706 return 0; 6707 } 6708 6709 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) 6710 { 6711 struct kvm_arch *ka = &kvm->arch; 6712 struct kvm_clock_data data; 6713 u64 now_raw_ns; 6714 6715 if (copy_from_user(&data, argp, sizeof(data))) 6716 return -EFAULT; 6717 6718 /* 6719 * Only KVM_CLOCK_REALTIME is used, but allow passing the 6720 * result of KVM_GET_CLOCK back to KVM_SET_CLOCK. 6721 */ 6722 if (data.flags & ~KVM_CLOCK_VALID_FLAGS) 6723 return -EINVAL; 6724 6725 kvm_hv_request_tsc_page_update(kvm); 6726 kvm_start_pvclock_update(kvm); 6727 pvclock_update_vm_gtod_copy(kvm); 6728 6729 /* 6730 * This pairs with kvm_guest_time_update(): when masterclock is 6731 * in use, we use master_kernel_ns + kvmclock_offset to set 6732 * unsigned 'system_time' so if we use get_kvmclock_ns() (which 6733 * is slightly ahead) here we risk going negative on unsigned 6734 * 'system_time' when 'data.clock' is very small. 6735 */ 6736 if (data.flags & KVM_CLOCK_REALTIME) { 6737 u64 now_real_ns = ktime_get_real_ns(); 6738 6739 /* 6740 * Avoid stepping the kvmclock backwards. 
6741 */ 6742 if (now_real_ns > data.realtime) 6743 data.clock += now_real_ns - data.realtime; 6744 } 6745 6746 if (ka->use_master_clock) 6747 now_raw_ns = ka->master_kernel_ns; 6748 else 6749 now_raw_ns = get_kvmclock_base_ns(); 6750 ka->kvmclock_offset = data.clock - now_raw_ns; 6751 kvm_end_pvclock_update(kvm); 6752 return 0; 6753 } 6754 6755 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 6756 { 6757 struct kvm *kvm = filp->private_data; 6758 void __user *argp = (void __user *)arg; 6759 int r = -ENOTTY; 6760 /* 6761 * This union makes it completely explicit to gcc-3.x 6762 * that these two variables' stack usage should be 6763 * combined, not added together. 6764 */ 6765 union { 6766 struct kvm_pit_state ps; 6767 struct kvm_pit_state2 ps2; 6768 struct kvm_pit_config pit_config; 6769 } u; 6770 6771 switch (ioctl) { 6772 case KVM_SET_TSS_ADDR: 6773 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); 6774 break; 6775 case KVM_SET_IDENTITY_MAP_ADDR: { 6776 u64 ident_addr; 6777 6778 mutex_lock(&kvm->lock); 6779 r = -EINVAL; 6780 if (kvm->created_vcpus) 6781 goto set_identity_unlock; 6782 r = -EFAULT; 6783 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 6784 goto set_identity_unlock; 6785 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 6786 set_identity_unlock: 6787 mutex_unlock(&kvm->lock); 6788 break; 6789 } 6790 case KVM_SET_NR_MMU_PAGES: 6791 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); 6792 break; 6793 case KVM_CREATE_IRQCHIP: { 6794 mutex_lock(&kvm->lock); 6795 6796 r = -EEXIST; 6797 if (irqchip_in_kernel(kvm)) 6798 goto create_irqchip_unlock; 6799 6800 r = -EINVAL; 6801 if (kvm->created_vcpus) 6802 goto create_irqchip_unlock; 6803 6804 r = kvm_pic_init(kvm); 6805 if (r) 6806 goto create_irqchip_unlock; 6807 6808 r = kvm_ioapic_init(kvm); 6809 if (r) { 6810 kvm_pic_destroy(kvm); 6811 goto create_irqchip_unlock; 6812 } 6813 6814 r = kvm_setup_default_irq_routing(kvm); 6815 if (r) { 6816 kvm_ioapic_destroy(kvm); 6817 kvm_pic_destroy(kvm); 6818 goto create_irqchip_unlock; 6819 } 6820 /* Write kvm->irq_routing before enabling irqchip_in_kernel. 
*/ 6821 smp_wmb(); 6822 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; 6823 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT); 6824 create_irqchip_unlock: 6825 mutex_unlock(&kvm->lock); 6826 break; 6827 } 6828 case KVM_CREATE_PIT: 6829 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; 6830 goto create_pit; 6831 case KVM_CREATE_PIT2: 6832 r = -EFAULT; 6833 if (copy_from_user(&u.pit_config, argp, 6834 sizeof(struct kvm_pit_config))) 6835 goto out; 6836 create_pit: 6837 mutex_lock(&kvm->lock); 6838 r = -EEXIST; 6839 if (kvm->arch.vpit) 6840 goto create_pit_unlock; 6841 r = -ENOMEM; 6842 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); 6843 if (kvm->arch.vpit) 6844 r = 0; 6845 create_pit_unlock: 6846 mutex_unlock(&kvm->lock); 6847 break; 6848 case KVM_GET_IRQCHIP: { 6849 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6850 struct kvm_irqchip *chip; 6851 6852 chip = memdup_user(argp, sizeof(*chip)); 6853 if (IS_ERR(chip)) { 6854 r = PTR_ERR(chip); 6855 goto out; 6856 } 6857 6858 r = -ENXIO; 6859 if (!irqchip_kernel(kvm)) 6860 goto get_irqchip_out; 6861 r = kvm_vm_ioctl_get_irqchip(kvm, chip); 6862 if (r) 6863 goto get_irqchip_out; 6864 r = -EFAULT; 6865 if (copy_to_user(argp, chip, sizeof(*chip))) 6866 goto get_irqchip_out; 6867 r = 0; 6868 get_irqchip_out: 6869 kfree(chip); 6870 break; 6871 } 6872 case KVM_SET_IRQCHIP: { 6873 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 6874 struct kvm_irqchip *chip; 6875 6876 chip = memdup_user(argp, sizeof(*chip)); 6877 if (IS_ERR(chip)) { 6878 r = PTR_ERR(chip); 6879 goto out; 6880 } 6881 6882 r = -ENXIO; 6883 if (!irqchip_kernel(kvm)) 6884 goto set_irqchip_out; 6885 r = kvm_vm_ioctl_set_irqchip(kvm, chip); 6886 set_irqchip_out: 6887 kfree(chip); 6888 break; 6889 } 6890 case KVM_GET_PIT: { 6891 r = -EFAULT; 6892 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 6893 goto out; 6894 r = -ENXIO; 6895 if (!kvm->arch.vpit) 6896 goto out; 6897 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 6898 if (r) 6899 goto out; 6900 r = -EFAULT; 6901 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 6902 goto out; 6903 r = 0; 6904 break; 6905 } 6906 case KVM_SET_PIT: { 6907 r = -EFAULT; 6908 if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 6909 goto out; 6910 mutex_lock(&kvm->lock); 6911 r = -ENXIO; 6912 if (!kvm->arch.vpit) 6913 goto set_pit_out; 6914 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 6915 set_pit_out: 6916 mutex_unlock(&kvm->lock); 6917 break; 6918 } 6919 case KVM_GET_PIT2: { 6920 r = -ENXIO; 6921 if (!kvm->arch.vpit) 6922 goto out; 6923 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); 6924 if (r) 6925 goto out; 6926 r = -EFAULT; 6927 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) 6928 goto out; 6929 r = 0; 6930 break; 6931 } 6932 case KVM_SET_PIT2: { 6933 r = -EFAULT; 6934 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) 6935 goto out; 6936 mutex_lock(&kvm->lock); 6937 r = -ENXIO; 6938 if (!kvm->arch.vpit) 6939 goto set_pit2_out; 6940 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); 6941 set_pit2_out: 6942 mutex_unlock(&kvm->lock); 6943 break; 6944 } 6945 case KVM_REINJECT_CONTROL: { 6946 struct kvm_reinject_control control; 6947 r = -EFAULT; 6948 if (copy_from_user(&control, argp, sizeof(control))) 6949 goto out; 6950 r = -ENXIO; 6951 if (!kvm->arch.vpit) 6952 goto out; 6953 r = kvm_vm_ioctl_reinject(kvm, &control); 6954 break; 6955 } 6956 case KVM_SET_BOOT_CPU_ID: 6957 r = 0; 6958 mutex_lock(&kvm->lock); 6959 if (kvm->created_vcpus) 6960 r = -EBUSY; 6961 else 6962 kvm->arch.bsp_vcpu_id = arg; 6963 mutex_unlock(&kvm->lock); 6964 break; 6965 #ifdef CONFIG_KVM_XEN 
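	/*
	 * Illustrative sketch only: a VMM enables Xen emulation by telling
	 * KVM which MSR the guest will use to request the hypercall page.
	 * Assuming a hypothetical open "vm_fd"; the MSR index 0x40000000 is
	 * an example value, not a KVM constant:
	 *
	 *	struct kvm_xen_hvm_config xhc = { .msr = 0x40000000 };
	 *
	 *	if (ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc) < 0)
	 *		err(1, "KVM_XEN_HVM_CONFIG");
	 */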
6966 case KVM_XEN_HVM_CONFIG: { 6967 struct kvm_xen_hvm_config xhc; 6968 r = -EFAULT; 6969 if (copy_from_user(&xhc, argp, sizeof(xhc))) 6970 goto out; 6971 r = kvm_xen_hvm_config(kvm, &xhc); 6972 break; 6973 } 6974 case KVM_XEN_HVM_GET_ATTR: { 6975 struct kvm_xen_hvm_attr xha; 6976 6977 r = -EFAULT; 6978 if (copy_from_user(&xha, argp, sizeof(xha))) 6979 goto out; 6980 r = kvm_xen_hvm_get_attr(kvm, &xha); 6981 if (!r && copy_to_user(argp, &xha, sizeof(xha))) 6982 r = -EFAULT; 6983 break; 6984 } 6985 case KVM_XEN_HVM_SET_ATTR: { 6986 struct kvm_xen_hvm_attr xha; 6987 6988 r = -EFAULT; 6989 if (copy_from_user(&xha, argp, sizeof(xha))) 6990 goto out; 6991 r = kvm_xen_hvm_set_attr(kvm, &xha); 6992 break; 6993 } 6994 case KVM_XEN_HVM_EVTCHN_SEND: { 6995 struct kvm_irq_routing_xen_evtchn uxe; 6996 6997 r = -EFAULT; 6998 if (copy_from_user(&uxe, argp, sizeof(uxe))) 6999 goto out; 7000 r = kvm_xen_hvm_evtchn_send(kvm, &uxe); 7001 break; 7002 } 7003 #endif 7004 case KVM_SET_CLOCK: 7005 r = kvm_vm_ioctl_set_clock(kvm, argp); 7006 break; 7007 case KVM_GET_CLOCK: 7008 r = kvm_vm_ioctl_get_clock(kvm, argp); 7009 break; 7010 case KVM_SET_TSC_KHZ: { 7011 u32 user_tsc_khz; 7012 7013 r = -EINVAL; 7014 user_tsc_khz = (u32)arg; 7015 7016 if (kvm_caps.has_tsc_control && 7017 user_tsc_khz >= kvm_caps.max_guest_tsc_khz) 7018 goto out; 7019 7020 if (user_tsc_khz == 0) 7021 user_tsc_khz = tsc_khz; 7022 7023 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); 7024 r = 0; 7025 7026 goto out; 7027 } 7028 case KVM_GET_TSC_KHZ: { 7029 r = READ_ONCE(kvm->arch.default_tsc_khz); 7030 goto out; 7031 } 7032 case KVM_MEMORY_ENCRYPT_OP: { 7033 r = -ENOTTY; 7034 if (!kvm_x86_ops.mem_enc_ioctl) 7035 goto out; 7036 7037 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp); 7038 break; 7039 } 7040 case KVM_MEMORY_ENCRYPT_REG_REGION: { 7041 struct kvm_enc_region region; 7042 7043 r = -EFAULT; 7044 if (copy_from_user(&region, argp, sizeof(region))) 7045 goto out; 7046 7047 r = -ENOTTY; 7048 if (!kvm_x86_ops.mem_enc_register_region) 7049 goto out; 7050 7051 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region); 7052 break; 7053 } 7054 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { 7055 struct kvm_enc_region region; 7056 7057 r = -EFAULT; 7058 if (copy_from_user(&region, argp, sizeof(region))) 7059 goto out; 7060 7061 r = -ENOTTY; 7062 if (!kvm_x86_ops.mem_enc_unregister_region) 7063 goto out; 7064 7065 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region); 7066 break; 7067 } 7068 case KVM_HYPERV_EVENTFD: { 7069 struct kvm_hyperv_eventfd hvevfd; 7070 7071 r = -EFAULT; 7072 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd))) 7073 goto out; 7074 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd); 7075 break; 7076 } 7077 case KVM_SET_PMU_EVENT_FILTER: 7078 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); 7079 break; 7080 case KVM_X86_SET_MSR_FILTER: { 7081 struct kvm_msr_filter __user *user_msr_filter = argp; 7082 struct kvm_msr_filter filter; 7083 7084 if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) 7085 return -EFAULT; 7086 7087 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); 7088 break; 7089 } 7090 default: 7091 r = -ENOTTY; 7092 } 7093 out: 7094 return r; 7095 } 7096 7097 static void kvm_probe_feature_msr(u32 msr_index) 7098 { 7099 struct kvm_msr_entry msr = { 7100 .index = msr_index, 7101 }; 7102 7103 if (kvm_get_msr_feature(&msr)) 7104 return; 7105 7106 msr_based_features[num_msr_based_features++] = msr_index; 7107 } 7108 7109 static void kvm_probe_msr_to_save(u32 msr_index) 7110 { 7111 u32 dummy[2]; 7112 7113 if
(rdmsr_safe(msr_index, &dummy[0], &dummy[1])) 7114 return; 7115 7116 /* 7117 * Even MSRs that are valid in the host may not be exposed to guests in 7118 * some cases. 7119 */ 7120 switch (msr_index) { 7121 case MSR_IA32_BNDCFGS: 7122 if (!kvm_mpx_supported()) 7123 return; 7124 break; 7125 case MSR_TSC_AUX: 7126 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) && 7127 !kvm_cpu_cap_has(X86_FEATURE_RDPID)) 7128 return; 7129 break; 7130 case MSR_IA32_UMWAIT_CONTROL: 7131 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) 7132 return; 7133 break; 7134 case MSR_IA32_RTIT_CTL: 7135 case MSR_IA32_RTIT_STATUS: 7136 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) 7137 return; 7138 break; 7139 case MSR_IA32_RTIT_CR3_MATCH: 7140 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7141 !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering)) 7142 return; 7143 break; 7144 case MSR_IA32_RTIT_OUTPUT_BASE: 7145 case MSR_IA32_RTIT_OUTPUT_MASK: 7146 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7147 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) && 7148 !intel_pt_validate_hw_cap(PT_CAP_single_range_output))) 7149 return; 7150 break; 7151 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: 7152 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) || 7153 (msr_index - MSR_IA32_RTIT_ADDR0_A >= 7154 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)) 7155 return; 7156 break; 7157 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX: 7158 if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >= 7159 kvm_pmu_cap.num_counters_gp) 7160 return; 7161 break; 7162 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX: 7163 if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >= 7164 kvm_pmu_cap.num_counters_gp) 7165 return; 7166 break; 7167 case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX: 7168 if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >= 7169 kvm_pmu_cap.num_counters_fixed) 7170 return; 7171 break; 7172 case MSR_AMD64_PERF_CNTR_GLOBAL_CTL: 7173 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS: 7174 case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: 7175 if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) 7176 return; 7177 break; 7178 case MSR_IA32_XFD: 7179 case MSR_IA32_XFD_ERR: 7180 if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) 7181 return; 7182 break; 7183 case MSR_IA32_TSX_CTRL: 7184 if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR)) 7185 return; 7186 break; 7187 default: 7188 break; 7189 } 7190 7191 msrs_to_save[num_msrs_to_save++] = msr_index; 7192 } 7193 7194 static void kvm_init_msr_lists(void) 7195 { 7196 unsigned i; 7197 7198 BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3, 7199 "Please update the fixed PMCs in msrs_to_save_pmu[]"); 7200 7201 num_msrs_to_save = 0; 7202 num_emulated_msrs = 0; 7203 num_msr_based_features = 0; 7204 7205 for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++) 7206 kvm_probe_msr_to_save(msrs_to_save_base[i]); 7207 7208 if (enable_pmu) { 7209 for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++) 7210 kvm_probe_msr_to_save(msrs_to_save_pmu[i]); 7211 } 7212 7213 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 7214 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 7215 continue; 7216 7217 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; 7218 } 7219 7220 for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++) 7221 kvm_probe_feature_msr(i); 7222 7223 for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) 7224 kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]); 7225 } 7226 7227 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, 7228 const 
void *v) 7229 { 7230 int handled = 0; 7231 int n; 7232 7233 do { 7234 n = min(len, 8); 7235 if (!(lapic_in_kernel(vcpu) && 7236 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) 7237 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) 7238 break; 7239 handled += n; 7240 addr += n; 7241 len -= n; 7242 v += n; 7243 } while (len); 7244 7245 return handled; 7246 } 7247 7248 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) 7249 { 7250 int handled = 0; 7251 int n; 7252 7253 do { 7254 n = min(len, 8); 7255 if (!(lapic_in_kernel(vcpu) && 7256 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, 7257 addr, n, v)) 7258 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) 7259 break; 7260 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); 7261 handled += n; 7262 addr += n; 7263 len -= n; 7264 v += n; 7265 } while (len); 7266 7267 return handled; 7268 } 7269 7270 void kvm_set_segment(struct kvm_vcpu *vcpu, 7271 struct kvm_segment *var, int seg) 7272 { 7273 static_call(kvm_x86_set_segment)(vcpu, var, seg); 7274 } 7275 7276 void kvm_get_segment(struct kvm_vcpu *vcpu, 7277 struct kvm_segment *var, int seg) 7278 { 7279 static_call(kvm_x86_get_segment)(vcpu, var, seg); 7280 } 7281 7282 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, 7283 struct x86_exception *exception) 7284 { 7285 struct kvm_mmu *mmu = vcpu->arch.mmu; 7286 gpa_t t_gpa; 7287 7288 BUG_ON(!mmu_is_nested(vcpu)); 7289 7290 /* NPT walks are always user-walks */ 7291 access |= PFERR_USER_MASK; 7292 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); 7293 7294 return t_gpa; 7295 } 7296 7297 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 7298 struct x86_exception *exception) 7299 { 7300 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7301 7302 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7303 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7304 } 7305 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7306 7307 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7308 struct x86_exception *exception) 7309 { 7310 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7311 7312 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 7313 access |= PFERR_WRITE_MASK; 7314 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7315 } 7316 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7317 7318 /* uses this to access any guest's mapped memory without checking CPL */ 7319 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, 7320 struct x86_exception *exception) 7321 { 7322 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7323 7324 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); 7325 } 7326 7327 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7328 struct kvm_vcpu *vcpu, u64 access, 7329 struct x86_exception *exception) 7330 { 7331 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7332 void *data = val; 7333 int r = X86EMUL_CONTINUE; 7334 7335 while (bytes) { 7336 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7337 unsigned offset = addr & (PAGE_SIZE-1); 7338 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); 7339 int ret; 7340 7341 if (gpa == INVALID_GPA) 7342 return X86EMUL_PROPAGATE_FAULT; 7343 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, 7344 offset, toread); 7345 if (ret < 0) { 7346 r = X86EMUL_IO_NEEDED; 7347 goto out; 7348 } 7349 7350 bytes -= toread; 7351 data += toread; 7352 addr += toread; 7353 } 7354 out: 7355 return r; 7356 } 7357 7358 /* used for instruction fetching */ 7359 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, 7360 gva_t addr, void *val, unsigned int bytes, 7361 struct x86_exception *exception) 7362 { 7363 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7364 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7365 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7366 unsigned offset; 7367 int ret; 7368 7369 /* Inline kvm_read_guest_virt_helper for speed. */ 7370 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, 7371 exception); 7372 if (unlikely(gpa == INVALID_GPA)) 7373 return X86EMUL_PROPAGATE_FAULT; 7374 7375 offset = addr & (PAGE_SIZE-1); 7376 if (WARN_ON(offset + bytes > PAGE_SIZE)) 7377 bytes = (unsigned)PAGE_SIZE - offset; 7378 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, 7379 offset, bytes); 7380 if (unlikely(ret < 0)) 7381 return X86EMUL_IO_NEEDED; 7382 7383 return X86EMUL_CONTINUE; 7384 } 7385 7386 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 7387 gva_t addr, void *val, unsigned int bytes, 7388 struct x86_exception *exception) 7389 { 7390 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7391 7392 /* 7393 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED 7394 * is returned, but our callers are not ready for that and they blindly 7395 * call kvm_inject_page_fault. Ensure that they at least do not leak 7396 * uninitialized kernel stack memory into cr2 and error code. 
7397 */ 7398 memset(exception, 0, sizeof(*exception)); 7399 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7400 exception); 7401 } 7402 EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7403 7404 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7405 gva_t addr, void *val, unsigned int bytes, 7406 struct x86_exception *exception, bool system) 7407 { 7408 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7409 u64 access = 0; 7410 7411 if (system) 7412 access |= PFERR_IMPLICIT_ACCESS; 7413 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7414 access |= PFERR_USER_MASK; 7415 7416 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); 7417 } 7418 7419 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, 7420 struct kvm_vcpu *vcpu, u64 access, 7421 struct x86_exception *exception) 7422 { 7423 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7424 void *data = val; 7425 int r = X86EMUL_CONTINUE; 7426 7427 while (bytes) { 7428 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); 7429 unsigned offset = addr & (PAGE_SIZE-1); 7430 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); 7431 int ret; 7432 7433 if (gpa == INVALID_GPA) 7434 return X86EMUL_PROPAGATE_FAULT; 7435 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); 7436 if (ret < 0) { 7437 r = X86EMUL_IO_NEEDED; 7438 goto out; 7439 } 7440 7441 bytes -= towrite; 7442 data += towrite; 7443 addr += towrite; 7444 } 7445 out: 7446 return r; 7447 } 7448 7449 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, 7450 unsigned int bytes, struct x86_exception *exception, 7451 bool system) 7452 { 7453 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7454 u64 access = PFERR_WRITE_MASK; 7455 7456 if (system) 7457 access |= PFERR_IMPLICIT_ACCESS; 7458 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) 7459 access |= PFERR_USER_MASK; 7460 7461 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7462 access, exception); 7463 } 7464 7465 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, 7466 unsigned int bytes, struct x86_exception *exception) 7467 { 7468 /* kvm_write_guest_virt_system can pull in tons of pages. 
*/ 7469 vcpu->arch.l1tf_flush_l1d = true; 7470 7471 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 7472 PFERR_WRITE_MASK, exception); 7473 } 7474 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 7475 7476 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 7477 void *insn, int insn_len) 7478 { 7479 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, 7480 insn, insn_len); 7481 } 7482 7483 int handle_ud(struct kvm_vcpu *vcpu) 7484 { 7485 static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX }; 7486 int fep_flags = READ_ONCE(force_emulation_prefix); 7487 int emul_type = EMULTYPE_TRAP_UD; 7488 char sig[5]; /* ud2; .ascii "kvm" */ 7489 struct x86_exception e; 7490 7491 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) 7492 return 1; 7493 7494 if (fep_flags && 7495 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), 7496 sig, sizeof(sig), &e) == 0 && 7497 memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) { 7498 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF) 7499 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); 7500 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); 7501 emul_type = EMULTYPE_TRAP_UD_FORCED; 7502 } 7503 7504 return kvm_emulate_instruction(vcpu, emul_type); 7505 } 7506 EXPORT_SYMBOL_GPL(handle_ud); 7507 7508 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7509 gpa_t gpa, bool write) 7510 { 7511 /* For APIC access vmexit */ 7512 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7513 return 1; 7514 7515 if (vcpu_match_mmio_gpa(vcpu, gpa)) { 7516 trace_vcpu_match_mmio(gva, gpa, write, true); 7517 return 1; 7518 } 7519 7520 return 0; 7521 } 7522 7523 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 7524 gpa_t *gpa, struct x86_exception *exception, 7525 bool write) 7526 { 7527 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 7528 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 7529 | (write ? PFERR_WRITE_MASK : 0); 7530 7531 /* 7532 * currently PKRU is only applied to ept enabled guest so 7533 * there is no pkey in EPT page table for L1 guest or EPT 7534 * shadow page table for L2 guest. 
7535 */ 7536 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || 7537 !permission_fault(vcpu, vcpu->arch.walk_mmu, 7538 vcpu->arch.mmio_access, 0, access))) { 7539 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | 7540 (gva & (PAGE_SIZE - 1)); 7541 trace_vcpu_match_mmio(gva, *gpa, write, false); 7542 return 1; 7543 } 7544 7545 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7546 7547 if (*gpa == INVALID_GPA) 7548 return -1; 7549 7550 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); 7551 } 7552 7553 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 7554 const void *val, int bytes) 7555 { 7556 int ret; 7557 7558 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); 7559 if (ret < 0) 7560 return 0; 7561 kvm_page_track_write(vcpu, gpa, val, bytes); 7562 return 1; 7563 } 7564 7565 struct read_write_emulator_ops { 7566 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, 7567 int bytes); 7568 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, 7569 void *val, int bytes); 7570 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7571 int bytes, void *val); 7572 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, 7573 void *val, int bytes); 7574 bool write; 7575 }; 7576 7577 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 7578 { 7579 if (vcpu->mmio_read_completed) { 7580 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 7581 vcpu->mmio_fragments[0].gpa, val); 7582 vcpu->mmio_read_completed = 0; 7583 return 1; 7584 } 7585 7586 return 0; 7587 } 7588 7589 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7590 void *val, int bytes) 7591 { 7592 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); 7593 } 7594 7595 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, 7596 void *val, int bytes) 7597 { 7598 return emulator_write_phys(vcpu, gpa, val, bytes); 7599 } 7600 7601 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) 7602 { 7603 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); 7604 return vcpu_mmio_write(vcpu, gpa, bytes, val); 7605 } 7606 7607 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7608 void *val, int bytes) 7609 { 7610 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); 7611 return X86EMUL_IO_NEEDED; 7612 } 7613 7614 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 7615 void *val, int bytes) 7616 { 7617 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; 7618 7619 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); 7620 return X86EMUL_CONTINUE; 7621 } 7622 7623 static const struct read_write_emulator_ops read_emultor = { 7624 .read_write_prepare = read_prepare, 7625 .read_write_emulate = read_emulate, 7626 .read_write_mmio = vcpu_mmio_read, 7627 .read_write_exit_mmio = read_exit_mmio, 7628 }; 7629 7630 static const struct read_write_emulator_ops write_emultor = { 7631 .read_write_emulate = write_emulate, 7632 .read_write_mmio = write_mmio, 7633 .read_write_exit_mmio = write_exit_mmio, 7634 .write = true, 7635 }; 7636 7637 static int emulator_read_write_onepage(unsigned long addr, void *val, 7638 unsigned int bytes, 7639 struct x86_exception *exception, 7640 struct kvm_vcpu *vcpu, 7641 const struct read_write_emulator_ops *ops) 7642 { 7643 gpa_t gpa; 7644 int handled, ret; 7645 bool write = ops->write; 7646 struct kvm_mmio_fragment *frag; 7647 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 7648 7649 /* 7650 * If the exit was due to a NPF we may already have a GPA. 
* If the GPA is present, use it to avoid the GVA to GPA table walk. 7652 * Note, this cannot be used on string operations, since a string 7653 * operation using REP will only have the initial GPA from when the NPF 7654 * occurred. 7655 */ 7656 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && 7657 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { 7658 gpa = ctxt->gpa_val; 7659 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); 7660 } else { 7661 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 7662 if (ret < 0) 7663 return X86EMUL_PROPAGATE_FAULT; 7664 } 7665 7666 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) 7667 return X86EMUL_CONTINUE; 7668 7669 /* 7670 * Is this MMIO handled locally? 7671 */ 7672 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); 7673 if (handled == bytes) 7674 return X86EMUL_CONTINUE; 7675 7676 gpa += handled; 7677 bytes -= handled; 7678 val += handled; 7679 7680 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); 7681 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; 7682 frag->gpa = gpa; 7683 frag->data = val; 7684 frag->len = bytes; 7685 return X86EMUL_CONTINUE; 7686 } 7687 7688 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, 7689 unsigned long addr, 7690 void *val, unsigned int bytes, 7691 struct x86_exception *exception, 7692 const struct read_write_emulator_ops *ops) 7693 { 7694 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7695 gpa_t gpa; 7696 int rc; 7697 7698 if (ops->read_write_prepare && 7699 ops->read_write_prepare(vcpu, val, bytes)) 7700 return X86EMUL_CONTINUE; 7701 7702 vcpu->mmio_nr_fragments = 0; 7703 7704 /* Crossing a page boundary? */ 7705 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 7706 int now; 7707 7708 now = -addr & ~PAGE_MASK; 7709 rc = emulator_read_write_onepage(addr, val, now, exception, 7710 vcpu, ops); 7711 7712 if (rc != X86EMUL_CONTINUE) 7713 return rc; 7714 addr += now; 7715 if (ctxt->mode != X86EMUL_MODE_PROT64) 7716 addr = (u32)addr; 7717 val += now; 7718 bytes -= now; 7719 } 7720 7721 rc = emulator_read_write_onepage(addr, val, bytes, exception, 7722 vcpu, ops); 7723 if (rc != X86EMUL_CONTINUE) 7724 return rc; 7725 7726 if (!vcpu->mmio_nr_fragments) 7727 return rc; 7728 7729 gpa = vcpu->mmio_fragments[0].gpa; 7730 7731 vcpu->mmio_needed = 1; 7732 vcpu->mmio_cur_fragment = 0; 7733 7734 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); 7735 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; 7736 vcpu->run->exit_reason = KVM_EXIT_MMIO; 7737 vcpu->run->mmio.phys_addr = gpa; 7738 7739 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 7740 } 7741 7742 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 7743 unsigned long addr, 7744 void *val, 7745 unsigned int bytes, 7746 struct x86_exception *exception) 7747 { 7748 return emulator_read_write(ctxt, addr, val, bytes, 7749 exception, &read_emultor); 7750 } 7751 7752 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, 7753 unsigned long addr, 7754 const void *val, 7755 unsigned int bytes, 7756 struct x86_exception *exception) 7757 { 7758 return emulator_read_write(ctxt, addr, (void *)val, bytes, 7759 exception, &write_emultor); 7760 } 7761 7762 #define emulator_try_cmpxchg_user(t, ptr, old, new) \ 7763 (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t)) 7764 7765 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, 7766 unsigned long addr, 7767 const void *old, 7768 const void *new, 7769 unsigned int bytes, 7770 struct
x86_exception *exception) 7771 { 7772 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7773 u64 page_line_mask; 7774 unsigned long hva; 7775 gpa_t gpa; 7776 int r; 7777 7778 /* guests cmpxchg8b have to be emulated atomically */ 7779 if (bytes > 8 || (bytes & (bytes - 1))) 7780 goto emul_write; 7781 7782 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); 7783 7784 if (gpa == INVALID_GPA || 7785 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 7786 goto emul_write; 7787 7788 /* 7789 * Emulate the atomic as a straight write to avoid #AC if SLD is 7790 * enabled in the host and the access splits a cache line. 7791 */ 7792 if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) 7793 page_line_mask = ~(cache_line_size() - 1); 7794 else 7795 page_line_mask = PAGE_MASK; 7796 7797 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) 7798 goto emul_write; 7799 7800 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); 7801 if (kvm_is_error_hva(hva)) 7802 goto emul_write; 7803 7804 hva += offset_in_page(gpa); 7805 7806 switch (bytes) { 7807 case 1: 7808 r = emulator_try_cmpxchg_user(u8, hva, old, new); 7809 break; 7810 case 2: 7811 r = emulator_try_cmpxchg_user(u16, hva, old, new); 7812 break; 7813 case 4: 7814 r = emulator_try_cmpxchg_user(u32, hva, old, new); 7815 break; 7816 case 8: 7817 r = emulator_try_cmpxchg_user(u64, hva, old, new); 7818 break; 7819 default: 7820 BUG(); 7821 } 7822 7823 if (r < 0) 7824 return X86EMUL_UNHANDLEABLE; 7825 if (r) 7826 return X86EMUL_CMPXCHG_FAILED; 7827 7828 kvm_page_track_write(vcpu, gpa, new, bytes); 7829 7830 return X86EMUL_CONTINUE; 7831 7832 emul_write: 7833 pr_warn_once("emulating exchange as write\n"); 7834 7835 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7836 } 7837 7838 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, 7839 unsigned short port, void *data, 7840 unsigned int count, bool in) 7841 { 7842 unsigned i; 7843 int r; 7844 7845 WARN_ON_ONCE(vcpu->arch.pio.count); 7846 for (i = 0; i < count; i++) { 7847 if (in) 7848 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); 7849 else 7850 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); 7851 7852 if (r) { 7853 if (i == 0) 7854 goto userspace_io; 7855 7856 /* 7857 * Userspace must have unregistered the device while PIO 7858 * was running. Drop writes / read as 0. 7859 */ 7860 if (in) 7861 memset(data, 0, size * (count - i)); 7862 break; 7863 } 7864 7865 data += size; 7866 } 7867 return 1; 7868 7869 userspace_io: 7870 vcpu->arch.pio.port = port; 7871 vcpu->arch.pio.in = in; 7872 vcpu->arch.pio.count = count; 7873 vcpu->arch.pio.size = size; 7874 7875 if (in) 7876 memset(vcpu->arch.pio_data, 0, size * count); 7877 else 7878 memcpy(vcpu->arch.pio_data, data, size * count); 7879 7880 vcpu->run->exit_reason = KVM_EXIT_IO; 7881 vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 7882 vcpu->run->io.size = size; 7883 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; 7884 vcpu->run->io.count = count; 7885 vcpu->run->io.port = port; 7886 return 0; 7887 } 7888 7889 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, 7890 unsigned short port, void *val, unsigned int count) 7891 { 7892 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); 7893 if (r) 7894 trace_kvm_pio(KVM_PIO_IN, port, size, count, val); 7895 7896 return r; 7897 } 7898 7899 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) 7900 { 7901 int size = vcpu->arch.pio.size; 7902 unsigned int count = vcpu->arch.pio.count; 7903 memcpy(val, vcpu->arch.pio_data, size * count); 7904 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); 7905 vcpu->arch.pio.count = 0; 7906 } 7907 7908 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, 7909 int size, unsigned short port, void *val, 7910 unsigned int count) 7911 { 7912 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 7913 if (vcpu->arch.pio.count) { 7914 /* 7915 * Complete a previous iteration that required userspace I/O. 7916 * Note, @count isn't guaranteed to match pio.count as userspace 7917 * can modify ECX before rerunning the vCPU. Ignore any such 7918 * shenanigans as KVM doesn't support modifying the rep count, 7919 * and the emulator ensures @count doesn't overflow the buffer. 7920 */ 7921 complete_emulator_pio_in(vcpu, val); 7922 return 1; 7923 } 7924 7925 return emulator_pio_in(vcpu, size, port, val, count); 7926 } 7927 7928 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, 7929 unsigned short port, const void *val, 7930 unsigned int count) 7931 { 7932 trace_kvm_pio(KVM_PIO_OUT, port, size, count, val); 7933 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); 7934 } 7935 7936 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, 7937 int size, unsigned short port, 7938 const void *val, unsigned int count) 7939 { 7940 return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count); 7941 } 7942 7943 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 7944 { 7945 return static_call(kvm_x86_get_segment_base)(vcpu, seg); 7946 } 7947 7948 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) 7949 { 7950 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); 7951 } 7952 7953 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) 7954 { 7955 if (!need_emulate_wbinvd(vcpu)) 7956 return X86EMUL_CONTINUE; 7957 7958 if (static_call(kvm_x86_has_wbinvd_exit)()) { 7959 int cpu = get_cpu(); 7960 7961 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 7962 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, 7963 wbinvd_ipi, NULL, 1); 7964 put_cpu(); 7965 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 7966 } else 7967 wbinvd(); 7968 return X86EMUL_CONTINUE; 7969 } 7970 7971 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) 7972 { 7973 kvm_emulate_wbinvd_noskip(vcpu); 7974 return kvm_skip_emulated_instruction(vcpu); 7975 } 7976 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 7977 7978 7979 7980 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) 7981 { 7982 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); 7983 } 7984 7985 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, 7986 unsigned long *dest) 7987 { 7988 kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); 7989 } 7990 7991 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, 7992 unsigned long value) 7993 { 7994 
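	/*
	 * Note: kvm_set_dr() returns a non-zero value if @value is invalid
	 * for @dr, e.g. sets bits that must be zero, in which case the
	 * emulator reacts by injecting #GP.
	 */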
7995 return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); 7996 } 7997 7998 static u64 mk_cr_64(u64 curr_cr, u32 new_val) 7999 { 8000 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; 8001 } 8002 8003 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) 8004 { 8005 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8006 unsigned long value; 8007 8008 switch (cr) { 8009 case 0: 8010 value = kvm_read_cr0(vcpu); 8011 break; 8012 case 2: 8013 value = vcpu->arch.cr2; 8014 break; 8015 case 3: 8016 value = kvm_read_cr3(vcpu); 8017 break; 8018 case 4: 8019 value = kvm_read_cr4(vcpu); 8020 break; 8021 case 8: 8022 value = kvm_get_cr8(vcpu); 8023 break; 8024 default: 8025 kvm_err("%s: unexpected cr %u\n", __func__, cr); 8026 return 0; 8027 } 8028 8029 return value; 8030 } 8031 8032 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) 8033 { 8034 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8035 int res = 0; 8036 8037 switch (cr) { 8038 case 0: 8039 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); 8040 break; 8041 case 2: 8042 vcpu->arch.cr2 = val; 8043 break; 8044 case 3: 8045 res = kvm_set_cr3(vcpu, val); 8046 break; 8047 case 4: 8048 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); 8049 break; 8050 case 8: 8051 res = kvm_set_cr8(vcpu, val); 8052 break; 8053 default: 8054 kvm_err("%s: unexpected cr %u\n", __func__, cr); 8055 res = -1; 8056 } 8057 8058 return res; 8059 } 8060 8061 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 8062 { 8063 return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 8064 } 8065 8066 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 8067 { 8068 static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 8069 } 8070 8071 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 8072 { 8073 static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 8074 } 8075 8076 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 8077 { 8078 static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 8079 } 8080 8081 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 8082 { 8083 static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 8084 } 8085 8086 static unsigned long emulator_get_cached_segment_base( 8087 struct x86_emulate_ctxt *ctxt, int seg) 8088 { 8089 return get_segment_base(emul_to_vcpu(ctxt), seg); 8090 } 8091 8092 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, 8093 struct desc_struct *desc, u32 *base3, 8094 int seg) 8095 { 8096 struct kvm_segment var; 8097 8098 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); 8099 *selector = var.selector; 8100 8101 if (var.unusable) { 8102 memset(desc, 0, sizeof(*desc)); 8103 if (base3) 8104 *base3 = 0; 8105 return false; 8106 } 8107 8108 if (var.g) 8109 var.limit >>= 12; 8110 set_desc_limit(desc, var.limit); 8111 set_desc_base(desc, (unsigned long)var.base); 8112 #ifdef CONFIG_X86_64 8113 if (base3) 8114 *base3 = var.base >> 32; 8115 #endif 8116 desc->type = var.type; 8117 desc->s = var.s; 8118 desc->dpl = var.dpl; 8119 desc->p = var.present; 8120 desc->avl = var.avl; 8121 desc->l = var.l; 8122 desc->d = var.db; 8123 desc->g = var.g; 8124 8125 return true; 8126 } 8127 8128 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, 8129 struct desc_struct *desc, u32 base3, 8130 int seg) 8131 { 8132 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8133 struct kvm_segment var; 8134 8135 var.selector = selector; 8136 var.base = 
get_desc_base(desc); 8137 #ifdef CONFIG_X86_64 8138 var.base |= ((u64)base3) << 32; 8139 #endif 8140 var.limit = get_desc_limit(desc); 8141 if (desc->g) 8142 var.limit = (var.limit << 12) | 0xfff; 8143 var.type = desc->type; 8144 var.dpl = desc->dpl; 8145 var.db = desc->d; 8146 var.s = desc->s; 8147 var.l = desc->l; 8148 var.g = desc->g; 8149 var.avl = desc->avl; 8150 var.present = desc->p; 8151 var.unusable = !var.present; 8152 var.padding = 0; 8153 8154 kvm_set_segment(vcpu, &var, seg); 8155 return; 8156 } 8157 8158 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8159 u32 msr_index, u64 *pdata) 8160 { 8161 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8162 int r; 8163 8164 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); 8165 if (r < 0) 8166 return X86EMUL_UNHANDLEABLE; 8167 8168 if (r) { 8169 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, 8170 complete_emulated_rdmsr, r)) 8171 return X86EMUL_IO_NEEDED; 8172 8173 trace_kvm_msr_read_ex(msr_index); 8174 return X86EMUL_PROPAGATE_FAULT; 8175 } 8176 8177 trace_kvm_msr_read(msr_index, *pdata); 8178 return X86EMUL_CONTINUE; 8179 } 8180 8181 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt, 8182 u32 msr_index, u64 data) 8183 { 8184 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 8185 int r; 8186 8187 r = kvm_set_msr_with_filter(vcpu, msr_index, data); 8188 if (r < 0) 8189 return X86EMUL_UNHANDLEABLE; 8190 8191 if (r) { 8192 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, 8193 complete_emulated_msr_access, r)) 8194 return X86EMUL_IO_NEEDED; 8195 8196 trace_kvm_msr_write_ex(msr_index, data); 8197 return X86EMUL_PROPAGATE_FAULT; 8198 } 8199 8200 trace_kvm_msr_write(msr_index, data); 8201 return X86EMUL_CONTINUE; 8202 } 8203 8204 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, 8205 u32 msr_index, u64 *pdata) 8206 { 8207 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); 8208 } 8209 8210 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, 8211 u32 pmc) 8212 { 8213 if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc)) 8214 return 0; 8215 return -EINVAL; 8216 } 8217 8218 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, 8219 u32 pmc, u64 *pdata) 8220 { 8221 return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); 8222 } 8223 8224 static void emulator_halt(struct x86_emulate_ctxt *ctxt) 8225 { 8226 emul_to_vcpu(ctxt)->arch.halt_request = 1; 8227 } 8228 8229 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 8230 struct x86_instruction_info *info, 8231 enum x86_intercept_stage stage) 8232 { 8233 return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 8234 &ctxt->exception); 8235 } 8236 8237 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, 8238 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, 8239 bool exact_only) 8240 { 8241 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); 8242 } 8243 8244 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt) 8245 { 8246 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE); 8247 } 8248 8249 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) 8250 { 8251 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); 8252 } 8253 8254 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) 8255 { 8256 return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); 8257 } 8258 8259 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) 8260 { 8261 return 
kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
}

static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
{
	return is_smm(emul_to_vcpu(ctxt));
}

static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
{
	return is_guest_mode(emul_to_vcpu(ctxt));
}

#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	WARN_ON_ONCE(1);
	return X86EMUL_UNHANDLEABLE;
}
#endif

static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}

static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
{
	return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
}

static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
{
	struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;

	if (!kvm->vm_bugged)
		kvm_vm_bugged(kvm);
}

static const struct x86_emulate_ops emulate_ops = {
	.vm_bugged = emulator_vm_bugged,
	.read_gpr = emulator_read_gpr,
	.write_gpr = emulator_write_gpr,
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.fetch = kvm_fetch_guest_virt,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
	.invlpg = emulator_invlpg,
	.pio_in_emulated = emulator_pio_in_emulated,
	.pio_out_emulated = emulator_pio_out_emulated,
	.get_segment = emulator_get_segment,
	.set_segment = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt = emulator_get_gdt,
	.get_idt = emulator_get_idt,
	.set_gdt = emulator_set_gdt,
	.set_idt = emulator_set_idt,
	.get_cr = emulator_get_cr,
	.set_cr = emulator_set_cr,
	.cpl = emulator_get_cpl,
	.get_dr = emulator_get_dr,
	.set_dr = emulator_set_dr,
	.set_msr_with_filter = emulator_set_msr_with_filter,
	.get_msr_with_filter = emulator_get_msr_with_filter,
	.get_msr = emulator_get_msr,
	.check_pmc = emulator_check_pmc,
	.read_pmc = emulator_read_pmc,
	.halt = emulator_halt,
	.wbinvd = emulator_wbinvd,
	.fix_hypercall = emulator_fix_hypercall,
	.intercept = emulator_intercept,
	.get_cpuid = emulator_get_cpuid,
	.guest_has_movbe = emulator_guest_has_movbe,
	.guest_has_fxsr = emulator_guest_has_fxsr,
	.guest_has_rdpid = emulator_guest_has_rdpid,
	.set_nmi_mask = emulator_set_nmi_mask,
	.is_smm = emulator_is_smm,
	.is_guest_mode = emulator_is_guest_mode,
	.leave_smm = emulator_leave_smm,
	.triple_fault = emulator_triple_fault,
	.set_xcr = emulator_set_xcr,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
	/*
	 * An STI; STI sequence only disables interrupts for the first
	 * instruction.  So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an STI.  We should not
	 * leave the flag on in this case.  The same goes for MOV SS.
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	if (ctxt->exception.vector == PF_VECTOR)
		kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
	else if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
}

static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt;

	ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
	if (!ctxt) {
		pr_err("failed to allocate vcpu's emulator\n");
		return NULL;
	}

	ctxt->vcpu = vcpu;
	ctxt->ops = &emulate_ops;
	vcpu->arch.emulate_ctxt = ctxt;

	return ctxt;
}

static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);

	ctxt->gpa_available = false;
	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 :
		     cs_db ? X86EMUL_MODE_PROT32 :
			     X86EMUL_MODE_PROT16;
	ctxt->interruptibility = 0;
	ctxt->have_exception = false;
	ctxt->exception.vector = -1;
	ctxt->perm_ok = false;

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	} else {
		ctxt->eip = ctxt->_eip;
		kvm_rip_write(vcpu, ctxt->eip);
		kvm_set_rflags(vcpu, ctxt->eflags);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					   u8 ndata, u8 *insn_bytes, u8 insn_size)
{
	struct kvm_run *run = vcpu->run;
	u64 info[5];
	u8 info_start;

	/*
	 * Zero the whole array used to retrieve the exit info, as casting to
	 * u32 for select entries will leave some chunks uninitialized.
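	 * E.g. an entry retrieved via a (u32 *) cast fills only the low half
	 * of its u64 slot; the memset() below guarantees that the upper half
	 * reads back as zero.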
	 */
	memset(&info, 0, sizeof(info));

	static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1],
					   &info[2], (u32 *)&info[3],
					   (u32 *)&info[4]);

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;

	/*
	 * There's currently space for 13 entries, but 5 are used for the exit
	 * reason and info.  Restrict to 4 to reduce the maintenance burden
	 * when expanding kvm_run.emulation_failure in the future.
	 */
	if (WARN_ON_ONCE(ndata > 4))
		ndata = 4;

	/* Always include the flags as a 'data' entry. */
	info_start = 1;
	run->emulation_failure.flags = 0;

	if (insn_size) {
		BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
			      sizeof(run->emulation_failure.insn_bytes) != 16));
		info_start += 2;
		run->emulation_failure.flags |=
			KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
		run->emulation_failure.insn_size = insn_size;
		memset(run->emulation_failure.insn_bytes, 0x90,
		       sizeof(run->emulation_failure.insn_bytes));
		memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
	}

	memcpy(&run->internal.data[info_start], info, sizeof(info));
	memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
	       ndata * sizeof(data[0]));

	run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
}

static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;

	prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
				       ctxt->fetch.end - ctxt->fetch.data);
}

void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					  u8 ndata)
{
	prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
}
EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);

void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
{
	__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);

static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
	struct kvm *kvm = vcpu->kvm;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);

	if (emulation_type & EMULTYPE_VMWARE_GP) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	if (kvm->arch.exit_on_emulation_error ||
	    (emulation_type & EMULTYPE_SKIP)) {
		prepare_emulation_ctxt_failure_exit(vcpu);
		return 0;
	}

	kvm_queue_exception(vcpu, UD_VECTOR);

	if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
		prepare_emulation_ctxt_failure_exit(vcpu);
		return 0;
	}

	return 1;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				  int emulation_type)
{
	gpa_t gpa = cr2_or_gpa;
	kvm_pfn_t pfn;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
		return false;

	if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
	    WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
		return false;

	if (!vcpu->arch.mmu->root_role.direct) {
		/*
		 * Write permission should be allowed since only
		 * write access needs to be emulated.
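		 * (Shadow paging write-protects guest page tables, so a read
		 * access would not have faulted here in the first place.)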
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

		/*
		 * If the mapping is invalid in the guest, let the CPU retry
		 * it to generate the fault.
		 */
		if (gpa == INVALID_GPA)
			return true;
	}

	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will go into an infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it cannot be fixed;
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu->root_role.direct) {
		unsigned int indirect_shadow_pages;

		write_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		write_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

		return true;
	}

	/*
	 * If emulation was due to an access to a shadowed page table and it
	 * failed, try to unshadow the page and re-enter the guest to let the
	 * CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it cannot be fixed by
	 * unprotecting the shadow page, and it should be reported to
	 * userspace.
	 */
	return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      gpa_t cr2_or_gpa, int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and the instruction is not a
	 * page-table-writing instruction, it means the VM-EXIT was caused by
	 * shadow page protection; we can zap the shadow page and retry the
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go into an infinite loop.  So, we cache the
	 * last retried EIP and the last fault address; if we see the same
	 * EIP and address again, we can break out of the potential infinite
	 * loop.
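	 * (Hence the cache is cleared on every attempt below and only
	 * re-armed when a retry is actually performed.)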
8648 */ 8649 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 8650 8651 if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF)) 8652 return false; 8653 8654 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || 8655 WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF))) 8656 return false; 8657 8658 if (x86_page_table_writing_insn(ctxt)) 8659 return false; 8660 8661 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) 8662 return false; 8663 8664 vcpu->arch.last_retry_eip = ctxt->eip; 8665 vcpu->arch.last_retry_addr = cr2_or_gpa; 8666 8667 if (!vcpu->arch.mmu->root_role.direct) 8668 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); 8669 8670 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); 8671 8672 return true; 8673 } 8674 8675 static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 8676 static int complete_emulated_pio(struct kvm_vcpu *vcpu); 8677 8678 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, 8679 unsigned long *db) 8680 { 8681 u32 dr6 = 0; 8682 int i; 8683 u32 enable, rwlen; 8684 8685 enable = dr7; 8686 rwlen = dr7 >> 16; 8687 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) 8688 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) 8689 dr6 |= (1 << i); 8690 return dr6; 8691 } 8692 8693 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) 8694 { 8695 struct kvm_run *kvm_run = vcpu->run; 8696 8697 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 8698 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; 8699 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); 8700 kvm_run->debug.arch.exception = DB_VECTOR; 8701 kvm_run->exit_reason = KVM_EXIT_DEBUG; 8702 return 0; 8703 } 8704 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); 8705 return 1; 8706 } 8707 8708 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 8709 { 8710 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 8711 int r; 8712 8713 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 8714 if (unlikely(!r)) 8715 return 0; 8716 8717 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 8718 8719 /* 8720 * rflags is the old, "raw" value of the flags. The new value has 8721 * not been saved yet. 8722 * 8723 * This is correct even for TF set by the guest, because "the 8724 * processor will not generate this exception after the instruction 8725 * that sets the TF flag". 8726 */ 8727 if (unlikely(rflags & X86_EFLAGS_TF)) 8728 r = kvm_vcpu_do_singlestep(vcpu); 8729 return r; 8730 } 8731 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 8732 8733 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) 8734 { 8735 u32 shadow; 8736 8737 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) 8738 return true; 8739 8740 /* 8741 * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active, 8742 * but AMD CPUs do not. MOV/POP SS blocking is rare, check that first 8743 * to avoid the relatively expensive CPUID lookup. 8744 */ 8745 shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 8746 return (shadow & KVM_X86_SHADOW_INT_MOV_SS) && 8747 guest_cpuid_is_intel(vcpu); 8748 } 8749 8750 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, 8751 int emulation_type, int *r) 8752 { 8753 WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE); 8754 8755 /* 8756 * Do not check for code breakpoints if hardware has already done the 8757 * checks, as inferred from the emulation type. On NO_DECODE and SKIP, 8758 * the instruction has passed all exception checks, and all intercepted 8759 * exceptions that trigger emulation have lower priority than code 8760 * breakpoints, i.e. 
the fact that the intercepted exception occurred
	 * means any code breakpoints have already been serviced.
	 *
	 * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
	 * hardware has checked the RIP of the magic prefix, but not the RIP of
	 * the instruction being emulated.  The intent of forced emulation is
	 * to behave as if KVM intercepted the instruction without an exception
	 * and without a prefix.
	 */
	if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
			      EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
		return false;

	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.guest_debug_dr7,
					       vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = 0;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !kvm_is_code_breakpoint_inhibited(vcpu)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					       vcpu->arch.dr7,
					       vcpu->arch.db);

		if (dr6 != 0) {
			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
			*r = 1;
			return true;
		}
	}

	return false;
}

static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->opcode_len) {
	case 1:
		switch (ctxt->b) {
		case 0xe4:	/* IN */
		case 0xe5:
		case 0xec:
		case 0xed:
		case 0xe6:	/* OUT */
		case 0xe7:
		case 0xee:
		case 0xef:
		case 0x6c:	/* INS */
		case 0x6d:
		case 0x6e:	/* OUTS */
		case 0x6f:
			return true;
		}
		break;
	case 2:
		switch (ctxt->b) {
		case 0x33:	/* RDPMC */
			return true;
		}
		break;
	}

	return false;
}

/*
 * Decode an instruction for emulation.  The caller is responsible for handling
 * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
 * (and wrong) when emulating on an intercepted fault-like exception[*], as
 * code breakpoints have higher priority and thus have already been checked
 * by hardware.
 *
 * [*] Except #MC, which is higher priority, but KVM should never emulate in
 *     response to a machine check.
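 *
 * Returns the decode status, i.e. EMULATION_OK on success and
 * EMULATION_FAILED otherwise (see x86_decode_insn()).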
 */
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len)
{
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	int r;

	init_emulate_ctxt(vcpu);

	r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);

	trace_kvm_emulate_insn_start(vcpu);
	++vcpu->stat.insn_emulation;

	return r;
}
EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);

int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
	bool writeback = true;

	if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
		return 1;

	vcpu->arch.l1tf_flush_l1d = true;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		kvm_clear_exception_queue(vcpu);

		/*
		 * Return immediately if RIP hits a code breakpoint, such #DBs
		 * are fault-like and are higher priority than any faults on
		 * the code fetch itself.
		 */
		if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
			return r;

		r = x86_decode_emulated_instruction(vcpu, emulation_type,
						    insn, insn_len);
		if (r != EMULATION_OK) {
			if ((emulation_type & EMULTYPE_TRAP_UD) ||
			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
				kvm_queue_exception(vcpu, UD_VECTOR);
				return 1;
			}
			if (reexecute_instruction(vcpu, cr2_or_gpa,
						  emulation_type))
				return 1;

			if (ctxt->have_exception &&
			    !(emulation_type & EMULTYPE_SKIP)) {
				/*
				 * A #UD should result in just EMULATION_FAILED,
				 * and trap-like exceptions should not be
				 * encountered during decode.
				 */
				WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
					     exception_type(ctxt->exception.vector) == EXCPT_TRAP);
				inject_emulated_exception(vcpu);
				return 1;
			}
			return handle_emulation_failure(vcpu, emulation_type);
		}
	}

	if ((emulation_type & EMULTYPE_VMWARE_GP) &&
	    !is_vmware_backdoor_opcode(ctxt)) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	/*
	 * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
	 * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
	 * The caller is responsible for updating interruptibility state and
	 * injecting single-step #DBs.
	 */
	if (emulation_type & EMULTYPE_SKIP) {
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			ctxt->eip = (u32)ctxt->_eip;
		else
			ctxt->eip = ctxt->_eip;

		if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
			r = 1;
			goto writeback;
		}

		kvm_rip_write(vcpu, ctxt->eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return 1;
	}

	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
		return 1;

	/*
	 * This is needed for the VMware backdoor interface to work since it
	 * changes register values during the IO operation.
	 */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	if (emulation_type & EMULTYPE_PF) {
		/* Save the faulting GPA (cr2) in the address field */
		ctxt->exception.address = cr2_or_gpa;

		/*
		 * With shadow page tables, cr2 contains a GVA or nGPA.
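		 * With a direct MMU (TDP), cr2_or_gpa already holds the
		 * faulting GPA, which is why gpa_available is only set in the
		 * direct case below.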
*/ 8961 if (vcpu->arch.mmu->root_role.direct) { 8962 ctxt->gpa_available = true; 8963 ctxt->gpa_val = cr2_or_gpa; 8964 } 8965 } else { 8966 /* Sanitize the address out of an abundance of paranoia. */ 8967 ctxt->exception.address = 0; 8968 } 8969 8970 r = x86_emulate_insn(ctxt); 8971 8972 if (r == EMULATION_INTERCEPTED) 8973 return 1; 8974 8975 if (r == EMULATION_FAILED) { 8976 if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type)) 8977 return 1; 8978 8979 return handle_emulation_failure(vcpu, emulation_type); 8980 } 8981 8982 if (ctxt->have_exception) { 8983 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write); 8984 vcpu->mmio_needed = false; 8985 r = 1; 8986 inject_emulated_exception(vcpu); 8987 } else if (vcpu->arch.pio.count) { 8988 if (!vcpu->arch.pio.in) { 8989 /* FIXME: return into emulator if single-stepping. */ 8990 vcpu->arch.pio.count = 0; 8991 } else { 8992 writeback = false; 8993 vcpu->arch.complete_userspace_io = complete_emulated_pio; 8994 } 8995 r = 0; 8996 } else if (vcpu->mmio_needed) { 8997 ++vcpu->stat.mmio_exits; 8998 8999 if (!vcpu->mmio_is_write) 9000 writeback = false; 9001 r = 0; 9002 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 9003 } else if (vcpu->arch.complete_userspace_io) { 9004 writeback = false; 9005 r = 0; 9006 } else if (r == EMULATION_RESTART) 9007 goto restart; 9008 else 9009 r = 1; 9010 9011 writeback: 9012 if (writeback) { 9013 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 9014 toggle_interruptibility(vcpu, ctxt->interruptibility); 9015 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 9016 9017 /* 9018 * Note, EXCPT_DB is assumed to be fault-like as the emulator 9019 * only supports code breakpoints and general detect #DB, both 9020 * of which are fault-like. 9021 */ 9022 if (!ctxt->have_exception || 9023 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { 9024 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); 9025 if (ctxt->is_branch) 9026 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); 9027 kvm_rip_write(vcpu, ctxt->eip); 9028 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 9029 r = kvm_vcpu_do_singlestep(vcpu); 9030 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); 9031 __kvm_set_rflags(vcpu, ctxt->eflags); 9032 } 9033 9034 /* 9035 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 9036 * do nothing, and it will be requested again as soon as 9037 * the shadow expires. But we still need to check here, 9038 * because POPF has no interrupt shadow. 
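		 * E.g. emulating a POPF that sets EFLAGS.IF must open an
		 * injection window immediately, whereas an STI keeps the
		 * window closed for one more instruction.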
9039 */ 9040 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) 9041 kvm_make_request(KVM_REQ_EVENT, vcpu); 9042 } else 9043 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 9044 9045 return r; 9046 } 9047 9048 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) 9049 { 9050 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 9051 } 9052 EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 9053 9054 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 9055 void *insn, int insn_len) 9056 { 9057 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 9058 } 9059 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 9060 9061 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 9062 { 9063 vcpu->arch.pio.count = 0; 9064 return 1; 9065 } 9066 9067 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) 9068 { 9069 vcpu->arch.pio.count = 0; 9070 9071 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) 9072 return 1; 9073 9074 return kvm_skip_emulated_instruction(vcpu); 9075 } 9076 9077 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 9078 unsigned short port) 9079 { 9080 unsigned long val = kvm_rax_read(vcpu); 9081 int ret = emulator_pio_out(vcpu, size, port, &val, 1); 9082 9083 if (ret) 9084 return ret; 9085 9086 /* 9087 * Workaround userspace that relies on old KVM behavior of %rip being 9088 * incremented prior to exiting to userspace to handle "OUT 0x7e". 9089 */ 9090 if (port == 0x7e && 9091 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { 9092 vcpu->arch.complete_userspace_io = 9093 complete_fast_pio_out_port_0x7e; 9094 kvm_skip_emulated_instruction(vcpu); 9095 } else { 9096 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9097 vcpu->arch.complete_userspace_io = complete_fast_pio_out; 9098 } 9099 return 0; 9100 } 9101 9102 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) 9103 { 9104 unsigned long val; 9105 9106 /* We should only ever be called with arch.pio.count equal to 1 */ 9107 BUG_ON(vcpu->arch.pio.count != 1); 9108 9109 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { 9110 vcpu->arch.pio.count = 0; 9111 return 1; 9112 } 9113 9114 /* For size less than 4 we merge, else we zero extend */ 9115 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; 9116 9117 complete_emulator_pio_in(vcpu, &val); 9118 kvm_rax_write(vcpu, val); 9119 9120 return kvm_skip_emulated_instruction(vcpu); 9121 } 9122 9123 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 9124 unsigned short port) 9125 { 9126 unsigned long val; 9127 int ret; 9128 9129 /* For size less than 4 we merge, else we zero extend */ 9130 val = (size < 4) ? 
kvm_rax_read(vcpu) : 0; 9131 9132 ret = emulator_pio_in(vcpu, size, port, &val, 1); 9133 if (ret) { 9134 kvm_rax_write(vcpu, val); 9135 return ret; 9136 } 9137 9138 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); 9139 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 9140 9141 return 0; 9142 } 9143 9144 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 9145 { 9146 int ret; 9147 9148 if (in) 9149 ret = kvm_fast_pio_in(vcpu, size, port); 9150 else 9151 ret = kvm_fast_pio_out(vcpu, size, port); 9152 return ret && kvm_skip_emulated_instruction(vcpu); 9153 } 9154 EXPORT_SYMBOL_GPL(kvm_fast_pio); 9155 9156 static int kvmclock_cpu_down_prep(unsigned int cpu) 9157 { 9158 __this_cpu_write(cpu_tsc_khz, 0); 9159 return 0; 9160 } 9161 9162 static void tsc_khz_changed(void *data) 9163 { 9164 struct cpufreq_freqs *freq = data; 9165 unsigned long khz; 9166 9167 WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC)); 9168 9169 if (data) 9170 khz = freq->new; 9171 else 9172 khz = cpufreq_quick_get(raw_smp_processor_id()); 9173 if (!khz) 9174 khz = tsc_khz; 9175 __this_cpu_write(cpu_tsc_khz, khz); 9176 } 9177 9178 #ifdef CONFIG_X86_64 9179 static void kvm_hyperv_tsc_notifier(void) 9180 { 9181 struct kvm *kvm; 9182 int cpu; 9183 9184 mutex_lock(&kvm_lock); 9185 list_for_each_entry(kvm, &vm_list, vm_list) 9186 kvm_make_mclock_inprogress_request(kvm); 9187 9188 /* no guest entries from this point */ 9189 hyperv_stop_tsc_emulation(); 9190 9191 /* TSC frequency always matches when on Hyper-V */ 9192 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9193 for_each_present_cpu(cpu) 9194 per_cpu(cpu_tsc_khz, cpu) = tsc_khz; 9195 } 9196 kvm_caps.max_guest_tsc_khz = tsc_khz; 9197 9198 list_for_each_entry(kvm, &vm_list, vm_list) { 9199 __kvm_start_pvclock_update(kvm); 9200 pvclock_update_vm_gtod_copy(kvm); 9201 kvm_end_pvclock_update(kvm); 9202 } 9203 9204 mutex_unlock(&kvm_lock); 9205 } 9206 #endif 9207 9208 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu) 9209 { 9210 struct kvm *kvm; 9211 struct kvm_vcpu *vcpu; 9212 int send_ipi = 0; 9213 unsigned long i; 9214 9215 /* 9216 * We allow guests to temporarily run on slowing clocks, 9217 * provided we notify them after, or to run on accelerating 9218 * clocks, provided we notify them before. Thus time never 9219 * goes backwards. 9220 * 9221 * However, we have a problem. We can't atomically update 9222 * the frequency of a given CPU from this function; it is 9223 * merely a notifier, which can be called from any CPU. 9224 * Changing the TSC frequency at arbitrary points in time 9225 * requires a recomputation of local variables related to 9226 * the TSC for each VCPU. We must flag these local variables 9227 * to be updated and be sure the update takes place with the 9228 * new frequency before any guests proceed. 9229 * 9230 * Unfortunately, the combination of hotplug CPU and frequency 9231 * change creates an intractable locking scenario; the order 9232 * of when these callouts happen is undefined with respect to 9233 * CPU hotplug, and they can race with each other. As such, 9234 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is 9235 * undefined; you can actually have a CPU frequency change take 9236 * place in between the computation of X and the setting of the 9237 * variable. 
To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 */

	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != raw_smp_processor_id())
				send_ipi = 1;
		}
	}
	mutex_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with the new
		 * frequency; otherwise we risk that the guest sees time go
		 * backwards.
		 *
		 * In case we update the frequency for another CPU (which might
		 * be in guest context), send an interrupt to kick the CPU out
		 * of guest context.  Next time guest context is entered,
		 * kvmclock will be updated, so the guest will not see stale
		 * values.
		 */
		smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
	}
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu;

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	for_each_cpu(cpu, freq->policy->cpus)
		__kvmclock_cpufreq_notifier(freq, cpu);

	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_online(unsigned int cpu)
{
	tsc_khz_changed(NULL);
	return 0;
}

static void kvm_timer_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		max_tsc_khz = tsc_khz;

		if (IS_ENABLED(CONFIG_CPU_FREQ)) {
			struct cpufreq_policy *policy;
			int cpu;

			cpu = get_cpu();
			policy = cpufreq_cpu_get(cpu);
			if (policy) {
				if (policy->cpuinfo.max_freq)
					max_tsc_khz = policy->cpuinfo.max_freq;
				cpufreq_cpu_put(policy);
			}
			put_cpu();
		}
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);

		cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
				  kvmclock_cpu_online, kvmclock_cpu_down_prep);
	}
}

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE,
vcpu); 9349 atomic_set(&kvm_guest_has_master_clock, 0); 9350 mutex_unlock(&kvm_lock); 9351 } 9352 9353 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); 9354 9355 /* 9356 * Indirection to move queue_work() out of the tk_core.seq write held 9357 * region to prevent possible deadlocks against time accessors which 9358 * are invoked with work related locks held. 9359 */ 9360 static void pvclock_irq_work_fn(struct irq_work *w) 9361 { 9362 queue_work(system_long_wq, &pvclock_gtod_work); 9363 } 9364 9365 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); 9366 9367 /* 9368 * Notification about pvclock gtod data update. 9369 */ 9370 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, 9371 void *priv) 9372 { 9373 struct pvclock_gtod_data *gtod = &pvclock_gtod_data; 9374 struct timekeeper *tk = priv; 9375 9376 update_pvclock_gtod(tk); 9377 9378 /* 9379 * Disable master clock if host does not trust, or does not use, 9380 * TSC based clocksource. Delegate queue_work() to irq_work as 9381 * this is invoked with tk_core.seq write held. 9382 */ 9383 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && 9384 atomic_read(&kvm_guest_has_master_clock) != 0) 9385 irq_work_queue(&pvclock_irq_work); 9386 return 0; 9387 } 9388 9389 static struct notifier_block pvclock_gtod_notifier = { 9390 .notifier_call = pvclock_gtod_notify, 9391 }; 9392 #endif 9393 9394 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) 9395 { 9396 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); 9397 9398 #define __KVM_X86_OP(func) \ 9399 static_call_update(kvm_x86_##func, kvm_x86_ops.func); 9400 #define KVM_X86_OP(func) \ 9401 WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) 9402 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP 9403 #define KVM_X86_OP_OPTIONAL_RET0(func) \ 9404 static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \ 9405 (void *)__static_call_return0); 9406 #include <asm/kvm-x86-ops.h> 9407 #undef __KVM_X86_OP 9408 9409 kvm_pmu_ops_update(ops->pmu_ops); 9410 } 9411 9412 static int kvm_x86_check_processor_compatibility(void) 9413 { 9414 int cpu = smp_processor_id(); 9415 struct cpuinfo_x86 *c = &cpu_data(cpu); 9416 9417 /* 9418 * Compatibility checks are done when loading KVM and when enabling 9419 * hardware, e.g. during CPU hotplug, to ensure all online CPUs are 9420 * compatible, i.e. KVM should never perform a compatibility check on 9421 * an offline CPU. 9422 */ 9423 WARN_ON(!cpu_online(cpu)); 9424 9425 if (__cr4_reserved_bits(cpu_has, c) != 9426 __cr4_reserved_bits(cpu_has, &boot_cpu_data)) 9427 return -EIO; 9428 9429 return static_call(kvm_x86_check_processor_compatibility)(); 9430 } 9431 9432 static void kvm_x86_check_cpu_compat(void *ret) 9433 { 9434 *(int *)ret = kvm_x86_check_processor_compatibility(); 9435 } 9436 9437 static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) 9438 { 9439 u64 host_pat; 9440 int r, cpu; 9441 9442 if (kvm_x86_ops.hardware_enable) { 9443 pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name); 9444 return -EEXIST; 9445 } 9446 9447 /* 9448 * KVM explicitly assumes that the guest has an FPU and 9449 * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU explicitly casts the 9450 * vCPU's FPU state as a fxregs_state struct. 
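	 * A host without FXSR could not back that ABI at all, hence the hard
	 * failure below.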
9451 */ 9452 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9453 pr_err("inadequate fpu\n"); 9454 return -EOPNOTSUPP; 9455 } 9456 9457 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9458 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n"); 9459 return -EOPNOTSUPP; 9460 } 9461 9462 /* 9463 * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes 9464 * the PAT bits in SPTEs. Bail if PAT[0] is programmed to something 9465 * other than WB. Note, EPT doesn't utilize the PAT, but don't bother 9466 * with an exception. PAT[0] is set to WB on RESET and also by the 9467 * kernel, i.e. failure indicates a kernel bug or broken firmware. 9468 */ 9469 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9470 (host_pat & GENMASK(2, 0)) != 6) { 9471 pr_err("host PAT[0] is not WB\n"); 9472 return -EIO; 9473 } 9474 9475 x86_emulator_cache = kvm_alloc_emulator_cache(); 9476 if (!x86_emulator_cache) { 9477 pr_err("failed to allocate cache for x86 emulator\n"); 9478 return -ENOMEM; 9479 } 9480 9481 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9482 if (!user_return_msrs) { 9483 pr_err("failed to allocate percpu kvm_user_return_msrs\n"); 9484 r = -ENOMEM; 9485 goto out_free_x86_emulator_cache; 9486 } 9487 kvm_nr_uret_msrs = 0; 9488 9489 r = kvm_mmu_vendor_module_init(); 9490 if (r) 9491 goto out_free_percpu; 9492 9493 if (boot_cpu_has(X86_FEATURE_XSAVE)) { 9494 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); 9495 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0; 9496 } 9497 9498 rdmsrl_safe(MSR_EFER, &host_efer); 9499 9500 if (boot_cpu_has(X86_FEATURE_XSAVES)) 9501 rdmsrl(MSR_IA32_XSS, host_xss); 9502 9503 kvm_init_pmu_capability(ops->pmu_ops); 9504 9505 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 9506 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities); 9507 9508 r = ops->hardware_setup(); 9509 if (r != 0) 9510 goto out_mmu_exit; 9511 9512 kvm_ops_update(ops); 9513 9514 for_each_online_cpu(cpu) { 9515 smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1); 9516 if (r < 0) 9517 goto out_unwind_ops; 9518 } 9519 9520 /* 9521 * Point of no return! DO NOT add error paths below this point unless 9522 * absolutely necessary, as most operations from this point forward 9523 * require unwinding. 9524 */ 9525 kvm_timer_init(); 9526 9527 if (pi_inject_timer == -1) 9528 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER); 9529 #ifdef CONFIG_X86_64 9530 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); 9531 9532 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9533 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); 9534 #endif 9535 9536 kvm_register_perf_callbacks(ops->handle_intel_pt_intr); 9537 9538 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 9539 kvm_caps.supported_xss = 0; 9540 9541 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) 9542 cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_); 9543 #undef __kvm_cpu_cap_has 9544 9545 if (kvm_caps.has_tsc_control) { 9546 /* 9547 * Make sure the user can only configure tsc_khz values that 9548 * fit into a signed integer. 9549 * A min value is not calculated because it will always 9550 * be 1 on all machines. 
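		 * (KVM_GET_TSC_KHZ reports the value as a positive int, so it
		 * must fit in a signed 32-bit integer, hence the clamp to
		 * 0x7fffffff below.)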
9551 */ 9552 u64 max = min(0x7fffffffULL, 9553 __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz)); 9554 kvm_caps.max_guest_tsc_khz = max; 9555 } 9556 kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; 9557 kvm_init_msr_lists(); 9558 return 0; 9559 9560 out_unwind_ops: 9561 kvm_x86_ops.hardware_enable = NULL; 9562 static_call(kvm_x86_hardware_unsetup)(); 9563 out_mmu_exit: 9564 kvm_mmu_vendor_module_exit(); 9565 out_free_percpu: 9566 free_percpu(user_return_msrs); 9567 out_free_x86_emulator_cache: 9568 kmem_cache_destroy(x86_emulator_cache); 9569 return r; 9570 } 9571 9572 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) 9573 { 9574 int r; 9575 9576 mutex_lock(&vendor_module_lock); 9577 r = __kvm_x86_vendor_init(ops); 9578 mutex_unlock(&vendor_module_lock); 9579 9580 return r; 9581 } 9582 EXPORT_SYMBOL_GPL(kvm_x86_vendor_init); 9583 9584 void kvm_x86_vendor_exit(void) 9585 { 9586 kvm_unregister_perf_callbacks(); 9587 9588 #ifdef CONFIG_X86_64 9589 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) 9590 clear_hv_tscchange_cb(); 9591 #endif 9592 kvm_lapic_exit(); 9593 9594 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 9595 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, 9596 CPUFREQ_TRANSITION_NOTIFIER); 9597 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 9598 } 9599 #ifdef CONFIG_X86_64 9600 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); 9601 irq_work_sync(&pvclock_irq_work); 9602 cancel_work_sync(&pvclock_gtod_work); 9603 #endif 9604 static_call(kvm_x86_hardware_unsetup)(); 9605 kvm_mmu_vendor_module_exit(); 9606 free_percpu(user_return_msrs); 9607 kmem_cache_destroy(x86_emulator_cache); 9608 #ifdef CONFIG_KVM_XEN 9609 static_key_deferred_flush(&kvm_xen_enabled); 9610 WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); 9611 #endif 9612 mutex_lock(&vendor_module_lock); 9613 kvm_x86_ops.hardware_enable = NULL; 9614 mutex_unlock(&vendor_module_lock); 9615 } 9616 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit); 9617 9618 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) 9619 { 9620 /* 9621 * The vCPU has halted, e.g. executed HLT. Update the run state if the 9622 * local APIC is in-kernel, the run loop will detect the non-runnable 9623 * state and halt the vCPU. Exit to userspace if the local APIC is 9624 * managed by userspace, in which case userspace is responsible for 9625 * handling wake events. 9626 */ 9627 ++vcpu->stat.halt_exits; 9628 if (lapic_in_kernel(vcpu)) { 9629 vcpu->arch.mp_state = state; 9630 return 1; 9631 } else { 9632 vcpu->run->exit_reason = reason; 9633 return 0; 9634 } 9635 } 9636 9637 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) 9638 { 9639 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 9640 } 9641 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 9642 9643 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 9644 { 9645 int ret = kvm_skip_emulated_instruction(vcpu); 9646 /* 9647 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered 9648 * KVM_EXIT_DEBUG here. 
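	 * In any case, the "&& ret" below ensures that a userspace exit
	 * requested by either the skip (ret == 0) or the halt itself is not
	 * lost.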
	 */
	return kvm_emulate_halt_noskip(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
				  KVM_EXIT_AP_RESET_HOLD) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);

#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
				unsigned long clock_type)
{
	struct kvm_clock_pairing clock_pairing;
	struct timespec64 ts;
	u64 cycle;
	int ret;

	if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
		return -KVM_EOPNOTSUPP;

	/*
	 * When the TSC is in permanent catchup mode, guests won't be able to
	 * use the pvclock_read_retry loop to get a consistent view of pvclock.
	 */
	if (vcpu->arch.tsc_always_catchup)
		return -KVM_EOPNOTSUPP;

	if (!kvm_get_walltime_and_clockread(&ts, &cycle))
		return -KVM_EOPNOTSUPP;

	clock_pairing.sec = ts.tv_sec;
	clock_pairing.nsec = ts.tv_nsec;
	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
	clock_pairing.flags = 0;
	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));

	ret = 0;
	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
			    sizeof(struct kvm_clock_pairing)))
		ret = -KVM_EFAULT;

	return ret;
}
#endif

/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
{
	/*
	 * All other fields are unused for APIC_DM_REMRD, but may be consumed by
	 * common code, e.g. for tracing.  Defer initialization to the compiler.
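	 * (The designated initializer below zero-fills all unnamed fields.)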
9710 */ 9711 struct kvm_lapic_irq lapic_irq = { 9712 .delivery_mode = APIC_DM_REMRD, 9713 .dest_mode = APIC_DEST_PHYSICAL, 9714 .shorthand = APIC_DEST_NOSHORT, 9715 .dest_id = apicid, 9716 }; 9717 9718 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); 9719 } 9720 9721 bool kvm_apicv_activated(struct kvm *kvm) 9722 { 9723 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 9724 } 9725 EXPORT_SYMBOL_GPL(kvm_apicv_activated); 9726 9727 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 9728 { 9729 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); 9730 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); 9731 9732 return (vm_reasons | vcpu_reasons) == 0; 9733 } 9734 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 9735 9736 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 9737 enum kvm_apicv_inhibit reason, bool set) 9738 { 9739 if (set) 9740 __set_bit(reason, inhibits); 9741 else 9742 __clear_bit(reason, inhibits); 9743 9744 trace_kvm_apicv_inhibit_changed(reason, set, *inhibits); 9745 } 9746 9747 static void kvm_apicv_init(struct kvm *kvm) 9748 { 9749 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; 9750 9751 init_rwsem(&kvm->arch.apicv_update_lock); 9752 9753 set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true); 9754 9755 if (!enable_apicv) 9756 set_or_clear_apicv_inhibit(inhibits, 9757 APICV_INHIBIT_REASON_DISABLE, true); 9758 } 9759 9760 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) 9761 { 9762 struct kvm_vcpu *target = NULL; 9763 struct kvm_apic_map *map; 9764 9765 vcpu->stat.directed_yield_attempted++; 9766 9767 if (single_task_running()) 9768 goto no_yield; 9769 9770 rcu_read_lock(); 9771 map = rcu_dereference(vcpu->kvm->arch.apic_map); 9772 9773 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) 9774 target = map->phys_map[dest_id]->vcpu; 9775 9776 rcu_read_unlock(); 9777 9778 if (!target || !READ_ONCE(target->ready)) 9779 goto no_yield; 9780 9781 /* Ignore requests to yield to self */ 9782 if (vcpu == target) 9783 goto no_yield; 9784 9785 if (kvm_vcpu_yield_to(target) <= 0) 9786 goto no_yield; 9787 9788 vcpu->stat.directed_yield_successful++; 9789 9790 no_yield: 9791 return; 9792 } 9793 9794 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) 9795 { 9796 u64 ret = vcpu->run->hypercall.ret; 9797 9798 if (!is_64_bit_mode(vcpu)) 9799 ret = (u32)ret; 9800 kvm_rax_write(vcpu, ret); 9801 ++vcpu->stat.hypercalls; 9802 return kvm_skip_emulated_instruction(vcpu); 9803 } 9804 9805 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 9806 { 9807 unsigned long nr, a0, a1, a2, a3, ret; 9808 int op_64_bit; 9809 9810 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 9811 return kvm_xen_hypercall(vcpu); 9812 9813 if (kvm_hv_hypercall_enabled(vcpu)) 9814 return kvm_hv_hypercall(vcpu); 9815 9816 nr = kvm_rax_read(vcpu); 9817 a0 = kvm_rbx_read(vcpu); 9818 a1 = kvm_rcx_read(vcpu); 9819 a2 = kvm_rdx_read(vcpu); 9820 a3 = kvm_rsi_read(vcpu); 9821 9822 trace_kvm_hypercall(nr, a0, a1, a2, a3); 9823 9824 op_64_bit = is_64_bit_hypercall(vcpu); 9825 if (!op_64_bit) { 9826 nr &= 0xFFFFFFFF; 9827 a0 &= 0xFFFFFFFF; 9828 a1 &= 0xFFFFFFFF; 9829 a2 &= 0xFFFFFFFF; 9830 a3 &= 0xFFFFFFFF; 9831 } 9832 9833 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 9834 ret = -KVM_EPERM; 9835 goto out; 9836 } 9837 9838 ret = -KVM_ENOSYS; 9839 9840 switch (nr) { 9841 case KVM_HC_VAPIC_POLL_IRQ: 9842 ret = 0; 9843 break; 9844 case KVM_HC_KICK_CPU: 9845 if (!guest_pv_has(vcpu, 
KVM_FEATURE_PV_UNHALT)) 9846 break; 9847 9848 kvm_pv_kick_cpu_op(vcpu->kvm, a1); 9849 kvm_sched_yield(vcpu, a1); 9850 ret = 0; 9851 break; 9852 #ifdef CONFIG_X86_64 9853 case KVM_HC_CLOCK_PAIRING: 9854 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 9855 break; 9856 #endif 9857 case KVM_HC_SEND_IPI: 9858 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) 9859 break; 9860 9861 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 9862 break; 9863 case KVM_HC_SCHED_YIELD: 9864 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) 9865 break; 9866 9867 kvm_sched_yield(vcpu, a0); 9868 ret = 0; 9869 break; 9870 case KVM_HC_MAP_GPA_RANGE: { 9871 u64 gpa = a0, npages = a1, attrs = a2; 9872 9873 ret = -KVM_ENOSYS; 9874 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) 9875 break; 9876 9877 if (!PAGE_ALIGNED(gpa) || !npages || 9878 gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { 9879 ret = -KVM_EINVAL; 9880 break; 9881 } 9882 9883 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; 9884 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; 9885 vcpu->run->hypercall.args[0] = gpa; 9886 vcpu->run->hypercall.args[1] = npages; 9887 vcpu->run->hypercall.args[2] = attrs; 9888 vcpu->run->hypercall.flags = 0; 9889 if (op_64_bit) 9890 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE; 9891 9892 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ); 9893 vcpu->arch.complete_userspace_io = complete_hypercall_exit; 9894 return 0; 9895 } 9896 default: 9897 ret = -KVM_ENOSYS; 9898 break; 9899 } 9900 out: 9901 if (!op_64_bit) 9902 ret = (u32)ret; 9903 kvm_rax_write(vcpu, ret); 9904 9905 ++vcpu->stat.hypercalls; 9906 return kvm_skip_emulated_instruction(vcpu); 9907 } 9908 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 9909 9910 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 9911 { 9912 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 9913 char instruction[3]; 9914 unsigned long rip = kvm_rip_read(vcpu); 9915 9916 /* 9917 * If the quirk is disabled, synthesize a #UD and let the guest pick up 9918 * the pieces. 9919 */ 9920 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { 9921 ctxt->exception.error_code_valid = false; 9922 ctxt->exception.vector = UD_VECTOR; 9923 ctxt->have_exception = true; 9924 return X86EMUL_PROPAGATE_FAULT; 9925 } 9926 9927 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 9928 9929 return emulator_write_emulated(ctxt, rip, instruction, 3, 9930 &ctxt->exception); 9931 } 9932 9933 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 9934 { 9935 return vcpu->run->request_interrupt_window && 9936 likely(!pic_in_kernel(vcpu->kvm)); 9937 } 9938 9939 /* Called within kvm->srcu read side. 
*/ 9940 static void post_kvm_run_save(struct kvm_vcpu *vcpu) 9941 { 9942 struct kvm_run *kvm_run = vcpu->run; 9943 9944 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); 9945 kvm_run->cr8 = kvm_get_cr8(vcpu); 9946 kvm_run->apic_base = kvm_get_apic_base(vcpu); 9947 9948 kvm_run->ready_for_interrupt_injection = 9949 pic_in_kernel(vcpu->kvm) || 9950 kvm_vcpu_ready_for_interrupt_injection(vcpu); 9951 9952 if (is_smm(vcpu)) 9953 kvm_run->flags |= KVM_RUN_X86_SMM; 9954 } 9955 9956 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 9957 { 9958 int max_irr, tpr; 9959 9960 if (!kvm_x86_ops.update_cr8_intercept) 9961 return; 9962 9963 if (!lapic_in_kernel(vcpu)) 9964 return; 9965 9966 if (vcpu->arch.apic->apicv_active) 9967 return; 9968 9969 if (!vcpu->arch.apic->vapic_addr) 9970 max_irr = kvm_lapic_find_highest_irr(vcpu); 9971 else 9972 max_irr = -1; 9973 9974 if (max_irr != -1) 9975 max_irr >>= 4; 9976 9977 tpr = kvm_lapic_get_cr8(vcpu); 9978 9979 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 9980 } 9981 9982 9983 int kvm_check_nested_events(struct kvm_vcpu *vcpu) 9984 { 9985 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 9986 kvm_x86_ops.nested_ops->triple_fault(vcpu); 9987 return 1; 9988 } 9989 9990 return kvm_x86_ops.nested_ops->check_events(vcpu); 9991 } 9992 9993 static void kvm_inject_exception(struct kvm_vcpu *vcpu) 9994 { 9995 /* 9996 * Suppress the error code if the vCPU is in Real Mode, as Real Mode 9997 * exceptions don't report error codes. The presence of an error code 9998 * is carried with the exception and only stripped when the exception 9999 * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do 10000 * report an error code despite the CPU being in Real Mode. 10001 */ 10002 vcpu->arch.exception.has_error_code &= is_protmode(vcpu); 10003 10004 trace_kvm_inj_exception(vcpu->arch.exception.vector, 10005 vcpu->arch.exception.has_error_code, 10006 vcpu->arch.exception.error_code, 10007 vcpu->arch.exception.injected); 10008 10009 static_call(kvm_x86_inject_exception)(vcpu); 10010 } 10011 10012 /* 10013 * Check for any event (interrupt or exception) that is ready to be injected, 10014 * and if there is at least one event, inject the event with the highest 10015 * priority. This handles both "pending" events, i.e. events that have never 10016 * been injected into the guest, and "injected" events, i.e. events that were 10017 * injected as part of a previous VM-Enter, but weren't successfully delivered 10018 * and need to be re-injected. 10019 * 10020 * Note, this is not guaranteed to be invoked on a guest instruction boundary, 10021 * i.e. doesn't guarantee that there's an event window in the guest. KVM must 10022 * be able to inject exceptions in the "middle" of an instruction, and so must 10023 * also be able to re-inject NMIs and IRQs in the middle of an instruction. 10024 * I.e. for exceptions and re-injected events, NOT invoking this on instruction 10025 * boundaries is necessary and correct. 10026 * 10027 * For simplicity, KVM uses a single path to inject all events (except events 10028 * that are injected directly from L1 to L2) and doesn't explicitly track 10029 * instruction boundaries for asynchronous events. However, because VM-Exits 10030 * that can occur during instruction execution typically result in KVM skipping 10031 * the instruction or injecting an exception, e.g.
instruction and exception 10032 * intercepts, and because pending exceptions have higher priority than pending 10033 * interrupts, KVM still honors instruction boundaries in most scenarios. 10034 * 10035 * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip 10036 * the instruction or inject an exception, then KVM can incorrectly inject a new 10037 * asynchronous event if the event became pending after the CPU fetched the 10038 * instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation) 10039 * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be 10040 * injected on the restarted instruction instead of being deferred until the 10041 * instruction completes. 10042 * 10043 * In practice, this virtualization hole is unlikely to be observed by the 10044 * guest, and even less likely to cause functional problems. To detect the 10045 * hole, the guest would have to trigger an event on a side effect of an early 10046 * phase of instruction execution, e.g. on the instruction fetch from memory. 10047 * And for it to be a functional problem, the guest would need to depend on the 10048 * ordering between that side effect, the instruction completing, _and_ the 10049 * delivery of the asynchronous event. 10050 */ 10051 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, 10052 bool *req_immediate_exit) 10053 { 10054 bool can_inject; 10055 int r; 10056 10057 /* 10058 * Process nested events first, as nested VM-Exit supersedes event 10059 * re-injection. If there's an event queued for re-injection, it will 10060 * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit. 10061 */ 10062 if (is_guest_mode(vcpu)) 10063 r = kvm_check_nested_events(vcpu); 10064 else 10065 r = 0; 10066 10067 /* 10068 * Re-inject exceptions and events *especially* if immediate entry+exit 10069 * to/from L2 is needed, as any event that has already been injected 10070 * into L2 needs to complete its lifecycle before injecting a new event. 10071 * 10072 * Don't re-inject an NMI or interrupt if there is a pending exception. 10073 * This collision arises if an exception occurred while vectoring the 10074 * injected event, KVM intercepted said exception, and KVM ultimately 10075 * determined the fault belongs to the guest and queues the exception 10076 * for injection back into the guest. 10077 * 10078 * "Injected" interrupts can also collide with pending exceptions if 10079 * userspace ignores the "ready for injection" flag and blindly queues 10080 * an interrupt. In that case, prioritizing the exception is correct, 10081 * as the exception "occurred" before the exit to userspace. Trap-like 10082 * exceptions, e.g. most #DBs, have higher priority than interrupts. 10083 * And while fault-like exceptions, e.g. #GP and #PF, are the lowest 10084 * priority, they're only generated (pended) during instruction 10085 * execution, and interrupts are recognized at instruction boundaries. 10086 * Thus a pending fault-like exception means the fault occurred on the 10087 * *previous* instruction and must be serviced prior to recognizing any 10088 * new events in order to fully complete the previous instruction.
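 *
 * In short, the re-injection priority implemented by the chain below is:
 *
 *	1. injected exception
 *	2. pending exception (blocks NMI/IRQ re-injection, see above)
 *	3. injected NMI
 *	4. injected IRQ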
10089 */ 10090 if (vcpu->arch.exception.injected) 10091 kvm_inject_exception(vcpu); 10092 else if (kvm_is_exception_pending(vcpu)) 10093 ; /* see above */ 10094 else if (vcpu->arch.nmi_injected) 10095 static_call(kvm_x86_inject_nmi)(vcpu); 10096 else if (vcpu->arch.interrupt.injected) 10097 static_call(kvm_x86_inject_irq)(vcpu, true); 10098 10099 /* 10100 * Exceptions that morph to VM-Exits are handled above, and pending 10101 * exceptions on top of injected exceptions that do not VM-Exit should 10102 * either morph to #DF or, sadly, override the injected exception. 10103 */ 10104 WARN_ON_ONCE(vcpu->arch.exception.injected && 10105 vcpu->arch.exception.pending); 10106 10107 /* 10108 * Bail if immediate entry+exit to/from the guest is needed to complete 10109 * nested VM-Enter or event re-injection so that a different pending 10110 * event can be serviced (or if KVM needs to exit to userspace). 10111 * 10112 * Otherwise, continue processing events even if VM-Exit occurred. The 10113 * VM-Exit will have cleared exceptions that were meant for L2, but 10114 * there may now be events that can be injected into L1. 10115 */ 10116 if (r < 0) 10117 goto out; 10118 10119 /* 10120 * A pending exception VM-Exit should either result in nested VM-Exit 10121 * or force an immediate re-entry and exit to/from L2, and exception 10122 * VM-Exits cannot be injected (flag should _never_ be set). 10123 */ 10124 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || 10125 vcpu->arch.exception_vmexit.pending); 10126 10127 /* 10128 * New events, other than exceptions, cannot be injected if KVM needs 10129 * to re-inject a previous event. See above comments on re-injecting 10130 * for why pending exceptions get priority. 10131 */ 10132 can_inject = !kvm_event_needs_reinjection(vcpu); 10133 10134 if (vcpu->arch.exception.pending) { 10135 /* 10136 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS 10137 * value pushed on the stack. Trap-like exceptions and all #DBs 10138 * leave RF as-is (KVM follows Intel's behavior in this regard; 10139 * AMD states that code breakpoint #DBs explicitly clear RF=0). 10140 * 10141 * Note, most versions of Intel's SDM and AMD's APM incorrectly 10142 * describe the behavior of General Detect #DBs, which are 10143 * fault-like. They do _not_ set RF, a la code breakpoints. 10144 */ 10145 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) 10146 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | 10147 X86_EFLAGS_RF); 10148 10149 if (vcpu->arch.exception.vector == DB_VECTOR) { 10150 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); 10151 if (vcpu->arch.dr7 & DR7_GD) { 10152 vcpu->arch.dr7 &= ~DR7_GD; 10153 kvm_update_dr7(vcpu); 10154 } 10155 } 10156 10157 kvm_inject_exception(vcpu); 10158 10159 vcpu->arch.exception.pending = false; 10160 vcpu->arch.exception.injected = true; 10161 10162 can_inject = false; 10163 } 10164 10165 /* Don't inject interrupts if the user asked to avoid doing so */ 10166 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) 10167 return 0; 10168 10169 /* 10170 * Finally, inject interrupt events. If an event cannot be injected 10171 * due to architectural conditions (e.g. IF=0) a window-open exit 10172 * will re-request KVM_REQ_EVENT. Sometimes, however, an event is pending 10173 * and can architecturally be injected, but we cannot do it right now: 10174 * an interrupt could have arrived just now and we have to inject it 10175 * as a vmexit, or there could already be an event in the queue, which is 10176 * indicated by can_inject.
In that case we request an immediate exit 10177 * in order to make progress and get back here for another iteration. 10178 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 10179 */ 10180 #ifdef CONFIG_KVM_SMM 10181 if (vcpu->arch.smi_pending) { 10182 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 10183 if (r < 0) 10184 goto out; 10185 if (r) { 10186 vcpu->arch.smi_pending = false; 10187 ++vcpu->arch.smi_count; 10188 enter_smm(vcpu); 10189 can_inject = false; 10190 } else 10191 static_call(kvm_x86_enable_smi_window)(vcpu); 10192 } 10193 #endif 10194 10195 if (vcpu->arch.nmi_pending) { 10196 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 10197 if (r < 0) 10198 goto out; 10199 if (r) { 10200 --vcpu->arch.nmi_pending; 10201 vcpu->arch.nmi_injected = true; 10202 static_call(kvm_x86_inject_nmi)(vcpu); 10203 can_inject = false; 10204 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 10205 } 10206 if (vcpu->arch.nmi_pending) 10207 static_call(kvm_x86_enable_nmi_window)(vcpu); 10208 } 10209 10210 if (kvm_cpu_has_injectable_intr(vcpu)) { 10211 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 10212 if (r < 0) 10213 goto out; 10214 if (r) { 10215 int irq = kvm_cpu_get_interrupt(vcpu); 10216 10217 if (!WARN_ON_ONCE(irq == -1)) { 10218 kvm_queue_interrupt(vcpu, irq, false); 10219 static_call(kvm_x86_inject_irq)(vcpu, false); 10220 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 10221 } 10222 } 10223 if (kvm_cpu_has_injectable_intr(vcpu)) 10224 static_call(kvm_x86_enable_irq_window)(vcpu); 10225 } 10226 10227 if (is_guest_mode(vcpu) && 10228 kvm_x86_ops.nested_ops->has_events && 10229 kvm_x86_ops.nested_ops->has_events(vcpu)) 10230 *req_immediate_exit = true; 10231 10232 /* 10233 * KVM must never queue a new exception while injecting an event; KVM 10234 * is done emulating and should only propagate the to-be-injected event 10235 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an 10236 * infinite loop as KVM will bail from VM-Enter to inject the pending 10237 * exception and start the cycle all over. 10238 * 10239 * Exempt triple faults as they have special handling and won't put the 10240 * vCPU into an infinite loop. Triple fault can be queued when running 10241 * VMX without unrestricted guest, as that requires KVM to emulate Real 10242 * Mode events (see kvm_inject_realmode_interrupt()). 10243 */ 10244 WARN_ON_ONCE(vcpu->arch.exception.pending || 10245 vcpu->arch.exception_vmexit.pending); 10246 return 0; 10247 10248 out: 10249 if (r == -EBUSY) { 10250 *req_immediate_exit = true; 10251 r = 0; 10252 } 10253 return r; 10254 } 10255 10256 static void process_nmi(struct kvm_vcpu *vcpu) 10257 { 10258 unsigned int limit; 10259 10260 /* 10261 * x86 is limited to one NMI pending, but because KVM can't react to 10262 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is 10263 * scheduled out, KVM needs to play nice with two queued NMIs showing 10264 * up at the same time. To handle this scenario, allow two NMIs to be 10265 * (temporarily) pending so long as NMIs are not blocked and KVM is not 10266 * waiting for a previous NMI injection to complete (which effectively 10267 * blocks NMIs). KVM will immediately inject one of the two NMIs, and 10268 * will request an NMI window to handle the second NMI. 
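 *
 * Concretely, in the code below: the limit is 1 if NMIs are blocked or an
 * injected NMI is still in flight, else 2; a pending "virtual NMI" (which
 * hardware tracks instead of nmi_pending) consumes one slot; and if one
 * of the remaining pending NMIs can be offloaded to hardware as a vNMI,
 * it too is subtracted from nmi_pending.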
10269 */ 10270 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 10271 limit = 1; 10272 else 10273 limit = 2; 10274 10275 /* 10276 * Adjust the limit to account for pending virtual NMIs, which aren't 10277 * tracked in vcpu->arch.nmi_pending. 10278 */ 10279 if (static_call(kvm_x86_is_vnmi_pending)(vcpu)) 10280 limit--; 10281 10282 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); 10283 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); 10284 10285 if (vcpu->arch.nmi_pending && 10286 (static_call(kvm_x86_set_vnmi_pending)(vcpu))) 10287 vcpu->arch.nmi_pending--; 10288 10289 if (vcpu->arch.nmi_pending) 10290 kvm_make_request(KVM_REQ_EVENT, vcpu); 10291 } 10292 10293 /* Return total number of NMIs pending injection to the VM */ 10294 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu) 10295 { 10296 return vcpu->arch.nmi_pending + 10297 static_call(kvm_x86_is_vnmi_pending)(vcpu); 10298 } 10299 10300 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, 10301 unsigned long *vcpu_bitmap) 10302 { 10303 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap); 10304 } 10305 10306 void kvm_make_scan_ioapic_request(struct kvm *kvm) 10307 { 10308 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 10309 } 10310 10311 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10312 { 10313 struct kvm_lapic *apic = vcpu->arch.apic; 10314 bool activate; 10315 10316 if (!lapic_in_kernel(vcpu)) 10317 return; 10318 10319 down_read(&vcpu->kvm->arch.apicv_update_lock); 10320 preempt_disable(); 10321 10322 /* Do not activate APICV when APIC is disabled */ 10323 activate = kvm_vcpu_apicv_activated(vcpu) && 10324 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); 10325 10326 if (apic->apicv_active == activate) 10327 goto out; 10328 10329 apic->apicv_active = activate; 10330 kvm_apic_update_apicv(vcpu); 10331 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 10332 10333 /* 10334 * When APICv gets disabled, we may still have injected interrupts 10335 * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was 10336 * still active when the interrupt got accepted. Make sure 10337 * kvm_check_and_inject_events() is called to check for that. 10338 */ 10339 if (!apic->apicv_active) 10340 kvm_make_request(KVM_REQ_EVENT, vcpu); 10341 10342 out: 10343 preempt_enable(); 10344 up_read(&vcpu->kvm->arch.apicv_update_lock); 10345 } 10346 EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv); 10347 10348 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10349 { 10350 if (!lapic_in_kernel(vcpu)) 10351 return; 10352 10353 /* 10354 * Due to sharing page tables across vCPUs, the xAPIC memslot must be 10355 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but 10356 * hardware doesn't support x2APIC virtualization. E.g. some AMD 10357 * CPUs support AVIC but not x2APIC. KVM still allows enabling AVIC in 10358 * this case so that KVM can use the AVIC doorbell to inject interrupts to 10359 * running vCPUs, but KVM must not create SPTEs for the APIC base as 10360 * the vCPU would incorrectly be able to access the vAPIC page via MMIO 10361 * despite being in x2APIC mode. For simplicity, inhibiting the APIC 10362 * access page is sticky.
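 * ("Sticky" meaning the inhibit is set once and never cleared, even if
 * every vCPU later switches back out of x2APIC mode.)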
10363 */ 10364 if (apic_x2apic_mode(vcpu->arch.apic) && 10365 kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization) 10366 kvm_inhibit_apic_access_page(vcpu); 10367 10368 __kvm_vcpu_update_apicv(vcpu); 10369 } 10370 10371 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10372 enum kvm_apicv_inhibit reason, bool set) 10373 { 10374 unsigned long old, new; 10375 10376 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); 10377 10378 if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason))) 10379 return; 10380 10381 old = new = kvm->arch.apicv_inhibit_reasons; 10382 10383 set_or_clear_apicv_inhibit(&new, reason, set); 10384 10385 if (!!old != !!new) { 10386 /* 10387 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid 10388 * false positives in the sanity check WARN in svm_vcpu_run(). 10389 * This task will wait for all vCPUs to ack the kick IRQ before 10390 * updating apicv_inhibit_reasons, and all other vCPUs will 10391 * block on acquiring apicv_update_lock so that vCPUs can't 10392 * redo svm_vcpu_run() without seeing the new inhibit state. 10393 * 10394 * Note, holding apicv_update_lock and taking it in the read 10395 * side (handling the request) also prevents other vCPUs from 10396 * servicing the request with a stale apicv_inhibit_reasons. 10397 */ 10398 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE); 10399 kvm->arch.apicv_inhibit_reasons = new; 10400 if (new) { 10401 unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE); 10402 int idx = srcu_read_lock(&kvm->srcu); 10403 10404 kvm_zap_gfn_range(kvm, gfn, gfn+1); 10405 srcu_read_unlock(&kvm->srcu, idx); 10406 } 10407 } else { 10408 kvm->arch.apicv_inhibit_reasons = new; 10409 } 10410 } 10411 10412 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm, 10413 enum kvm_apicv_inhibit reason, bool set) 10414 { 10415 if (!enable_apicv) 10416 return; 10417 10418 down_write(&kvm->arch.apicv_update_lock); 10419 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10420 up_write(&kvm->arch.apicv_update_lock); 10421 } 10422 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10423 10424 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10425 { 10426 if (!kvm_apic_present(vcpu)) 10427 return; 10428 10429 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); 10430 10431 if (irqchip_split(vcpu->kvm)) 10432 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 10433 else { 10434 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10435 if (ioapic_in_kernel(vcpu->kvm)) 10436 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 10437 } 10438 10439 if (is_guest_mode(vcpu)) 10440 vcpu->arch.load_eoi_exitmap_pending = true; 10441 else 10442 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); 10443 } 10444 10445 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) 10446 { 10447 u64 eoi_exit_bitmap[4]; 10448 10449 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 10450 return; 10451 10452 if (to_hv_vcpu(vcpu)) { 10453 bitmap_or((ulong *)eoi_exit_bitmap, 10454 vcpu->arch.ioapic_handled_vectors, 10455 to_hv_synic(vcpu)->vec_bitmap, 256); 10456 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 10457 return; 10458 } 10459 10460 static_call_cond(kvm_x86_load_eoi_exitmap)( 10461 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); 10462 } 10463 10464 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) 10465 { 10466 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm); 10467 } 10468 10469 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 10470 { 10471 if (!lapic_in_kernel(vcpu)) 10472 
return; 10473 10474 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); 10475 } 10476 10477 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) 10478 { 10479 smp_send_reschedule(vcpu->cpu); 10480 } 10481 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); 10482 10483 /* 10484 * Called within kvm->srcu read side. 10485 * Returns 1 to let vcpu_run() continue the guest execution loop without 10486 * exiting to the userspace. Otherwise, the value will be returned to the 10487 * userspace. 10488 */ 10489 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) 10490 { 10491 int r; 10492 bool req_int_win = 10493 dm_request_for_irq_injection(vcpu) && 10494 kvm_cpu_accept_dm_intr(vcpu); 10495 fastpath_t exit_fastpath; 10496 10497 bool req_immediate_exit = false; 10498 10499 if (kvm_request_pending(vcpu)) { 10500 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { 10501 r = -EIO; 10502 goto out; 10503 } 10504 10505 if (kvm_dirty_ring_check_request(vcpu)) { 10506 r = 0; 10507 goto out; 10508 } 10509 10510 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { 10511 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { 10512 r = 0; 10513 goto out; 10514 } 10515 } 10516 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) 10517 kvm_mmu_free_obsolete_roots(vcpu); 10518 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) 10519 __kvm_migrate_timers(vcpu); 10520 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) 10521 kvm_update_masterclock(vcpu->kvm); 10522 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) 10523 kvm_gen_kvmclock_update(vcpu); 10524 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { 10525 r = kvm_guest_time_update(vcpu); 10526 if (unlikely(r)) 10527 goto out; 10528 } 10529 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) 10530 kvm_mmu_sync_roots(vcpu); 10531 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) 10532 kvm_mmu_load_pgd(vcpu); 10533 10534 /* 10535 * Note, the order matters here, as flushing "all" TLB entries 10536 * also flushes the "current" TLB entries, i.e. servicing the 10537 * flush "all" will clear any request to flush "current". 10538 */ 10539 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) 10540 kvm_vcpu_flush_tlb_all(vcpu); 10541 10542 kvm_service_local_tlb_flush_requests(vcpu); 10543 10544 /* 10545 * Fall back to a "full" guest flush if Hyper-V's precise 10546 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but 10547 * the flushes are considered "remote" and not "local" because 10548 * the requests can be initiated from other vCPUs. 10549 */ 10550 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && 10551 kvm_hv_vcpu_flush_tlb(vcpu)) 10552 kvm_vcpu_flush_tlb_guest(vcpu); 10553 10554 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { 10555 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; 10556 r = 0; 10557 goto out; 10558 } 10559 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10560 if (is_guest_mode(vcpu)) 10561 kvm_x86_ops.nested_ops->triple_fault(vcpu); 10562 10563 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { 10564 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; 10565 vcpu->mmio_needed = 0; 10566 r = 0; 10567 goto out; 10568 } 10569 } 10570 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { 10571 /* Page is swapped out. 
Do synthetic halt */ 10572 vcpu->arch.apf.halted = true; 10573 r = 1; 10574 goto out; 10575 } 10576 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) 10577 record_steal_time(vcpu); 10578 #ifdef CONFIG_KVM_SMM 10579 if (kvm_check_request(KVM_REQ_SMI, vcpu)) 10580 process_smi(vcpu); 10581 #endif 10582 if (kvm_check_request(KVM_REQ_NMI, vcpu)) 10583 process_nmi(vcpu); 10584 if (kvm_check_request(KVM_REQ_PMU, vcpu)) 10585 kvm_pmu_handle_event(vcpu); 10586 if (kvm_check_request(KVM_REQ_PMI, vcpu)) 10587 kvm_pmu_deliver_pmi(vcpu); 10588 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { 10589 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); 10590 if (test_bit(vcpu->arch.pending_ioapic_eoi, 10591 vcpu->arch.ioapic_handled_vectors)) { 10592 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; 10593 vcpu->run->eoi.vector = 10594 vcpu->arch.pending_ioapic_eoi; 10595 r = 0; 10596 goto out; 10597 } 10598 } 10599 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) 10600 vcpu_scan_ioapic(vcpu); 10601 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) 10602 vcpu_load_eoi_exitmap(vcpu); 10603 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) 10604 kvm_vcpu_reload_apic_access_page(vcpu); 10605 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10606 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10607 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10608 vcpu->run->system_event.ndata = 0; 10609 r = 0; 10610 goto out; 10611 } 10612 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10613 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10614 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10615 vcpu->run->system_event.ndata = 0; 10616 r = 0; 10617 goto out; 10618 } 10619 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { 10620 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 10621 10622 vcpu->run->exit_reason = KVM_EXIT_HYPERV; 10623 vcpu->run->hyperv = hv_vcpu->exit; 10624 r = 0; 10625 goto out; 10626 } 10627 10628 /* 10629 * KVM_REQ_HV_STIMER has to be processed after 10630 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers 10631 * depend on the guest clock being up-to-date 10632 */ 10633 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) 10634 kvm_hv_process_stimers(vcpu); 10635 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) 10636 kvm_vcpu_update_apicv(vcpu); 10637 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 10638 kvm_check_async_pf_completion(vcpu); 10639 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 10640 static_call(kvm_x86_msr_filter_changed)(vcpu); 10641 10642 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) 10643 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); 10644 } 10645 10646 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || 10647 kvm_xen_has_interrupt(vcpu)) { 10648 ++vcpu->stat.req_event; 10649 r = kvm_apic_accept_events(vcpu); 10650 if (r < 0) { 10651 r = 0; 10652 goto out; 10653 } 10654 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 10655 r = 1; 10656 goto out; 10657 } 10658 10659 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); 10660 if (r < 0) { 10661 r = 0; 10662 goto out; 10663 } 10664 if (req_int_win) 10665 static_call(kvm_x86_enable_irq_window)(vcpu); 10666 10667 if (kvm_lapic_enabled(vcpu)) { 10668 update_cr8_intercept(vcpu); 10669 kvm_lapic_sync_to_vapic(vcpu); 10670 } 10671 } 10672 10673 r = kvm_mmu_reload(vcpu); 10674 if (unlikely(r)) { 10675 goto cancel_injection; 10676 } 10677 10678 preempt_disable(); 10679 10680 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); 10681 10682 /* 10683 * Disable IRQs 
before setting IN_GUEST_MODE. Posted interrupt 10684 * IPIs are then delayed after guest entry, which ensures that they 10685 * result in virtual interrupt delivery. 10686 */ 10687 local_irq_disable(); 10688 10689 /* Store vcpu->apicv_active before vcpu->mode. */ 10690 smp_store_release(&vcpu->mode, IN_GUEST_MODE); 10691 10692 kvm_vcpu_srcu_read_unlock(vcpu); 10693 10694 /* 10695 * 1) We should set ->mode before checking ->requests. Please see 10696 * the comment in kvm_vcpu_exiting_guest_mode(). 10697 * 10698 * 2) For APICv, we should set ->mode before checking PID.ON. This 10699 * pairs with the memory barrier implicit in pi_test_and_set_on 10700 * (see vmx_deliver_posted_interrupt). 10701 * 10702 * 3) This also orders the write to mode from any reads to the page 10703 * tables done while the VCPU is running. Please see the comment 10704 * in kvm_flush_remote_tlbs. 10705 */ 10706 smp_mb__after_srcu_read_unlock(); 10707 10708 /* 10709 * Process pending posted interrupts to handle the case where the 10710 * notification IRQ arrived in the host, or was never sent (because the 10711 * target vCPU wasn't running). Do this regardless of the vCPU's APICv 10712 * status; KVM doesn't update assigned devices when APICv is inhibited, 10713 * i.e. they can post interrupts even if APICv is temporarily disabled. 10714 */ 10715 if (kvm_lapic_enabled(vcpu)) 10716 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10717 10718 if (kvm_vcpu_exit_request(vcpu)) { 10719 vcpu->mode = OUTSIDE_GUEST_MODE; 10720 smp_wmb(); 10721 local_irq_enable(); 10722 preempt_enable(); 10723 kvm_vcpu_srcu_read_lock(vcpu); 10724 r = 1; 10725 goto cancel_injection; 10726 } 10727 10728 if (req_immediate_exit) { 10729 kvm_make_request(KVM_REQ_EVENT, vcpu); 10730 static_call(kvm_x86_request_immediate_exit)(vcpu); 10731 } 10732 10733 fpregs_assert_state_consistent(); 10734 if (test_thread_flag(TIF_NEED_FPU_LOAD)) 10735 switch_fpu_return(); 10736 10737 if (vcpu->arch.guest_fpu.xfd_err) 10738 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); 10739 10740 if (unlikely(vcpu->arch.switch_db_regs)) { 10741 set_debugreg(0, 7); 10742 set_debugreg(vcpu->arch.eff_db[0], 0); 10743 set_debugreg(vcpu->arch.eff_db[1], 1); 10744 set_debugreg(vcpu->arch.eff_db[2], 2); 10745 set_debugreg(vcpu->arch.eff_db[3], 3); 10746 } else if (unlikely(hw_breakpoint_active())) { 10747 set_debugreg(0, 7); 10748 } 10749 10750 guest_timing_enter_irqoff(); 10751 10752 for (;;) { 10753 /* 10754 * Assert that vCPU vs. VM APICv state is consistent. An APICv 10755 * update must kick and wait for all vCPUs before toggling the 10756 * per-VM state, and responding vCPUs must wait for the update 10757 * to complete before servicing KVM_REQ_APICV_UPDATE. 10758 */ 10759 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && 10760 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); 10761 10762 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); 10763 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST)) 10764 break; 10765 10766 if (kvm_lapic_enabled(vcpu)) 10767 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); 10768 10769 if (unlikely(kvm_vcpu_exit_request(vcpu))) { 10770 exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; 10771 break; 10772 } 10773 10774 /* Note, VM-Exits that go down the "slow" path are accounted below. */ 10775 ++vcpu->stat.exits; 10776 } 10777 10778 /* 10779 * Do this here before restoring debug registers on the host.
And 10780 * since we do this before handling the vmexit, a DR access vmexit 10781 * can (a) read the correct value of the debug registers, (b) set 10782 * KVM_DEBUGREG_WONT_EXIT again. 10783 */ 10784 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 10785 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 10786 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 10787 kvm_update_dr0123(vcpu); 10788 kvm_update_dr7(vcpu); 10789 } 10790 10791 /* 10792 * If the guest has used debug registers, at least dr7 10793 * will be disabled while returning to the host. 10794 * If we don't have active breakpoints in the host, we don't 10795 * care about the messed up debug address registers. But if 10796 * we have some of them active, restore the old state. 10797 */ 10798 if (hw_breakpoint_active()) 10799 hw_breakpoint_restore(); 10800 10801 vcpu->arch.last_vmentry_cpu = vcpu->cpu; 10802 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 10803 10804 vcpu->mode = OUTSIDE_GUEST_MODE; 10805 smp_wmb(); 10806 10807 /* 10808 * Sync xfd before calling handle_exit_irqoff() which may 10809 * rely on the fact that guest_fpu::xfd is up-to-date (e.g. 10810 * in #NM irqoff handler). 10811 */ 10812 if (vcpu->arch.xfd_no_write_intercept) 10813 fpu_sync_guest_vmexit_xfd_state(); 10814 10815 static_call(kvm_x86_handle_exit_irqoff)(vcpu); 10816 10817 if (vcpu->arch.guest_fpu.xfd_err) 10818 wrmsrl(MSR_IA32_XFD_ERR, 0); 10819 10820 /* 10821 * Consume any pending interrupts, including the possible source of 10822 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. 10823 * An instruction is required after local_irq_enable() to fully unblock 10824 * interrupts on processors that implement an interrupt shadow, the 10825 * stat.exits increment will do nicely. 10826 */ 10827 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); 10828 local_irq_enable(); 10829 ++vcpu->stat.exits; 10830 local_irq_disable(); 10831 kvm_after_interrupt(vcpu); 10832 10833 /* 10834 * Wait until after servicing IRQs to account guest time so that any 10835 * ticks that occurred while running the guest are properly accounted 10836 * to the guest. Waiting until IRQs are enabled degrades the accuracy 10837 * of accounting via context tracking, but the loss of accuracy is 10838 * acceptable for all known use cases. 10839 */ 10840 guest_timing_exit_irqoff(); 10841 10842 local_irq_enable(); 10843 preempt_enable(); 10844 10845 kvm_vcpu_srcu_read_lock(vcpu); 10846 10847 /* 10848 * Profile KVM exit RIPs: 10849 */ 10850 if (unlikely(prof_on == KVM_PROFILING)) { 10851 unsigned long rip = kvm_rip_read(vcpu); 10852 profile_hit(KVM_PROFILING, (void *)rip); 10853 } 10854 10855 if (unlikely(vcpu->arch.tsc_always_catchup)) 10856 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 10857 10858 if (vcpu->arch.apic_attention) 10859 kvm_lapic_sync_from_vapic(vcpu); 10860 10861 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 10862 return r; 10863 10864 cancel_injection: 10865 if (req_immediate_exit) 10866 kvm_make_request(KVM_REQ_EVENT, vcpu); 10867 static_call(kvm_x86_cancel_injection)(vcpu); 10868 if (unlikely(vcpu->arch.apic_attention)) 10869 kvm_lapic_sync_from_vapic(vcpu); 10870 out: 10871 return r; 10872 } 10873 10874 /* Called within kvm->srcu read side. 
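 * The SRCU read lock is temporarily dropped around the actual
 * halt/block below and re-acquired afterwards.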
*/ 10875 static inline int vcpu_block(struct kvm_vcpu *vcpu) 10876 { 10877 bool hv_timer; 10878 10879 if (!kvm_arch_vcpu_runnable(vcpu)) { 10880 /* 10881 * Switch to the software timer before halt-polling/blocking as 10882 * the guest's timer may be a break event for the vCPU, and the 10883 * hypervisor timer runs only when the CPU is in guest mode. 10884 * Switch before halt-polling so that KVM recognizes an expired 10885 * timer before blocking. 10886 */ 10887 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); 10888 if (hv_timer) 10889 kvm_lapic_switch_to_sw_timer(vcpu); 10890 10891 kvm_vcpu_srcu_read_unlock(vcpu); 10892 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 10893 kvm_vcpu_halt(vcpu); 10894 else 10895 kvm_vcpu_block(vcpu); 10896 kvm_vcpu_srcu_read_lock(vcpu); 10897 10898 if (hv_timer) 10899 kvm_lapic_switch_to_hv_timer(vcpu); 10900 10901 /* 10902 * If the vCPU is not runnable, a signal or another host event 10903 * of some kind is pending; service it without changing the 10904 * vCPU's activity state. 10905 */ 10906 if (!kvm_arch_vcpu_runnable(vcpu)) 10907 return 1; 10908 } 10909 10910 /* 10911 * Evaluate nested events before exiting the halted state. This allows 10912 * the halt state to be recorded properly in the VMCS12's activity 10913 * state field (AMD does not have a similar field and a VM-Exit always 10914 * causes a spurious wakeup from HLT). 10915 */ 10916 if (is_guest_mode(vcpu)) { 10917 if (kvm_check_nested_events(vcpu) < 0) 10918 return 0; 10919 } 10920 10921 if (kvm_apic_accept_events(vcpu) < 0) 10922 return 0; 10923 switch(vcpu->arch.mp_state) { 10924 case KVM_MP_STATE_HALTED: 10925 case KVM_MP_STATE_AP_RESET_HOLD: 10926 vcpu->arch.pv.pv_unhalted = false; 10927 vcpu->arch.mp_state = 10928 KVM_MP_STATE_RUNNABLE; 10929 fallthrough; 10930 case KVM_MP_STATE_RUNNABLE: 10931 vcpu->arch.apf.halted = false; 10932 break; 10933 case KVM_MP_STATE_INIT_RECEIVED: 10934 break; 10935 default: 10936 WARN_ON_ONCE(1); 10937 break; 10938 } 10939 return 1; 10940 } 10941 10942 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 10943 { 10944 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 10945 !vcpu->arch.apf.halted); 10946 } 10947 10948 /* Called within kvm->srcu read side. */ 10949 static int vcpu_run(struct kvm_vcpu *vcpu) 10950 { 10951 int r; 10952 10953 vcpu->arch.l1tf_flush_l1d = true; 10954 10955 for (;;) { 10956 /* 10957 * If another guest vCPU requests a PV TLB flush in the middle 10958 * of instruction emulation, the rest of the emulation could 10959 * use a stale page translation. Assume that any code after 10960 * this point can start executing an instruction. 
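 * (at_instruction_boundary is what later gates whether a preempted vCPU
 * may be reported to the guest as preempted, see
 * kvm_steal_time_set_preempted().)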
10961 */ 10962 vcpu->arch.at_instruction_boundary = false; 10963 if (kvm_vcpu_running(vcpu)) { 10964 r = vcpu_enter_guest(vcpu); 10965 } else { 10966 r = vcpu_block(vcpu); 10967 } 10968 10969 if (r <= 0) 10970 break; 10971 10972 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); 10973 if (kvm_xen_has_pending_events(vcpu)) 10974 kvm_xen_inject_pending_events(vcpu); 10975 10976 if (kvm_cpu_has_pending_timer(vcpu)) 10977 kvm_inject_pending_timer_irqs(vcpu); 10978 10979 if (dm_request_for_irq_injection(vcpu) && 10980 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { 10981 r = 0; 10982 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 10983 ++vcpu->stat.request_irq_exits; 10984 break; 10985 } 10986 10987 if (__xfer_to_guest_mode_work_pending()) { 10988 kvm_vcpu_srcu_read_unlock(vcpu); 10989 r = xfer_to_guest_mode_handle_work(vcpu); 10990 kvm_vcpu_srcu_read_lock(vcpu); 10991 if (r) 10992 return r; 10993 } 10994 } 10995 10996 return r; 10997 } 10998 10999 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) 11000 { 11001 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 11002 } 11003 11004 static int complete_emulated_pio(struct kvm_vcpu *vcpu) 11005 { 11006 BUG_ON(!vcpu->arch.pio.count); 11007 11008 return complete_emulated_io(vcpu); 11009 } 11010 11011 /* 11012 * Implements the following, as a state machine: 11013 * 11014 * read: 11015 * for each fragment 11016 * for each mmio piece in the fragment 11017 * write gpa, len 11018 * exit 11019 * copy data 11020 * execute insn 11021 * 11022 * write: 11023 * for each fragment 11024 * for each mmio piece in the fragment 11025 * write gpa, len 11026 * copy data 11027 * exit 11028 */ 11029 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) 11030 { 11031 struct kvm_run *run = vcpu->run; 11032 struct kvm_mmio_fragment *frag; 11033 unsigned len; 11034 11035 BUG_ON(!vcpu->mmio_needed); 11036 11037 /* Complete previous fragment */ 11038 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; 11039 len = min(8u, frag->len); 11040 if (!vcpu->mmio_is_write) 11041 memcpy(frag->data, run->mmio.data, len); 11042 11043 if (frag->len <= 8) { 11044 /* Switch to the next fragment. */ 11045 frag++; 11046 vcpu->mmio_cur_fragment++; 11047 } else { 11048 /* Go forward to the next mmio piece. */ 11049 frag->data += len; 11050 frag->gpa += len; 11051 frag->len -= len; 11052 } 11053 11054 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 11055 vcpu->mmio_needed = 0; 11056 11057 /* FIXME: return into emulator if single-stepping. */ 11058 if (vcpu->mmio_is_write) 11059 return 1; 11060 vcpu->mmio_read_completed = 1; 11061 return complete_emulated_io(vcpu); 11062 } 11063 11064 run->exit_reason = KVM_EXIT_MMIO; 11065 run->mmio.phys_addr = frag->gpa; 11066 if (vcpu->mmio_is_write) 11067 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); 11068 run->mmio.len = min(8u, frag->len); 11069 run->mmio.is_write = vcpu->mmio_is_write; 11070 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 11071 return 0; 11072 } 11073 11074 /* Swap (qemu) user FPU context for the guest FPU context. */ 11075 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 11076 { 11077 /* Exclude PKRU, it's restored separately immediately after VM-Exit. */ 11078 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); 11079 trace_kvm_fpu(1); 11080 } 11081 11082 /* When vcpu_run ends, restore user space FPU context. 
*/ 11083 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 11084 { 11085 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); 11086 ++vcpu->stat.fpu_reload; 11087 trace_kvm_fpu(0); 11088 } 11089 11090 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) 11091 { 11092 struct kvm_queued_exception *ex = &vcpu->arch.exception; 11093 struct kvm_run *kvm_run = vcpu->run; 11094 int r; 11095 11096 vcpu_load(vcpu); 11097 kvm_sigset_activate(vcpu); 11098 kvm_run->flags = 0; 11099 kvm_load_guest_fpu(vcpu); 11100 11101 kvm_vcpu_srcu_read_lock(vcpu); 11102 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 11103 if (kvm_run->immediate_exit) { 11104 r = -EINTR; 11105 goto out; 11106 } 11107 11108 /* 11109 * Don't bother switching APIC timer emulation from the 11110 * hypervisor timer to the software timer, the only way for the 11111 * APIC timer to be active is if userspace stuffed vCPU state, 11112 * i.e. put the vCPU into a nonsensical state. Only an INIT 11113 * will transition the vCPU out of UNINITIALIZED (without more 11114 * state stuffing from userspace), which will reset the local 11115 * APIC and thus cancel the timer or drop the IRQ (if the timer 11116 * already expired). 11117 */ 11118 kvm_vcpu_srcu_read_unlock(vcpu); 11119 kvm_vcpu_block(vcpu); 11120 kvm_vcpu_srcu_read_lock(vcpu); 11121 11122 if (kvm_apic_accept_events(vcpu) < 0) { 11123 r = 0; 11124 goto out; 11125 } 11126 r = -EAGAIN; 11127 if (signal_pending(current)) { 11128 r = -EINTR; 11129 kvm_run->exit_reason = KVM_EXIT_INTR; 11130 ++vcpu->stat.signal_exits; 11131 } 11132 goto out; 11133 } 11134 11135 if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) || 11136 (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) { 11137 r = -EINVAL; 11138 goto out; 11139 } 11140 11141 if (kvm_run->kvm_dirty_regs) { 11142 r = sync_regs(vcpu); 11143 if (r != 0) 11144 goto out; 11145 } 11146 11147 /* re-sync apic's tpr */ 11148 if (!lapic_in_kernel(vcpu)) { 11149 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { 11150 r = -EINVAL; 11151 goto out; 11152 } 11153 } 11154 11155 /* 11156 * If userspace set a pending exception and L2 is active, convert it to 11157 * a pending VM-Exit if L1 wants to intercept the exception. 
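 * I.e. kvm_queue_exception_vmexit() below moves the exception from the
 * regular exception queue to the exception_vmexit queue, so that it is
 * morphed into a nested VM-Exit to L1 instead of being injected into L2.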
11158 */ 11159 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && 11160 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, 11161 ex->error_code)) { 11162 kvm_queue_exception_vmexit(vcpu, ex->vector, 11163 ex->has_error_code, ex->error_code, 11164 ex->has_payload, ex->payload); 11165 ex->injected = false; 11166 ex->pending = false; 11167 } 11168 vcpu->arch.exception_from_userspace = false; 11169 11170 if (unlikely(vcpu->arch.complete_userspace_io)) { 11171 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 11172 vcpu->arch.complete_userspace_io = NULL; 11173 r = cui(vcpu); 11174 if (r <= 0) 11175 goto out; 11176 } else { 11177 WARN_ON_ONCE(vcpu->arch.pio.count); 11178 WARN_ON_ONCE(vcpu->mmio_needed); 11179 } 11180 11181 if (kvm_run->immediate_exit) { 11182 r = -EINTR; 11183 goto out; 11184 } 11185 11186 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); 11187 if (r <= 0) 11188 goto out; 11189 11190 r = vcpu_run(vcpu); 11191 11192 out: 11193 kvm_put_guest_fpu(vcpu); 11194 if (kvm_run->kvm_valid_regs) 11195 store_regs(vcpu); 11196 post_kvm_run_save(vcpu); 11197 kvm_vcpu_srcu_read_unlock(vcpu); 11198 11199 kvm_sigset_deactivate(vcpu); 11200 vcpu_put(vcpu); 11201 return r; 11202 } 11203 11204 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 11205 { 11206 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { 11207 /* 11208 * We are here if userspace calls get_regs() in the middle of 11209 * instruction emulation. Register state needs to be copied 11210 * back from the emulation context to the vcpu. Userspace shouldn't do 11211 * that usually, but some badly designed PV devices (vmware 11212 * backdoor interface) need this to work. 11213 */ 11214 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); 11215 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 11216 } 11217 regs->rax = kvm_rax_read(vcpu); 11218 regs->rbx = kvm_rbx_read(vcpu); 11219 regs->rcx = kvm_rcx_read(vcpu); 11220 regs->rdx = kvm_rdx_read(vcpu); 11221 regs->rsi = kvm_rsi_read(vcpu); 11222 regs->rdi = kvm_rdi_read(vcpu); 11223 regs->rsp = kvm_rsp_read(vcpu); 11224 regs->rbp = kvm_rbp_read(vcpu); 11225 #ifdef CONFIG_X86_64 11226 regs->r8 = kvm_r8_read(vcpu); 11227 regs->r9 = kvm_r9_read(vcpu); 11228 regs->r10 = kvm_r10_read(vcpu); 11229 regs->r11 = kvm_r11_read(vcpu); 11230 regs->r12 = kvm_r12_read(vcpu); 11231 regs->r13 = kvm_r13_read(vcpu); 11232 regs->r14 = kvm_r14_read(vcpu); 11233 regs->r15 = kvm_r15_read(vcpu); 11234 #endif 11235 11236 regs->rip = kvm_rip_read(vcpu); 11237 regs->rflags = kvm_get_rflags(vcpu); 11238 } 11239 11240 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 11241 { 11242 vcpu_load(vcpu); 11243 __get_regs(vcpu, regs); 11244 vcpu_put(vcpu); 11245 return 0; 11246 } 11247 11248 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 11249 { 11250 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; 11251 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 11252 11253 kvm_rax_write(vcpu, regs->rax); 11254 kvm_rbx_write(vcpu, regs->rbx); 11255 kvm_rcx_write(vcpu, regs->rcx); 11256 kvm_rdx_write(vcpu, regs->rdx); 11257 kvm_rsi_write(vcpu, regs->rsi); 11258 kvm_rdi_write(vcpu, regs->rdi); 11259 kvm_rsp_write(vcpu, regs->rsp); 11260 kvm_rbp_write(vcpu, regs->rbp); 11261 #ifdef CONFIG_X86_64 11262 kvm_r8_write(vcpu, regs->r8); 11263 kvm_r9_write(vcpu, regs->r9); 11264 kvm_r10_write(vcpu, regs->r10); 11265 kvm_r11_write(vcpu, regs->r11); 11266 kvm_r12_write(vcpu, regs->r12); 11267 kvm_r13_write(vcpu, regs->r13); 11268
kvm_r14_write(vcpu, regs->r14); 11269 kvm_r15_write(vcpu, regs->r15); 11270 #endif 11271 11272 kvm_rip_write(vcpu, regs->rip); 11273 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); 11274 11275 vcpu->arch.exception.pending = false; 11276 vcpu->arch.exception_vmexit.pending = false; 11277 11278 kvm_make_request(KVM_REQ_EVENT, vcpu); 11279 } 11280 11281 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 11282 { 11283 vcpu_load(vcpu); 11284 __set_regs(vcpu, regs); 11285 vcpu_put(vcpu); 11286 return 0; 11287 } 11288 11289 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11290 { 11291 struct desc_ptr dt; 11292 11293 if (vcpu->arch.guest_state_protected) 11294 goto skip_protected_regs; 11295 11296 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11297 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11298 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11299 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11300 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11301 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11302 11303 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11304 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11305 11306 static_call(kvm_x86_get_idt)(vcpu, &dt); 11307 sregs->idt.limit = dt.size; 11308 sregs->idt.base = dt.address; 11309 static_call(kvm_x86_get_gdt)(vcpu, &dt); 11310 sregs->gdt.limit = dt.size; 11311 sregs->gdt.base = dt.address; 11312 11313 sregs->cr2 = vcpu->arch.cr2; 11314 sregs->cr3 = kvm_read_cr3(vcpu); 11315 11316 skip_protected_regs: 11317 sregs->cr0 = kvm_read_cr0(vcpu); 11318 sregs->cr4 = kvm_read_cr4(vcpu); 11319 sregs->cr8 = kvm_get_cr8(vcpu); 11320 sregs->efer = vcpu->arch.efer; 11321 sregs->apic_base = kvm_get_apic_base(vcpu); 11322 } 11323 11324 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11325 { 11326 __get_sregs_common(vcpu, sregs); 11327 11328 if (vcpu->arch.guest_state_protected) 11329 return; 11330 11331 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 11332 set_bit(vcpu->arch.interrupt.nr, 11333 (unsigned long *)sregs->interrupt_bitmap); 11334 } 11335 11336 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) 11337 { 11338 int i; 11339 11340 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); 11341 11342 if (vcpu->arch.guest_state_protected) 11343 return; 11344 11345 if (is_pae_paging(vcpu)) { 11346 for (i = 0 ; i < 4 ; i++) 11347 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); 11348 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID; 11349 } 11350 } 11351 11352 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 11353 struct kvm_sregs *sregs) 11354 { 11355 vcpu_load(vcpu); 11356 __get_sregs(vcpu, sregs); 11357 vcpu_put(vcpu); 11358 return 0; 11359 } 11360 11361 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 11362 struct kvm_mp_state *mp_state) 11363 { 11364 int r; 11365 11366 vcpu_load(vcpu); 11367 if (kvm_mpx_supported()) 11368 kvm_load_guest_fpu(vcpu); 11369 11370 r = kvm_apic_accept_events(vcpu); 11371 if (r < 0) 11372 goto out; 11373 r = 0; 11374 11375 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || 11376 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && 11377 vcpu->arch.pv.pv_unhalted) 11378 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 11379 else 11380 mp_state->mp_state = vcpu->arch.mp_state; 11381 11382 out: 11383 if (kvm_mpx_supported()) 11384 kvm_put_guest_fpu(vcpu); 11385 vcpu_put(vcpu); 11386 return r; 11387 } 11388 11389 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 
11390 struct kvm_mp_state *mp_state) 11391 { 11392 int ret = -EINVAL; 11393 11394 vcpu_load(vcpu); 11395 11396 switch (mp_state->mp_state) { 11397 case KVM_MP_STATE_UNINITIALIZED: 11398 case KVM_MP_STATE_HALTED: 11399 case KVM_MP_STATE_AP_RESET_HOLD: 11400 case KVM_MP_STATE_INIT_RECEIVED: 11401 case KVM_MP_STATE_SIPI_RECEIVED: 11402 if (!lapic_in_kernel(vcpu)) 11403 goto out; 11404 break; 11405 11406 case KVM_MP_STATE_RUNNABLE: 11407 break; 11408 11409 default: 11410 goto out; 11411 } 11412 11413 /* 11414 * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow 11415 * forcing the guest into INIT/SIPI if those events are supposed to be 11416 * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state 11417 * if an SMI is pending as well. 11418 */ 11419 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && 11420 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || 11421 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) 11422 goto out; 11423 11424 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { 11425 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 11426 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); 11427 } else 11428 vcpu->arch.mp_state = mp_state->mp_state; 11429 kvm_make_request(KVM_REQ_EVENT, vcpu); 11430 11431 ret = 0; 11432 out: 11433 vcpu_put(vcpu); 11434 return ret; 11435 } 11436 11437 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, 11438 int reason, bool has_error_code, u32 error_code) 11439 { 11440 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 11441 int ret; 11442 11443 init_emulate_ctxt(vcpu); 11444 11445 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, 11446 has_error_code, error_code); 11447 if (ret) { 11448 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 11449 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 11450 vcpu->run->internal.ndata = 0; 11451 return 0; 11452 } 11453 11454 kvm_rip_write(vcpu, ctxt->eip); 11455 kvm_set_rflags(vcpu, ctxt->eflags); 11456 return 1; 11457 } 11458 EXPORT_SYMBOL_GPL(kvm_task_switch); 11459 11460 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 11461 { 11462 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { 11463 /* 11464 * When EFER.LME and CR0.PG are set, the processor is in 11465 * 64-bit mode (though maybe in a 32-bit code segment). 11466 * CR4.PAE and EFER.LMA must be set. 11467 */ 11468 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) 11469 return false; 11470 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) 11471 return false; 11472 } else { 11473 /* 11474 * Not in 64-bit mode: EFER.LMA is clear and the code 11475 * segment cannot be 64-bit. 
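 * (CS.L can legally be 1 only in IA-32e mode, hence the cs.l check
 * below.)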
11476 */ 11477 if (sregs->efer & EFER_LMA || sregs->cs.l) 11478 return false; 11479 } 11480 11481 return kvm_is_valid_cr4(vcpu, sregs->cr4) && 11482 kvm_is_valid_cr0(vcpu, sregs->cr0); 11483 } 11484 11485 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, 11486 int *mmu_reset_needed, bool update_pdptrs) 11487 { 11488 struct msr_data apic_base_msr; 11489 int idx; 11490 struct desc_ptr dt; 11491 11492 if (!kvm_is_valid_sregs(vcpu, sregs)) 11493 return -EINVAL; 11494 11495 apic_base_msr.data = sregs->apic_base; 11496 apic_base_msr.host_initiated = true; 11497 if (kvm_set_apic_base(vcpu, &apic_base_msr)) 11498 return -EINVAL; 11499 11500 if (vcpu->arch.guest_state_protected) 11501 return 0; 11502 11503 dt.size = sregs->idt.limit; 11504 dt.address = sregs->idt.base; 11505 static_call(kvm_x86_set_idt)(vcpu, &dt); 11506 dt.size = sregs->gdt.limit; 11507 dt.address = sregs->gdt.base; 11508 static_call(kvm_x86_set_gdt)(vcpu, &dt); 11509 11510 vcpu->arch.cr2 = sregs->cr2; 11511 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; 11512 vcpu->arch.cr3 = sregs->cr3; 11513 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 11514 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); 11515 11516 kvm_set_cr8(vcpu, sregs->cr8); 11517 11518 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 11519 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 11520 11521 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 11522 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 11523 vcpu->arch.cr0 = sregs->cr0; 11524 11525 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 11526 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 11527 11528 if (update_pdptrs) { 11529 idx = srcu_read_lock(&vcpu->kvm->srcu); 11530 if (is_pae_paging(vcpu)) { 11531 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); 11532 *mmu_reset_needed = 1; 11533 } 11534 srcu_read_unlock(&vcpu->kvm->srcu, idx); 11535 } 11536 11537 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); 11538 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); 11539 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); 11540 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); 11541 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); 11542 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); 11543 11544 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 11545 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 11546 11547 update_cr8_intercept(vcpu); 11548 11549 /* Older userspace won't unhalt the vcpu on reset. 
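 * The check below matches the architectural reset state: CS selector
 * 0xf000 with base 0xffff0000 and RIP 0xfff0 (i.e. the 0xfffffff0 reset
 * vector) with the vCPU still in Real Mode, in which case the BSP is
 * forced to RUNNABLE so that the freshly-reset state actually runs. */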
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	int pending_vec, max_bits;
	int mmu_reset_needed = 0;
	int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);

	if (ret)
		return ret;

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = KVM_NR_INTERRUPTS;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);

	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
	return 0;
}

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
{
	int mmu_reset_needed = 0;
	bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
	bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
		!(sregs2->efer & EFER_LMA);
	int i, ret;

	if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
		return -EINVAL;

	if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
		return -EINVAL;

	ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
				 &mmu_reset_needed, !valid_pdptrs);
	if (ret)
		return ret;

	if (valid_pdptrs) {
		for (i = 0; i < 4 ; i++)
			kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);

		kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
		mmu_reset_needed = 1;
		vcpu->arch.pdptrs_from_userspace = true;
	}
	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = __set_sregs(vcpu, sregs);
	vcpu_put(vcpu);
	return ret;
}

static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{
	bool set = false;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (!enable_apicv)
		return;

	down_write(&kvm->arch.apicv_update_lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
			set = true;
			break;
		}
	}
	__kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
	up_write(&kvm->arch.apicv_update_lock);
}
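/*
 * Illustrative sketch (not kernel code): restoring PAE PDPTRs explicitly with
 * KVM_SET_SREGS2, as handled by __set_sregs2() above.  vcpu_fd and the sregs2
 * contents are assumptions for the example.
 *
 *	struct kvm_sregs2 sregs2;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
 *		err(1, "KVM_GET_SREGS2");
 *	sregs2.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
 *	// sregs2.pdptrs[0..3] must hold the saved PDPTEs; the vCPU must be
 *	// in PAE paging mode (CR0.PG=1, CR4.PAE=1, EFER.LMA=0) or the ioctl
 *	// fails with -EINVAL, matching the checks in __set_sregs2().
 *	if (ioctl(vcpu_fd, KVM_SET_SREGS2, &sregs2) < 0)
 *		err(1, "KVM_SET_SREGS2");
 */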
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (vcpu->arch.guest_state_protected)
		return -EINVAL;

	vcpu_load(vcpu);

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (kvm_is_exception_pending(vcpu))
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as the potentially injected trace flags are
	 * still filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
	kvm_update_dr7(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	static_call(kvm_x86_update_exception_bitmap)(vcpu);

	kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);

	r = 0;

out:
	vcpu_put(vcpu);
	return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	vcpu_load(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != INVALID_GPA;
	tr->writeable = 1;
	tr->usermode = 0;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave;

	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
		return 0;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave;

	if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
		return 0;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));

	vcpu_put(vcpu);
	return 0;
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
		__get_regs(vcpu, &vcpu->run->s.regs.regs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
		kvm_vcpu_ioctl_x86_get_vcpu_events(
			vcpu, &vcpu->run->s.regs.events);
}
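/*
 * Illustrative sketch (not kernel code): enabling single-step from userspace,
 * which lands in kvm_arch_vcpu_ioctl_set_guest_debug() above.  vcpu_fd is an
 * assumption for the example.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		err(1, "KVM_SET_GUEST_DEBUG");
 *	// The next KVM_RUN exits with KVM_EXIT_DEBUG after one instruction.
 */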
static int sync_regs(struct kvm_vcpu *vcpu)
{
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
		__set_regs(vcpu, &vcpu->run->s.regs.regs);
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
	}

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
		struct kvm_sregs sregs = vcpu->run->s.regs.sregs;

		if (__set_sregs(vcpu, &sregs))
			return -EINVAL;

		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
	}

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
		struct kvm_vcpu_events events = vcpu->run->s.regs.events;

		if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
			return -EINVAL;

		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
	}

	return 0;
}
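/*
 * Illustrative sketch (not kernel code): using the synced-register scheme that
 * store_regs()/sync_regs() implement, which avoids KVM_GET/SET_REGS ioctls by
 * sharing state through the kvm_run page.  run is assumed to be the mmap()ed
 * vcpu run structure.
 *
 *	run->kvm_valid_regs = KVM_SYNC_X86_REGS;	// please fill s.regs.regs
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	run->s.regs.regs.rax = 0;			// modify in place
 *	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;	// tell KVM to reload it
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */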
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (kvm_check_tsc_unstable() && kvm->created_vcpus)
		pr_warn_once("SMP vm created on host with unstable TSC; "
			     "guest TSC will not be reliable\n");

	if (!kvm->arch.max_vcpu_ids)
		kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;

	if (id >= kvm->arch.max_vcpu_ids)
		return -EINVAL;

	return static_call(kvm_x86_vcpu_precreate)(kvm);
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

	vcpu->arch.last_vmentry_cpu = -1;
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);

	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		return r;

	if (irqchip_in_kernel(vcpu->kvm)) {
		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
		if (r < 0)
			goto fail_mmu_destroy;

		/*
		 * Defer evaluating inhibits until the vCPU is first run, as
		 * this vCPU will not get notified of any changes until this
		 * vCPU is visible to other vCPUs (marked online and added to
		 * the set of vCPUs).  Opportunistically mark APICv active as
		 * VMX in particular is highly unlikely to have inhibits.
		 * Ignore the current per-VM APICv state so that vCPU creation
		 * is guaranteed to run with a deterministic value, the request
		 * will ensure the vCPU gets the correct state before VM-Entry.
		 */
		if (enable_apicv) {
			vcpu->arch.apic->apicv_active = true;
			kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
		}
	} else
		static_branch_inc(&kvm_has_noapic_vcpu);

	r = -ENOMEM;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!page)
		goto fail_free_lapic;
	vcpu->arch.pio_data = page_address(page);

	vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
				       GFP_KERNEL_ACCOUNT);
	vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
					    GFP_KERNEL_ACCOUNT);
	if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
		goto fail_free_mce_banks;
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
				GFP_KERNEL_ACCOUNT))
		goto fail_free_mce_banks;

	if (!alloc_emulate_ctxt(vcpu))
		goto free_wbinvd_dirty_mask;

	if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
		pr_err("failed to allocate vcpu's fpu\n");
		goto free_emulate_ctxt;
	}

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

	kvm_async_pf_hash_reset(vcpu);

	vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
	kvm_pmu_init(vcpu);

	vcpu->arch.pending_external_vector = -1;
	vcpu->arch.preempted_in_kernel = false;

#if IS_ENABLED(CONFIG_HYPERV)
	vcpu->arch.hv_root_tdp = INVALID_PAGE;
#endif

	r = static_call(kvm_x86_vcpu_create)(vcpu);
	if (r)
		goto free_guest_fpu;

	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
	kvm_xen_init_vcpu(vcpu);
	kvm_vcpu_mtrr_init(vcpu);
	vcpu_load(vcpu);
	kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
	kvm_vcpu_reset(vcpu, false);
	kvm_init_mmu(vcpu);
	vcpu_put(vcpu);
	return 0;

free_guest_fpu:
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
free_emulate_ctxt:
	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
free_wbinvd_dirty_mask:
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
	kfree(vcpu->arch.mci_ctl2_banks);
	free_page((unsigned long)vcpu->arch.pio_data);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
	return r;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (mutex_lock_killable(&vcpu->mutex))
		return;
	vcpu_load(vcpu);
	kvm_synchronize_tsc(vcpu, 0);
	vcpu_put(vcpu);

	/* poll control enabled by default */
	vcpu->arch.msr_kvm_poll_control = 1;

	mutex_unlock(&vcpu->mutex);

	if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
		schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
				      KVMCLOCK_SYNC_PERIOD);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int idx;

	kvmclock_reset(vcpu);

	static_call(kvm_x86_vcpu_free)(vcpu);

	kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);

	kvm_xen_destroy_vcpu(vcpu);
	kvm_hv_vcpu_uninit(vcpu);
	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kfree(vcpu->arch.mci_ctl2_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
	kvfree(vcpu->arch.cpuid_entries);
	if (!lapic_in_kernel(vcpu))
		static_branch_dec(&kvm_has_noapic_vcpu);
}

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_cpuid_entry2 *cpuid_0x1;
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long new_cr0;

	/*
	 * Several of the "set" flows, e.g. ->set_cr0(), read other registers
	 * to handle side effects.  RESET emulation hits those flows and relies
	 * on emulated/virtualized registers, including those that are loaded
	 * into hardware, to be zeroed at vCPU creation.  Use CRs as a sentinel
	 * to detect improper or missing initialization.
	 */
	WARN_ON_ONCE(!init_event &&
		     (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));

	/*
	 * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
	 * possible to INIT the vCPU while L2 is active.  Force the vCPU back
	 * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
	 * bits), i.e. virtualization is disabled.
	 */
	if (is_guest_mode(vcpu))
		kvm_leave_nested(vcpu);

	kvm_lapic_reset(vcpu, init_event);

	WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
	vcpu->arch.hflags = 0;

	vcpu->arch.smi_pending = 0;
	vcpu->arch.smi_count = 0;
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
	vcpu->arch.nmi_injected = false;
	kvm_clear_interrupt_queue(vcpu);
	kvm_clear_exception_queue(vcpu);

	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	kvm_update_dr0123(vcpu);
	vcpu->arch.dr6 = DR6_ACTIVE_LOW;
	vcpu->arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(vcpu);

	vcpu->arch.cr2 = 0;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apf.msr_en_val = 0;
	vcpu->arch.apf.msr_int_val = 0;
	vcpu->arch.st.msr_val = 0;

	kvmclock_reset(vcpu);

	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;

	if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
		struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;

		/*
		 * All paths that lead to INIT are required to load the guest's
		 * FPU state (because most paths are buried in KVM_RUN).
		 */
		if (init_event)
			kvm_put_guest_fpu(vcpu);

		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
		fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);

		if (init_event)
			kvm_load_guest_fpu(vcpu);
	}

	if (!init_event) {
		kvm_pmu_reset(vcpu);
		vcpu->arch.smbase = 0x30000;

		vcpu->arch.msr_misc_features_enables = 0;
		vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
						  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;

		__kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
		__kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
	}

	/* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);

	/*
	 * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
	 * if no CPUID match is found.  Note, it's impossible to get a match at
	 * RESET since KVM emulates RESET before exposing the vCPU to userspace,
	 * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
	 * on RESET.  But, go through the motions in case that's ever remedied.
	 */
	cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
	kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);

	static_call(kvm_x86_vcpu_reset)(vcpu, init_event);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0xfff0);

	vcpu->arch.cr3 = 0;
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);

	/*
	 * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
	 * of Intel's SDM list CD/NW as being set on INIT, but they contradict
	 * (or qualify) that with a footnote stating that CD/NW are preserved.
	 */
	new_cr0 = X86_CR0_ET;
	if (init_event)
		new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
	else
		new_cr0 |= X86_CR0_NW | X86_CR0_CD;

	static_call(kvm_x86_set_cr0)(vcpu, new_cr0);
	static_call(kvm_x86_set_cr4)(vcpu, 0);
	static_call(kvm_x86_set_efer)(vcpu, 0);
	static_call(kvm_x86_update_exception_bitmap)(vcpu);

	/*
	 * On the standard CR0/CR4/EFER modification paths, there are several
	 * complex conditions determining whether the MMU has to be reset and/or
	 * which PCIDs have to be flushed.  However, CR0.WP and the paging-related
	 * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
	 * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
	 * CR0 will be '0' prior to RESET).  So we only need to check CR0.PG here.
	 */
	if (old_cr0 & X86_CR0_PG) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		kvm_mmu_reset_context(vcpu);
	}

	/*
	 * Intel's SDM states that all TLB entries are flushed on INIT.  AMD's
	 * APM states the TLBs are untouched by INIT, but it also states that
	 * the TLBs are flushed on "External initialization of the processor."
	 * Flush the guest TLB regardless of vendor, there is no meaningful
	 * benefit in relying on the guest to flush the TLB immediately after
	 * INIT.  A spurious TLB flush is benign and likely negligible from a
	 * performance perspective.
	 */
	if (init_event)
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reset);

void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
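/*
 * Worked example for the SIPI delivery above: for startup vector 0x9A the AP
 * starts with CS.selector = 0x9A00, CS.base = 0x9A000 and IP = 0, i.e. it
 * begins executing at physical address 0x9A000 in real mode (vector * 4 KiB).
 */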
int kvm_arch_hardware_enable(void)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;

	kvm_user_return_msr_cpu_online();

	ret = kvm_x86_check_processor_compatibility();
	if (ret)
		return ret;

	ret = static_call(kvm_x86_hardware_enable)();
	if (ret != 0)
		return ret;

	local_tsc = rdtsc();
	stable = !kvm_check_tsc_unstable();
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
	 * elapsed; our helper function, ktime_get_boottime_ns() will be using boot
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * lose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely).  It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
	 * Platforms with unreliable TSCs don't have to deal with this, they
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		list_for_each_entry(kvm, &vm_list, vm_list) {
			kvm->arch.backwards_tsc_observed = true;
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
			}

			/*
			 * We have to disable TSC offset matching: if you were
			 * booting a VM while issuing an S4 host suspend, you
			 * may have some problem.  Solving this issue is left
			 * as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}

	}
	return 0;
}

void kvm_arch_hardware_disable(void)
{
	static_call(kvm_x86_hardware_disable)();
	drop_user_return_notifiers();
}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	vcpu->arch.l1tf_flush_l1d = true;
	if (pmu->version && unlikely(pmu->event_count)) {
		pmu->need_cleanup = true;
		kvm_make_request(KVM_REQ_PMU, vcpu);
	}
	static_call(kvm_x86_sched_in)(vcpu, cpu);
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(to_kvm_hv(kvm)->hv_pa_pg);
	__kvm_arch_free_vm(kvm);
}


int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;
	unsigned long flags;

	if (type)
		return -EINVAL;

	ret = kvm_page_track_init(kvm);
	if (ret)
		goto out;

	ret = kvm_mmu_init_vm(kvm);
	if (ret)
		goto out_page_track;

	ret = static_call(kvm_x86_vm_init)(kvm);
	if (ret)
		goto out_uninit_mmu;

	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
	atomic_set(&kvm->arch.noncoherent_dma_count, 0);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);

	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
	mutex_init(&kvm->arch.apic_map_lock);
	seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	pvclock_update_vm_gtod_copy(kvm);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
	kvm->arch.guest_can_read_msr_platform_info = true;
	kvm->arch.enable_pmu = enable_pmu;

#if IS_ENABLED(CONFIG_HYPERV)
	spin_lock_init(&kvm->arch.hv_root_tdp_lock);
	kvm->arch.hv_root_tdp = INVALID_PAGE;
#endif

	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

	kvm_apicv_init(kvm);
	kvm_hv_init_vm(kvm);
	kvm_xen_init_vm(kvm);

	return 0;

out_uninit_mmu:
	kvm_mmu_uninit_vm(kvm);
out_page_track:
	kvm_page_track_cleanup(kvm);
out:
	return ret;
}

int kvm_arch_post_init_vm(struct kvm *kvm)
{
	return kvm_mmu_post_init_vm(kvm);
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_unload_vcpu_mmus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
	kvm_free_pit(kvm);
}

/**
 * __x86_set_memory_region: Setup KVM internal memory slot
 *
 * @kvm: the kvm pointer to the VM.
 * @id: the slot ID to setup.
 * @gpa: the GPA to install the slot (unused when @size == 0).
 * @size: the size of the slot.  Set to zero to uninstall a slot.
 *
 * This function helps to setup a KVM internal memory slot.  Specify
 * @size > 0 to install a new slot, while @size == 0 to uninstall a
 * slot.  The return code can be one of the following:
 *
 *   HVA:           on success (uninstall will return a bogus HVA)
 *   -errno:        on error
 *
 * The caller should always use IS_ERR() to check the return value
 * before use.  Note, the KVM internal memory slots are guaranteed to
 * remain valid and unchanged until the VM is destroyed, i.e., the
 * GPA->HVA translation will not change.  However, the HVA is a user
 * address, i.e. its accessibility is not guaranteed, and must be
 * accessed via __copy_{to,from}_user().
 */
void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				      u32 size)
{
	int i, r;
	unsigned long hva, old_npages;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *slot;

	/* Called with kvm->slots_lock held.  */
	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
		return ERR_PTR_USR(-EINVAL);

	slot = id_to_memslot(slots, id);
	if (size) {
		if (slot && slot->npages)
			return ERR_PTR_USR(-EEXIST);

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_ANONYMOUS, 0);
		if (IS_ERR_VALUE(hva))
			return (void __user *)hva;
	} else {
		if (!slot || !slot->npages)
			return NULL;

		old_npages = slot->npages;
		hva = slot->userspace_addr;
	}

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_userspace_memory_region m;

		m.slot = id | (i << 16);
		m.flags = 0;
		m.guest_phys_addr = gpa;
		m.userspace_addr = hva;
		m.memory_size = size;
		r = __kvm_set_memory_region(kvm, &m);
		if (r < 0)
			return ERR_PTR_USR(r);
	}

	if (!size)
		vm_munmap(hva, old_npages * PAGE_SIZE);

	return (void __user *)hva;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);
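/*
 * Hedged usage sketch: how a caller might install and later remove an internal
 * slot with __x86_set_memory_region().  The slot id and GPA below are
 * illustrative only; real callers (e.g. the VMX real-mode TSS setup) follow
 * the same pattern.
 *
 *	void __user *hva;
 *
 *	mutex_lock(&kvm->slots_lock);
 *	hva = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0xfffbd000,
 *				      PAGE_SIZE * 3);
 *	mutex_unlock(&kvm->slots_lock);
 *	if (IS_ERR(hva))
 *		return PTR_ERR(hva);
 *	// Uninstall: size == 0, the returned HVA is bogus and must be ignored.
 *	__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 */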
void kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
	kvm_mmu_pre_destroy_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the memory map has changed due to process exit
		 * or fd copying.
		 */
		mutex_lock(&kvm->slots_lock);
		__x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
					0, 0);
		__x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
					0, 0);
		__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
		mutex_unlock(&kvm->slots_lock);
	}
	kvm_unload_vcpu_mmus(kvm);
	static_call_cond(kvm_x86_vm_destroy)(kvm);
	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
	kvm_pic_destroy(kvm);
	kvm_ioapic_destroy(kvm);
	kvm_destroy_vcpus(kvm);
	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
	kvm_mmu_uninit_vm(kvm);
	kvm_page_track_cleanup(kvm);
	kvm_xen_destroy_vm(kvm);
	kvm_hv_destroy_vm(kvm);
}

static void memslot_rmap_free(struct kvm_memory_slot *slot)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.rmap[i]);
		slot->arch.rmap[i] = NULL;
	}
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	int i;

	memslot_rmap_free(slot);

	for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}

	kvm_page_track_free_memslot(slot);
}

int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
{
	const int sz = sizeof(*slot->arch.rmap[0]);
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		int level = i + 1;
		int lpages = __kvm_mmu_slot_lpages(slot, npages, level);

		if (slot->arch.rmap[i])
			continue;

		slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
		if (!slot->arch.rmap[i]) {
			memslot_rmap_free(slot);
			return -ENOMEM;
		}
	}

	return 0;
}

static int kvm_alloc_memslot_metadata(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	unsigned long npages = slot->npages;
	int i, r;

	/*
	 * Clear out the previous array pointers for the KVM_MR_MOVE case.  The
	 * old arrays will be freed by __kvm_set_memory_region() if installing
	 * the new memslot is successful.
	 */
	memset(&slot->arch, 0, sizeof(slot->arch));

	if (kvm_memslots_have_rmaps(kvm)) {
		r = memslot_rmap_alloc(slot, npages);
		if (r)
			return r;
	}

	for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
		struct kvm_lpage_info *linfo;
		unsigned long ugfn;
		int lpages;
		int level = i + 1;

		lpages = __kvm_mmu_slot_lpages(slot, npages, level);

		linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
		if (!linfo)
			goto out_free;

		slot->arch.lpage_info[i - 1] = linfo;

		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			linfo[0].disallow_lpage = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			linfo[lpages - 1].disallow_lpage = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, disable large page support for this slot.
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				linfo[j].disallow_lpage = 1;
		}
	}

	if (kvm_page_track_create_memslot(kvm, slot, npages))
		goto out_free;

	return 0;

out_free:
	memslot_rmap_free(slot);

	for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
	kvm_mmu_invalidate_mmio_sptes(kvm, gen);

	/* Force re-initialization of steal_time cache */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
		if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
			return -EINVAL;

		return kvm_alloc_memslot_metadata(kvm, new);
	}

	if (change == KVM_MR_FLAGS_ONLY)
		memcpy(&new->arch, &old->arch, sizeof(old->arch));
	else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
		return -EIO;

	return 0;
}
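/*
 * Worked example for the disallow_lpage alignment logic above, assuming
 * 2 MiB huge pages (KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512): a slot with
 * base_gfn = 0x800 (2 MiB aligned) whose userspace_addr is only 4 KiB
 * aligned, say ugfn ending in ...001, gives (base_gfn ^ ugfn) & 511 != 0,
 * so every 2 MiB region in the slot gets disallow_lpage set.  Guest and host
 * offsets within a huge page could never line up, so a huge mapping would be
 * impossible anyway.
 */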
static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
{
	int nr_slots;

	if (!kvm_x86_ops.cpu_dirty_log_size)
		return;

	nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
	if ((enable && nr_slots == 1) || !nr_slots)
		kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
}

static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change)
{
	u32 old_flags = old ? old->flags : 0;
	u32 new_flags = new ? new->flags : 0;
	bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * Update CPU dirty logging if dirty logging is being toggled.  This
	 * applies to all operations.
	 */
	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
		kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);

	/*
	 * Nothing more to do for RO slots (which can't be dirtied and can't be
	 * made writable) or CREATE/MOVE/DELETE of a slot.
	 *
	 * For a memslot with dirty logging disabled:
	 * CREATE:      No dirty mappings will already exist.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 *
	 * For a memslot with dirty logging enabled:
	 * CREATE:      No shadow pages exist, thus nothing to write-protect
	 *		and no dirty bits to clear.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot().
	 */
	if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
		return;

	/*
	 * READONLY and non-flags changes were filtered out above, and the only
	 * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
	 * logging isn't being toggled on or off.
	 */
	if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
		return;

	if (!log_dirty_pages) {
		/*
		 * Dirty logging tracks sptes in 4k granularity, meaning that
		 * large sptes have to be split.  If live migration succeeds,
		 * the guest in the source machine will be destroyed and large
		 * sptes will be created in the destination.  However, if the
		 * guest continues to run in the source machine (for example if
		 * live migration fails), small sptes will remain around and
		 * cause bad performance.
		 *
		 * Scan sptes if dirty logging has been stopped, dropping those
		 * which can be collapsed into a single large-page spte.  Later
		 * page faults will create the large-page sptes.
		 */
		kvm_mmu_zap_collapsible_sptes(kvm, new);
	} else {
		/*
		 * Initially-all-set does not require write protecting any page,
		 * because they're all assumed to be dirty.
		 */
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;

		if (READ_ONCE(eager_page_split))
			kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);

		if (kvm_x86_ops.cpu_dirty_log_size) {
			kvm_mmu_slot_leaf_clear_dirty(kvm, new);
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
		} else {
			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
		}

		/*
		 * Unconditionally flush the TLBs after enabling dirty logging.
		 * A flush is almost always going to be necessary (see below),
		 * and unconditionally flushing allows the helpers to omit
		 * the subtly complex checks when removing write access.
		 *
		 * Do the flush outside of mmu_lock to reduce the amount of
		 * time mmu_lock is held.  Flushing after dropping mmu_lock is
		 * safe as KVM only needs to guarantee the slot is fully
		 * write-protected before returning to userspace, i.e. before
		 * userspace can consume the dirty status.
		 *
		 * Flushing outside of mmu_lock requires KVM to be careful when
		 * making decisions based on writable status of an SPTE, e.g. a
		 * !writable SPTE doesn't guarantee a CPU can't perform writes.
		 *
		 * Specifically, KVM also write-protects guest page tables to
		 * monitor changes when using shadow paging, and must guarantee
		 * no CPUs can write to those pages before mmu_lock is dropped.
		 * Because CPUs may have stale TLB entries at this point, a
		 * !writable SPTE doesn't guarantee CPUs can't perform writes.
		 *
		 * KVM also allows making SPTES writable outside of mmu_lock,
		 * e.g. to allow dirty logging without taking mmu_lock.
		 *
		 * To handle these scenarios, KVM uses a separate software-only
		 * bit (MMU-writable) to track if a SPTE is !writable due to
		 * a guest page table being write-protected (KVM clears the
		 * MMU-writable flag when write-protecting for shadow paging).
		 *
		 * The use of MMU-writable is also the primary motivation for
		 * the unconditional flush.  Because KVM must guarantee that a
		 * CPU doesn't contain stale, writable TLB entries for a
		 * !MMU-writable SPTE, KVM must flush if it encounters any
		 * MMU-writable SPTE regardless of whether the actual hardware
		 * writable bit was set.  I.e. KVM is almost guaranteed to need
		 * to flush, while unconditionally flushing allows the "remove
		 * write access" helpers to ignore MMU-writable entirely.
		 *
		 * See is_writable_pte() for more details (the case involving
		 * access-tracked SPTEs is particularly relevant).
		 */
		kvm_flush_remote_tlbs_memslot(kvm, new);
	}
}
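/*
 * Illustrative sketch (not kernel code): the userspace side of the dirty
 * logging toggle handled above.  mem is assumed to be the
 * kvm_userspace_memory_region describing an existing slot, vm_fd and bitmap
 * are assumptions for the example.
 *
 *	mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);	// write-protect + flush
 *
 *	struct kvm_dirty_log log = { .slot = mem.slot, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);		// harvest dirty bits
 *
 *	mem.flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);	// collapsible sptes
 *							// are zapped here
 */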
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	if (!kvm->arch.n_requested_mmu_pages &&
	    (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
		unsigned long nr_mmu_pages;

		nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
		nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_apply_flags(kvm, old, new, change);

	/* Free the arrays associated with the old memslot. */
	if (change == KVM_MR_MOVE)
		kvm_arch_free_memslot(kvm, old);
}

static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	return (is_guest_mode(vcpu) &&
		static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
}

static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
	    kvm_apic_init_sipi_allowed(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (kvm_is_exception_pending(vcpu))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     static_call(kvm_x86_nmi_allowed)(vcpu, false)))
		return true;

#ifdef CONFIG_KVM_SMM
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending &&
	     static_call(kvm_x86_smi_allowed)(vcpu, false)))
		return true;
#endif

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    (kvm_cpu_has_interrupt(vcpu) ||
	     kvm_guest_apic_has_interrupt(vcpu)))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	if (is_guest_mode(vcpu) &&
	    kvm_x86_ops.nested_ops->has_events &&
	    kvm_x86_ops.nested_ops->has_events(vcpu))
		return true;

	if (kvm_xen_has_pending_events(vcpu))
		return true;

	return false;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_apicv_active(vcpu) &&
	    static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
		return true;

	return false;
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
#ifdef CONFIG_KVM_SMM
	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
#endif
	    kvm_test_request(KVM_REQ_EVENT, vcpu))
		return true;

	return kvm_arch_dy_has_pending_interrupt(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return true;

	return vcpu->arch.preempted_in_kernel;
}

unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return kvm_rip_read(vcpu);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return static_call(kvm_x86_interrupt_allowed)(vcpu, false);
}

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	/* Can't read the RIP when guest state is protected, just return 0 */
	if (vcpu->arch.guest_state_protected)
		return 0;

	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

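/*
 * Worked example for kvm_get_linear_rip(): outside 64-bit mode the linear RIP
 * is the CS base plus the current RIP, truncated to 32 bits.  With
 * CS.base = 0xffff0000 and RIP = 0xfff0 (the reset state), the linear RIP is
 * 0xfffffff0, which is how the single-step tracking in the rflags helpers
 * below identifies an instruction regardless of segmentation.
 */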
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = static_call(kvm_x86_get_rflags)(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	static_call(kvm_x86_set_rflags)(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));

	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < ASYNC_PF_PER_VCPU &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}
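/*
 * The deletion below uses the classic open-addressing "backward shift"
 * technique: after emptying slot i, walk forward and pull back any entry
 * whose natural hash position k lies cyclically in ]i, j], so later probes
 * never hit a spurious empty slot.  Hedged standalone demo of the same idea
 * (table size, hash function and the 0-means-empty convention are toy
 * assumptions):
 *
 *	#define N 8				// power of two, as above
 *	static unsigned int tbl[N];		// 0 == empty
 *	static unsigned int hashf(unsigned int v) { return v & (N - 1); }
 *
 *	static void del(unsigned int i)
 *	{
 *		unsigned int j = i, k;
 *
 *		for (;;) {
 *			tbl[i] = 0;
 *			do {
 *				j = (j + 1) & (N - 1);
 *				if (!tbl[j])
 *					return;
 *				k = hashf(tbl[j]);
 *			} while ((i <= j) ? (i < k && k <= j)
 *					  : (i < k || k <= j));
 *			tbl[i] = tbl[j];	// entry at j can move to i
 *			i = j;
 *		}
 *	}
 */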
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);

	if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
		return;

	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
{
	u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
				      sizeof(reason));
}

static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);

	return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					     &token, offset, sizeof(token));
}

static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
{
	unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
	u32 val;

	if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
					 &val, offset, sizeof(val)))
		return false;

	return !val;
}

static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
{

	if (!kvm_pv_async_pf_enabled(vcpu))
		return false;

	if (vcpu->arch.apf.send_user_only &&
	    static_call(kvm_x86_get_cpl)(vcpu) == 0)
		return false;

	if (is_guest_mode(vcpu)) {
		/*
		 * L1 needs to opt into the special #PF vmexits that are
		 * used to deliver async page faults.
		 */
		return vcpu->arch.apf.delivery_as_pf_vmexit;
	} else {
		/*
		 * Play it safe in case the guest temporarily disables paging.
		 * The real mode IDT in particular is unlikely to have a #PF
		 * exception setup.
		 */
		return is_paging(vcpu);
	}
}

bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
	if (unlikely(!lapic_in_kernel(vcpu) ||
		     kvm_event_needs_reinjection(vcpu) ||
		     kvm_is_exception_pending(vcpu)))
		return false;

	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
		return false;

	/*
	 * If interrupts are off we cannot even use an artificial
	 * halt state.
	 */
	return kvm_arch_interrupt_allowed(vcpu);
}
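/*
 * For context, the guest side of this handshake (see arch/x86/kernel/kvm.c)
 * roughly looks like the hedged sketch below: the guest registers a per-CPU
 * kvm_vcpu_pv_apf_data area and an interrupt vector, after which the checks
 * above decide whether a "page not present" event may be delivered.
 *
 *	u64 pa = __pa(&apf_reason);	// per-CPU kvm_vcpu_pv_apf_data
 *
 *	wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);
 *	wrmsrl(MSR_KVM_ASYNC_PF_EN,
 *	       pa | KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT);
 */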
bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (kvm_can_deliver_async_pf(vcpu) &&
	    !apf_put_user_notpresent(vcpu)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
		return true;
	} else {
		/*
		 * It is not possible to deliver a paravirtualized asynchronous
		 * page fault, but putting the guest in an artificial halt state
		 * can be beneficial nevertheless: if an interrupt arrives, we
		 * can deliver it timely and perhaps the guest will schedule
		 * another process.  When the instruction that triggered a page
		 * fault is retried, hopefully the page will be ready in the host.
		 */
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
		return false;
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vcpu->arch.apf.vec
	};

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);

	if ((work->wakeup_all || work->notpresent_injected) &&
	    kvm_pv_async_pf_enabled(vcpu) &&
	    !apf_put_user_ready(vcpu, work->arch.token)) {
		vcpu->arch.apf.pageready_pending = true;
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}

	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_APF_READY, vcpu);
	if (!vcpu->arch.apf.pageready_pending)
		kvm_vcpu_kick(vcpu);
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!kvm_pv_async_pf_enabled(vcpu))
		return true;
	else
		return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}

void kvm_arch_start_assignment(struct kvm *kvm)
{
	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
		static_call_cond(kvm_x86_pi_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return raw_atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
	return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	int ret;

	irqfd->producer = prod;
	kvm_arch_start_assignment(irqfd->kvm);
	ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm,
						  prod->irq, irqfd->gsi, 1);

	if (ret)
		kvm_arch_end_assignment(irqfd->kvm);

	return ret;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation when the
	 * IRQ is masked/disabled or the consumer side (KVM in this case)
	 * doesn't want to receive the interrupts.
	 */
	ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);

	kvm_arch_end_assignment(irqfd->kvm);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set)
{
	return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set);
}

bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
				  struct kvm_kernel_irq_routing_entry *new)
{
	if (new->type != KVM_IRQ_ROUTING_MSI)
		return true;

	return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
}

bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);


int kvm_spec_ctrl_test_value(u64 value)
{
	/*
	 * test that setting IA32_SPEC_CTRL to given value
	 * is allowed by the host processor
	 */

	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);

	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	struct x86_exception fault;
	u64 access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	if (!(error_code & PFERR_PRESENT_MASK) ||
	    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
		/*
		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
		 * tables probably do not match the TLB.  Just proceed
		 * with the error code that the processor gave.
		 */
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = error_code;
		fault.nested_page_fault = false;
		fault.address = gva;
		fault.async_page_fault = false;
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
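/*
 * Typical use of kvm_spec_ctrl_test_value(): the MSR emulation path probes
 * whether the host CPU accepts a guest-requested IA32_SPEC_CTRL value before
 * committing it.  Hedged sketch of such a caller:
 *
 *	case MSR_IA32_SPEC_CTRL:
 *		if (kvm_spec_ctrl_test_value(data))
 *			return 1;	// reject the write, guest gets #GP
 *		vcpu->arch.spec_ctrl = data;
 *		break;
 */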
bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

int kvm_spec_ctrl_test_value(u64 value)
{
	/*
	 * Test whether the host processor accepts writing @value to
	 * IA32_SPEC_CTRL.
	 */
	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);

	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);

void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
	struct x86_exception fault;
	u64 access = error_code &
		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);

	if (!(error_code & PFERR_PRESENT_MASK) ||
	    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
		/*
		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
		 * tables probably do not match the TLB.  Just proceed
		 * with the error code that the processor gave.
		 */
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = error_code;
		fault.nested_page_fault = false;
		fault.address = gva;
		fault.async_page_fault = false;
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);

/*
 * Handles kvm_read/write_guest_virt*() result and either injects #PF or
 * returns KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM.
 * The return value indicates whether an exit to userspace is needed.
 */
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e)
{
	if (r == X86EMUL_PROPAGATE_FAULT) {
		if (KVM_BUG_ON(!e, vcpu->kvm))
			return -EIO;

		kvm_inject_emulated_page_fault(vcpu, e);
		return 1;
	}

	/*
	 * If kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED while
	 * handling a VMX instruction, KVM could have handled the request
	 * correctly by exiting to userspace and performing I/O, but there
	 * doesn't seem to be a real use-case behind such requests, so just
	 * return KVM_EXIT_INTERNAL_ERROR for now.
	 */
	kvm_prepare_emulation_failure_exit(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);

int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
	bool pcid_enabled;
	struct x86_exception e;
	struct {
		u64 pcid;
		u64 gla;
	} operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return kvm_handle_memory_failure(vcpu, r, &e);

	if (operand.pcid >> 12 != 0) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);

	switch (type) {
	case INVPCID_TYPE_INDIV_ADDR:
		if ((!pcid_enabled && (operand.pcid != 0)) ||
		    is_noncanonical_address(operand.gla, vcpu)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_SINGLE_CTXT:
		if (!pcid_enabled && (operand.pcid != 0)) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}

		kvm_invalidate_pcid(vcpu, operand.pcid);
		return kvm_skip_emulated_instruction(vcpu);

	case INVPCID_TYPE_ALL_NON_GLOBAL:
		/*
		 * Currently, KVM doesn't mark global entries in the shadow
		 * page tables, so a non-global flush just degenerates to a
		 * global flush.  If needed, this could be optimized later by
		 * keeping track of global entries in shadow page tables.
		 */
		fallthrough;
	case INVPCID_TYPE_ALL_INCL_GLOBAL:
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return kvm_skip_emulated_instruction(vcpu);

	default:
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
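
/*
 * For reference, the 16-byte memory operand decoded by kvm_handle_invpcid()
 * follows the SDM layout mirrored by the anonymous struct above.  A worked
 * example (values are arbitrary) of the single-address case:
 *
 *	struct {
 *		u64 pcid;	// bits 11:0 hold the PCID, bits 63:12 must be 0
 *		u64 gla;	// linear address, must be canonical for type 0
 *	} desc = { .pcid = 5, .gla = 0xffff888000001000 };
 *
 *	// INVPCID_TYPE_INDIV_ADDR flushes only the translation for
 *	// (PCID 5, desc.gla); KVM emulates that with
 *	// kvm_mmu_invpcid_gva(vcpu, desc.gla, desc.pcid).
 */
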
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned int len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete the previous fragment. */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/*
		 * VMG change: at this point we're always done, as RIP has
		 * already been advanced.
		 */
		return 1;
	}

	/* More MMIO is needed. */
	run->mmio.phys_addr = frag->gpa;
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	if (run->mmio.is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			  void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if the number of frags needs to be incremented. */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 1;
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);

int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
{
	int handled;
	struct kvm_mmio_fragment *frag;

	if (!data)
		return -EINVAL;

	handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
	if (handled == bytes)
		return 1;

	bytes -= handled;
	gpa += handled;
	data += handled;

	/* TODO: Check if the number of frags needs to be incremented. */
	frag = vcpu->mmio_fragments;
	vcpu->mmio_nr_fragments = 1;
	frag->len = bytes;
	frag->gpa = gpa;
	frag->data = data;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

	vcpu->run->mmio.phys_addr = gpa;
	vcpu->run->mmio.len = min(8u, frag->len);
	vcpu->run->mmio.is_write = 0;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
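
/*
 * Sketch of the userspace half of the SEV-ES MMIO protocol above: when
 * kvm_sev_es_mmio_read()/kvm_sev_es_mmio_write() return 0, the VMM observes
 * KVM_EXIT_MMIO and services at most 8 bytes per round trip before
 * re-entering the vCPU, which invokes complete_sev_es_emulated_mmio().
 * The device-model helpers are assumptions:
 *
 *	struct kvm_run *run = ...;	// the mmap()ed vcpu run structure
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,	// hypothetical
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,	// hypothetical
 *				    run->mmio.data, run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 */
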
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned int count, int size)
{
	vcpu->arch.sev_pio_count -= count;
	vcpu->arch.sev_pio_data += count * size;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port);

static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	vcpu->arch.pio.count = 0;
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_outs(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
			   unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);

		/* The memcpy is already done by emulator_pio_out(). */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!ret)
			break;

		/* Emulation was done by the kernel. */
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
	return 0;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port);

static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
	unsigned int count = vcpu->arch.pio.count;
	int size = vcpu->arch.pio.size;
	int port = vcpu->arch.pio.port;

	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
	advance_sev_es_emulated_pio(vcpu, count, size);
	if (vcpu->arch.sev_pio_count)
		return kvm_sev_es_ins(vcpu, size, port);
	return 1;
}

static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
			  unsigned int port)
{
	for (;;) {
		unsigned int count =
			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
		if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
			break;

		/* Emulation was done by the kernel. */
		advance_sev_es_emulated_pio(vcpu, count, size);
		if (!vcpu->arch.sev_pio_count)
			return 1;
	}

	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
	return 0;
}

int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in)
{
	vcpu->arch.sev_pio_data = data;
	vcpu->arch.sev_pio_count = count;
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
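
/*
 * The loops above move at most PAGE_SIZE bytes of guest data per
 * emulator_pio_in()/emulator_pio_out() call.  A worked example of the
 * chunking, assuming a 4KiB PAGE_SIZE: "rep outsw" (size = 2) with
 * sev_pio_count = 3000 transfers min(4096 / 2, 3000) = 2048 units on the
 * first pass and leaves 952 for the next iteration, or for the completion
 * callback if userspace has to finish the I/O.
 */
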
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);

static int __init kvm_x86_init(void)
{
	kvm_mmu_x86_module_init();
	mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
	return 0;
}
module_init(kvm_x86_init);

static void __exit kvm_x86_exit(void)
{
	/*
	 * If module_init() is implemented, module_exit() must also be
	 * implemented to allow module unload.
	 */
}
module_exit(kvm_x86_exit);